Subversion Repositories: HelenOS

Compare Revisions: Rev 2081 → Rev 2082
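The change in this revision is mechanical: every __asm__ keyword in the sparc64 inline assembly below is replaced with the plain asm spelling. Both spellings are accepted by GCC when GNU extensions are enabled (__asm__ is additionally valid in strict ISO C mode), and the generated code is identical. A minimal sketch of the two spellings, modelled on pstate_read() below rather than copied from it:

#include <stdint.h>

/* Illustrative only: the same rdpr statement written with the old and the new keyword. */
static inline uint64_t example_read_pstate(void)
{
	uint64_t v;
	__asm__ volatile ("rdpr %%pstate, %0\n" : "=r" (v));	/* spelling before this revision */
	asm volatile ("rdpr %%pstate, %0\n" : "=r" (v));	/* spelling after this revision */
	return v;
}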

/trunk/kernel/arch/sparc64/include/atomic.h
57,7 → 57,7
 
a = *((uint64_t *) x);
b = a + i;
__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a));
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a));
} while (a != b);
 
return a;
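For context, the casx loop above is the standard compare-and-swap retry pattern: read the current value, compute the new one, attempt the swap, and retry if another CPU modified the location in between. A C-level sketch of the same pattern using GCC's __sync_val_compare_and_swap builtin instead of hand-written casx (the function name is illustrative, not part of HelenOS):

#include <stdint.h>

/* Sketch of the retry loop that the casx-based atomic_add performs:
 * keep trying to replace *p with (old + i) until no other CPU has
 * changed *p between the read and the swap; return the old value. */
static inline uint64_t atomic_add_sketch(volatile uint64_t *p, uint64_t i)
{
	uint64_t old;

	do {
		old = *p;
		/* The builtin returns the value found in *p; it equals old
		 * exactly when the swap succeeded. */
	} while (__sync_val_compare_and_swap(p, old, old + i) != old);

	return old;
}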
98,7 → 98,7
uint64_t v = 1;
volatile uintptr_t x = (uint64_t) &val->count;
 
__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (v) : "r" (0));
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (v) : "r" (0));
 
return v;
}
110,7 → 110,7
 
volatile uintptr_t x = (uint64_t) &val->count;
 
__asm__ volatile (
asm volatile (
"0:\n"
"casx %0, %3, %1\n"
"brz %1, 2f\n"
/trunk/kernel/arch/sparc64/include/asm.h
51,7 → 51,7
{
uint64_t v;
__asm__ volatile ("rdpr %%pstate, %0\n" : "=r" (v));
asm volatile ("rdpr %%pstate, %0\n" : "=r" (v));
return v;
}
62,7 → 62,7
*/
static inline void pstate_write(uint64_t v)
{
__asm__ volatile ("wrpr %0, %1, %%pstate\n" : : "r" (v), "i" (0));
asm volatile ("wrpr %0, %1, %%pstate\n" : : "r" (v), "i" (0));
}
 
/** Read TICK_compare Register.
73,7 → 73,7
{
uint64_t v;
__asm__ volatile ("rd %%tick_cmpr, %0\n" : "=r" (v));
asm volatile ("rd %%tick_cmpr, %0\n" : "=r" (v));
return v;
}
84,7 → 84,7
*/
static inline void tick_compare_write(uint64_t v)
{
__asm__ volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0));
asm volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0));
}
 
/** Read TICK Register.
95,7 → 95,7
{
uint64_t v;
__asm__ volatile ("rdpr %%tick, %0\n" : "=r" (v));
asm volatile ("rdpr %%tick, %0\n" : "=r" (v));
return v;
}
106,7 → 106,7
*/
static inline void tick_write(uint64_t v)
{
__asm__ volatile ("wrpr %0, %1, %%tick\n" : : "r" (v), "i" (0));
asm volatile ("wrpr %0, %1, %%tick\n" : : "r" (v), "i" (0));
}
 
/** Read FPRS Register.
117,7 → 117,7
{
uint64_t v;
__asm__ volatile ("rd %%fprs, %0\n" : "=r" (v));
asm volatile ("rd %%fprs, %0\n" : "=r" (v));
return v;
}
128,7 → 128,7
*/
static inline void fprs_write(uint64_t v)
{
__asm__ volatile ("wr %0, %1, %%fprs\n" : : "r" (v), "i" (0));
asm volatile ("wr %0, %1, %%fprs\n" : : "r" (v), "i" (0));
}
 
/** Read SOFTINT Register.
139,7 → 139,7
{
uint64_t v;
 
__asm__ volatile ("rd %%softint, %0\n" : "=r" (v));
asm volatile ("rd %%softint, %0\n" : "=r" (v));
 
return v;
}
150,7 → 150,7
*/
static inline void softint_write(uint64_t v)
{
__asm__ volatile ("wr %0, %1, %%softint\n" : : "r" (v), "i" (0));
asm volatile ("wr %0, %1, %%softint\n" : : "r" (v), "i" (0));
}
 
/** Write CLEAR_SOFTINT Register.
161,7 → 161,7
*/
static inline void clear_softint_write(uint64_t v)
{
__asm__ volatile ("wr %0, %1, %%clear_softint\n" : : "r" (v), "i" (0));
asm volatile ("wr %0, %1, %%clear_softint\n" : : "r" (v), "i" (0));
}
 
/** Write SET_SOFTINT Register.
172,7 → 172,7
*/
static inline void set_softint_write(uint64_t v)
{
__asm__ volatile ("wr %0, %1, %%set_softint\n" : : "r" (v), "i" (0));
asm volatile ("wr %0, %1, %%set_softint\n" : : "r" (v), "i" (0));
}
 
/** Enable interrupts.
247,7 → 247,7
{
uintptr_t unbiased_sp;
__asm__ volatile ("add %%sp, %1, %0\n" : "=r" (unbiased_sp) : "i" (STACK_BIAS));
asm volatile ("add %%sp, %1, %0\n" : "=r" (unbiased_sp) : "i" (STACK_BIAS));
return ALIGN_DOWN(unbiased_sp, STACK_SIZE);
}
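get_stack_base() above accounts for the SPARC v9 stack bias: the value kept in %sp is offset by STACK_BIAS from the address it logically refers to, so the bias is added back before aligning down to the stack size. A plain C sketch of the same arithmetic (the constants are illustrative; HelenOS defines STACK_BIAS and STACK_SIZE elsewhere):

#include <stdint.h>

/* Sketch only: the arithmetic get_stack_base() performs.  The SPARC v9
 * ABI stack bias is 2047; the stack size is assumed to be a power of
 * two so that ALIGN_DOWN reduces to a mask. */
#define EXAMPLE_STACK_BIAS	2047
#define EXAMPLE_STACK_SIZE	(1 << 13)	/* 8 KiB, illustrative */

static inline uintptr_t stack_base_sketch(uintptr_t biased_sp)
{
	uintptr_t unbiased_sp = biased_sp + EXAMPLE_STACK_BIAS;
	return unbiased_sp & ~((uintptr_t) (EXAMPLE_STACK_SIZE - 1));
}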
260,7 → 260,7
{
uint64_t v;
__asm__ volatile ("rdpr %%ver, %0\n" : "=r" (v));
asm volatile ("rdpr %%ver, %0\n" : "=r" (v));
return v;
}
273,7 → 273,7
{
uint64_t v;
__asm__ volatile ("rdpr %%tpc, %0\n" : "=r" (v));
asm volatile ("rdpr %%tpc, %0\n" : "=r" (v));
return v;
}
286,7 → 286,7
{
uint64_t v;
__asm__ volatile ("rdpr %%tl, %0\n" : "=r" (v));
asm volatile ("rdpr %%tl, %0\n" : "=r" (v));
return v;
}
299,7 → 299,7
{
uint64_t v;
__asm__ volatile ("rdpr %%tba, %0\n" : "=r" (v));
asm volatile ("rdpr %%tba, %0\n" : "=r" (v));
return v;
}
310,7 → 310,7
*/
static inline void tba_write(uint64_t v)
{
__asm__ volatile ("wrpr %0, %1, %%tba\n" : : "r" (v), "i" (0));
asm volatile ("wrpr %0, %1, %%tba\n" : : "r" (v), "i" (0));
}
 
/** Load uint64_t from alternate space.
324,7 → 324,7
{
uint64_t v;
__asm__ volatile ("ldxa [%1] %2, %0\n" : "=r" (v) : "r" (va), "i" ((unsigned) asi));
asm volatile ("ldxa [%1] %2, %0\n" : "=r" (v) : "r" (va), "i" ((unsigned) asi));
return v;
}
337,25 → 337,25
*/
static inline void asi_u64_write(asi_t asi, uintptr_t va, uint64_t v)
{
__asm__ volatile ("stxa %0, [%1] %2\n" : : "r" (v), "r" (va), "i" ((unsigned) asi) : "memory");
asm volatile ("stxa %0, [%1] %2\n" : : "r" (v), "r" (va), "i" ((unsigned) asi) : "memory");
}
 
/** Flush all valid register windows to memory. */
static inline void flushw(void)
{
__asm__ volatile ("flushw\n");
asm volatile ("flushw\n");
}
 
/** Switch to nucleus by setting TL to 1. */
static inline void nucleus_enter(void)
{
__asm__ volatile ("wrpr %g0, 1, %tl\n");
asm volatile ("wrpr %g0, 1, %tl\n");
}
 
/** Switch from nucleus by setting TL to 0. */
static inline void nucleus_leave(void)
{
__asm__ volatile ("wrpr %g0, %g0, %tl\n");
asm volatile ("wrpr %g0, %g0, %tl\n");
}
 
/** Read UPA_CONFIG register.
/trunk/kernel/arch/sparc64/include/barrier.h
39,12 → 39,12
* Our critical section barriers are prepared for the weakest RMO memory model.
*/
#define CS_ENTER_BARRIER() \
__asm__ volatile ( \
asm volatile ( \
"membar #LoadLoad | #LoadStore\n" \
::: "memory" \
)
#define CS_LEAVE_BARRIER() \
__asm__ volatile ( \
asm volatile ( \
"membar #StoreStore\n" \
"membar #LoadStore\n" \
::: "memory" \
51,11 → 51,11
)
 
#define memory_barrier() \
__asm__ volatile ("membar #LoadLoad | #StoreStore\n" ::: "memory")
asm volatile ("membar #LoadLoad | #StoreStore\n" ::: "memory")
#define read_barrier() \
__asm__ volatile ("membar #LoadLoad\n" ::: "memory")
asm volatile ("membar #LoadLoad\n" ::: "memory")
#define write_barrier() \
__asm__ volatile ("membar #StoreStore\n" ::: "memory")
asm volatile ("membar #StoreStore\n" ::: "memory")
 
/** Flush Instruction Memory instruction. */
static inline void flush(void)
70,13 → 70,13
* DTLB.
*/
__asm__ volatile ("flush %o7\n");
asm volatile ("flush %o7\n");
}
 
/** Memory Barrier instruction. */
static inline void membar(void)
{
__asm__ volatile ("membar #Sync\n");
asm volatile ("membar #Sync\n");
}
 
#endif
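The comment near the top of this file (critical section barriers prepared for the weakest RMO memory model) explains the membar flags above: under relaxed memory order, accesses inside a critical section must not be reordered across the acquire or the release. A sketch of the intended usage pattern, with placeholder lock primitives that are not part of this diff:

/* Illustrative pattern only. */
void example_critical_section(void)
{
	/* lock_acquire(&lock); */	/* hypothetical acquiring load/swap */
	CS_ENTER_BARRIER();	/* #LoadLoad | #LoadStore: later accesses stay below the acquire */

	/* ... loads and stores to the data protected by the lock ... */

	CS_LEAVE_BARRIER();	/* #StoreStore, #LoadStore: earlier accesses stay above the release */
	/* lock_release(&lock); */	/* hypothetical releasing store */
}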
/trunk/kernel/arch/sparc64/src/fpu_context.c
39,7 → 39,7
 
void fpu_context_save(fpu_context_t *fctx)
{
__asm__ volatile (
asm volatile (
"std %%f0, %0\n"
"std %%f2, %1\n"
"std %%f4, %2\n"
67,7 → 67,7
* GCC (4.1.1) can't handle more than 30 operands in one asm statement.
*/
__asm__ volatile (
asm volatile (
"std %%f32, %0\n"
"std %%f34, %1\n"
"std %%f36, %2\n"
90,12 → 90,12
"=m" (fctx->d[28]), "=m" (fctx->d[29]), "=m" (fctx->d[30]), "=m" (fctx->d[31])
);
__asm__ volatile ("stx %%fsr, %0\n" : "=m" (fctx->fsr));
asm volatile ("stx %%fsr, %0\n" : "=m" (fctx->fsr));
}
 
void fpu_context_restore(fpu_context_t *fctx)
{
__asm__ volatile (
asm volatile (
"ldd %0, %%f0\n"
"ldd %1, %%f2\n"
"ldd %2, %%f4\n"
124,7 → 124,7
* GCC (4.1.1) can't handle more than 30 operands in one asm statement.
*/
__asm__ volatile (
asm volatile (
"ldd %0, %%f32\n"
"ldd %1, %%f34\n"
"ldd %2, %%f36\n"
148,7 → 148,7
"m" (fctx->d[28]), "m" (fctx->d[29]), "m" (fctx->d[30]), "m" (fctx->d[31])
);
__asm__ volatile ("ldx %0, %%fsr\n" : : "m" (fctx->fsr));
asm volatile ("ldx %0, %%fsr\n" : : "m" (fctx->fsr));
}
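The save/restore pair above moves the floating-point register file in even/odd pairs (std and ldd transfer eight bytes each), %f0 through %f30 in the first statement and %f32 through %f62 in the second, plus %fsr. A sketch of the layout these routines imply for fpu_context_t (field names taken from the code above; the actual HelenOS definition may differ in attributes):

#include <stdint.h>

/* Sketch only: 32 eight-byte slots for the %f0..%f62 register pairs
 * plus the floating-point state register, matching the fctx->d[0..31]
 * and fctx->fsr references above. */
typedef struct {
	uint64_t d[32];
	uint64_t fsr;
} fpu_context_t;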
 
void fpu_enable(void)