/trunk/kernel/arch/sparc64/include/atomic.h |
---|
57,7 → 57,7 |
a = *((uint64_t *) x); |
b = a + i; |
__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a)); |
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a)); |
} while (a != b); |
return a; |
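For context, the casx loop being retouched above implements fetch-and-add: casx compares the doubleword in memory (%0) against the expected value in %2 and, on a match, swaps in %1; in either case %1 receives the previous memory contents, so the loop exits once no other CPU intervened. A self-contained sketch of the same pattern in the new spelling (atomic_add_sketch is a hypothetical name):

static inline uint64_t atomic_add_sketch(volatile uint64_t *x, uint64_t i)
{
	uint64_t a, b;

	do {
		a = *x;		/* expected old value */
		b = a + i;	/* desired new value */
		asm volatile ("casx %0, %2, %1\n" : "+m" (*x), "+r" (b) : "r" (a));
	} while (a != b);	/* b now holds the old memory value; equality means success */

	return a;
}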
98,7 → 98,7 |
uint64_t v = 1; |
volatile uintptr_t x = (uint64_t) &val->count; |
__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (v) : "r" (0)); |
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (v) : "r" (0)); |
return v; |
} |
110,7 → 110,7 |
volatile uintptr_t x = (uint64_t) &val->count; |
__asm__ volatile ( |
asm volatile ( |
"0:\n" |
"casx %0, %3, %1\n" |
"brz %1, 2f\n" |
/trunk/kernel/arch/sparc64/include/asm.h |
---|
51,7 → 51,7 |
{ |
uint64_t v; |
__asm__ volatile ("rdpr %%pstate, %0\n" : "=r" (v)); |
asm volatile ("rdpr %%pstate, %0\n" : "=r" (v)); |
return v; |
} |
62,7 → 62,7 |
*/ |
static inline void pstate_write(uint64_t v) |
{ |
__asm__ volatile ("wrpr %0, %1, %%pstate\n" : : "r" (v), "i" (0)); |
asm volatile ("wrpr %0, %1, %%pstate\n" : : "r" (v), "i" (0)); |
} |
/** Read TICK_compare Register. |
73,7 → 73,7 |
{ |
uint64_t v; |
__asm__ volatile ("rd %%tick_cmpr, %0\n" : "=r" (v)); |
asm volatile ("rd %%tick_cmpr, %0\n" : "=r" (v)); |
return v; |
} |
84,7 → 84,7 |
*/ |
static inline void tick_compare_write(uint64_t v) |
{ |
__asm__ volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0)); |
asm volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0)); |
} |
/** Read TICK Register. |
95,7 → 95,7 |
{ |
uint64_t v; |
__asm__ volatile ("rdpr %%tick, %0\n" : "=r" (v)); |
asm volatile ("rdpr %%tick, %0\n" : "=r" (v)); |
return v; |
} |
106,7 → 106,7 |
*/ |
static inline void tick_write(uint64_t v) |
{ |
__asm__ volatile ("wrpr %0, %1, %%tick\n" : : "r" (v), "i" (0)); |
asm volatile ("wrpr %0, %1, %%tick\n" : : "r" (v), "i" (0)); |
} |
/** Read FPRS Register. |
117,7 → 117,7 |
{ |
uint64_t v; |
__asm__ volatile ("rd %%fprs, %0\n" : "=r" (v)); |
asm volatile ("rd %%fprs, %0\n" : "=r" (v)); |
return v; |
} |
128,7 → 128,7 |
*/ |
static inline void fprs_write(uint64_t v) |
{ |
__asm__ volatile ("wr %0, %1, %%fprs\n" : : "r" (v), "i" (0)); |
asm volatile ("wr %0, %1, %%fprs\n" : : "r" (v), "i" (0)); |
} |
/** Read SOFTINT Register. |
139,7 → 139,7 |
{ |
uint64_t v; |
__asm__ volatile ("rd %%softint, %0\n" : "=r" (v)); |
asm volatile ("rd %%softint, %0\n" : "=r" (v)); |
return v; |
} |
150,7 → 150,7 |
*/ |
static inline void softint_write(uint64_t v) |
{ |
__asm__ volatile ("wr %0, %1, %%softint\n" : : "r" (v), "i" (0)); |
asm volatile ("wr %0, %1, %%softint\n" : : "r" (v), "i" (0)); |
} |
/** Write CLEAR_SOFTINT Register. |
161,7 → 161,7 |
*/ |
static inline void clear_softint_write(uint64_t v) |
{ |
__asm__ volatile ("wr %0, %1, %%clear_softint\n" : : "r" (v), "i" (0)); |
asm volatile ("wr %0, %1, %%clear_softint\n" : : "r" (v), "i" (0)); |
} |
/** Write SET_SOFTINT Register. |
172,7 → 172,7 |
*/ |
static inline void set_softint_write(uint64_t v) |
{ |
__asm__ volatile ("wr %0, %1, %%set_softint\n" : : "r" (v), "i" (0)); |
asm volatile ("wr %0, %1, %%set_softint\n" : : "r" (v), "i" (0)); |
} |
/** Enable interrupts. |
247,7 → 247,7 |
{ |
uintptr_t unbiased_sp; |
__asm__ volatile ("add %%sp, %1, %0\n" : "=r" (unbiased_sp) : "i" (STACK_BIAS)); |
asm volatile ("add %%sp, %1, %0\n" : "=r" (unbiased_sp) : "i" (STACK_BIAS)); |
return ALIGN_DOWN(unbiased_sp, STACK_SIZE); |
} |
260,7 → 260,7 |
{ |
uint64_t v; |
__asm__ volatile ("rdpr %%ver, %0\n" : "=r" (v)); |
asm volatile ("rdpr %%ver, %0\n" : "=r" (v)); |
return v; |
} |
273,7 → 273,7 |
{ |
uint64_t v; |
__asm__ volatile ("rdpr %%tpc, %0\n" : "=r" (v)); |
asm volatile ("rdpr %%tpc, %0\n" : "=r" (v)); |
return v; |
} |
286,7 → 286,7 |
{ |
uint64_t v; |
__asm__ volatile ("rdpr %%tl, %0\n" : "=r" (v)); |
asm volatile ("rdpr %%tl, %0\n" : "=r" (v)); |
return v; |
} |
299,7 → 299,7 |
{ |
uint64_t v; |
__asm__ volatile ("rdpr %%tba, %0\n" : "=r" (v)); |
asm volatile ("rdpr %%tba, %0\n" : "=r" (v)); |
return v; |
} |
310,7 → 310,7 |
*/ |
static inline void tba_write(uint64_t v) |
{ |
__asm__ volatile ("wrpr %0, %1, %%tba\n" : : "r" (v), "i" (0)); |
asm volatile ("wrpr %0, %1, %%tba\n" : : "r" (v), "i" (0)); |
} |
/** Load uint64_t from alternate space. |
324,7 → 324,7 |
{ |
uint64_t v; |
__asm__ volatile ("ldxa [%1] %2, %0\n" : "=r" (v) : "r" (va), "i" ((unsigned) asi)); |
asm volatile ("ldxa [%1] %2, %0\n" : "=r" (v) : "r" (va), "i" ((unsigned) asi)); |
return v; |
} |
337,25 → 337,25 |
*/ |
static inline void asi_u64_write(asi_t asi, uintptr_t va, uint64_t v) |
{ |
__asm__ volatile ("stxa %0, [%1] %2\n" : : "r" (v), "r" (va), "i" ((unsigned) asi) : "memory"); |
asm volatile ("stxa %0, [%1] %2\n" : : "r" (v), "r" (va), "i" ((unsigned) asi) : "memory"); |
} |
/** Flush all valid register windows to memory. */ |
static inline void flushw(void) |
{ |
__asm__ volatile ("flushw\n"); |
asm volatile ("flushw\n"); |
} |
/** Switch to nucleus by setting TL to 1. */ |
static inline void nucleus_enter(void) |
{ |
__asm__ volatile ("wrpr %g0, 1, %tl\n"); |
asm volatile ("wrpr %g0, 1, %tl\n"); |
} |
/** Switch from nucleus by setting TL to 0. */ |
static inline void nucleus_leave(void) |
{ |
__asm__ volatile ("wrpr %g0, %g0, %tl\n"); |
asm volatile ("wrpr %g0, %g0, %tl\n"); |
} |
/** Read UPA_CONFIG register. |
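All accessors in this header follow one pattern: privileged registers are reached with rdpr/wrpr and ancillary state registers with rd/wr, each wrapped in a tiny inline function. They compose into the usual read-modify-write sequences; a usage sketch (the 0x002 mask for PSTATE.IE, bit 1, is written out here as an assumption rather than the header's named constant):

static inline void interrupts_mask_sketch(void)
{
	uint64_t pstate = pstate_read();

	pstate_write(pstate & ~0x002ULL);	/* clear PSTATE.IE (bit 1) */
}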
/trunk/kernel/arch/sparc64/include/barrier.h |
---|
39,12 → 39,12 |
* Our critical section barriers are prepared for the weakest RMO memory model. |
*/ |
#define CS_ENTER_BARRIER() \ |
__asm__ volatile ( \ |
asm volatile ( \ |
"membar #LoadLoad | #LoadStore\n" \ |
::: "memory" \ |
) |
#define CS_LEAVE_BARRIER() \ |
__asm__ volatile ( \ |
asm volatile ( \ |
"membar #StoreStore\n" \ |
"membar #LoadStore\n" \ |
::: "memory" \ |
51,11 → 51,11 |
) |
#define memory_barrier() \ |
__asm__ volatile ("membar #LoadLoad | #StoreStore\n" ::: "memory") |
asm volatile ("membar #LoadLoad | #StoreStore\n" ::: "memory") |
#define read_barrier() \ |
__asm__ volatile ("membar #LoadLoad\n" ::: "memory") |
asm volatile ("membar #LoadLoad\n" ::: "memory") |
#define write_barrier() \ |
__asm__ volatile ("membar #StoreStore\n" ::: "memory") |
asm volatile ("membar #StoreStore\n" ::: "memory") |
/** Flush Instruction Memory instruction. */ |
static inline void flush(void) |
70,13 → 70,13 |
* DTLB. |
*/ |
__asm__ volatile ("flush %o7\n"); |
asm volatile ("flush %o7\n"); |
} |
/** Memory Barrier instruction. */ |
static inline void membar(void) |
{ |
__asm__ volatile ("membar #Sync\n"); |
asm volatile ("membar #Sync\n"); |
} |
#endif |
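Because these barriers target the weakest RMO model, a lock release needs membar #StoreStore and #LoadStore before the releasing store, which is exactly what CS_LEAVE_BARRIER() provides. A usage sketch against a hypothetical flag-based lock (not the HelenOS spinlock API):

static inline void unlock_sketch(volatile int *lock)
{
	CS_LEAVE_BARRIER();	/* keep critical-section accesses before the release */
	*lock = 0;		/* releasing store */
}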
/trunk/kernel/arch/sparc64/src/fpu_context.c |
---|
39,7 → 39,7 |
void fpu_context_save(fpu_context_t *fctx) |
{ |
__asm__ volatile ( |
asm volatile ( |
"std %%f0, %0\n" |
"std %%f2, %1\n" |
"std %%f4, %2\n" |
67,7 → 67,7 |
* GCC (4.1.1) can't handle more than 30 operands in one asm statement. |
*/ |
__asm__ volatile ( |
asm volatile ( |
"std %%f32, %0\n" |
"std %%f34, %1\n" |
"std %%f36, %2\n" |
90,12 → 90,12 |
"=m" (fctx->d[28]), "=m" (fctx->d[29]), "=m" (fctx->d[30]), "=m" (fctx->d[31]) |
); |
__asm__ volatile ("stx %%fsr, %0\n" : "=m" (fctx->fsr)); |
asm volatile ("stx %%fsr, %0\n" : "=m" (fctx->fsr)); |
} |
void fpu_context_restore(fpu_context_t *fctx) |
{ |
__asm__ volatile ( |
asm volatile ( |
"ldd %0, %%f0\n" |
"ldd %1, %%f2\n" |
"ldd %2, %%f4\n" |
124,7 → 124,7 |
* GCC (4.1.1) can't handle more than 30 operands in one asm statement. |
*/ |
__asm__ volatile ( |
asm volatile ( |
"ldd %0, %%f32\n" |
"ldd %1, %%f34\n" |
"ldd %2, %%f36\n" |
148,7 → 148,7 |
"m" (fctx->d[28]), "m" (fctx->d[29]), "m" (fctx->d[30]), "m" (fctx->d[31]) |
); |
__asm__ volatile ("ldx %0, %%fsr\n" : : "m" (fctx->fsr)); |
asm volatile ("ldx %0, %%fsr\n" : : "m" (fctx->fsr)); |
} |
void fpu_enable(void) |
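The save and restore above are each split in two because of the 30-operand GCC limit noted in the comments: %f0..%f30 go to d[0..15], %f32..%f62 to d[16..31], and the FSR is moved by a separate stx/ldx. The layout this implies, as a sketch (the authoritative definition lives in fpu_context.h):

typedef struct {
	uint64_t d[32];	/* d[0] <- %f0, d[1] <- %f2, ..., d[31] <- %f62 */
	uint64_t fsr;	/* handled by the separate stx/ldx above */
} fpu_context_sketch_t;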
/trunk/kernel/arch/ia64/include/atomic.h |
---|
46,7 → 46,7 |
{ |
long v; |
__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm)); |
asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm)); |
return v; |
} |
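fetchadd8.rel atomically adds a small immediate to an 8-byte counter with release ordering and yields the pre-add value; the architecture only accepts the immediates -16, -8, -4, -1, 1, 4, 8 and 16, which is why imm is bound with the "i" constraint. Assuming the surrounding function is the usual atomic_add(val, imm), a post-increment falls out directly:

static inline long atomic_postinc_sketch(atomic_t *val)
{
	return atomic_add(val, 1);	/* fetchadd8 returns the old value */
}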
/trunk/kernel/arch/ia64/include/asm.h |
---|
49,7 → 49,7 |
{ |
uint64_t v; |
__asm__ volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1))); |
asm volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1))); |
return v; |
} |
62,7 → 62,7 |
{ |
uint64_t v; |
__asm__ volatile ("mov %0 = psr\n" : "=r" (v)); |
asm volatile ("mov %0 = psr\n" : "=r" (v)); |
return v; |
} |
75,7 → 75,7 |
{ |
uint64_t v; |
__asm__ volatile ("mov %0 = cr.iva\n" : "=r" (v)); |
asm volatile ("mov %0 = cr.iva\n" : "=r" (v)); |
return v; |
} |
86,7 → 86,7 |
*/ |
static inline void iva_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.iva = %0\n" : : "r" (v)); |
asm volatile ("mov cr.iva = %0\n" : : "r" (v)); |
} |
98,7 → 98,7 |
{ |
uint64_t v; |
__asm__ volatile ("mov %0 = cr.ivr\n" : "=r" (v)); |
asm volatile ("mov %0 = cr.ivr\n" : "=r" (v)); |
return v; |
} |
109,7 → 109,7 |
*/ |
static inline void itc_write(uint64_t v) |
{ |
__asm__ volatile ("mov ar.itc = %0\n" : : "r" (v)); |
asm volatile ("mov ar.itc = %0\n" : : "r" (v)); |
} |
/** Read ITC (Interval Timer Counter) register. |
120,7 → 120,7 |
{ |
uint64_t v; |
__asm__ volatile ("mov %0 = ar.itc\n" : "=r" (v)); |
asm volatile ("mov %0 = ar.itc\n" : "=r" (v)); |
return v; |
} |
131,7 → 131,7 |
*/ |
static inline void itm_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.itm = %0\n" : : "r" (v)); |
asm volatile ("mov cr.itm = %0\n" : : "r" (v)); |
} |
/** Read ITM (Interval Timer Match) register. |
142,7 → 142,7 |
{ |
uint64_t v; |
__asm__ volatile ("mov %0 = cr.itm\n" : "=r" (v)); |
asm volatile ("mov %0 = cr.itm\n" : "=r" (v)); |
return v; |
} |
155,7 → 155,7 |
{ |
uint64_t v; |
__asm__ volatile ("mov %0 = cr.itv\n" : "=r" (v)); |
asm volatile ("mov %0 = cr.itv\n" : "=r" (v)); |
return v; |
} |
166,7 → 166,7 |
*/ |
static inline void itv_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.itv = %0\n" : : "r" (v)); |
asm volatile ("mov cr.itv = %0\n" : : "r" (v)); |
} |
/** Write EOI (End Of Interrupt) register. |
175,7 → 175,7 |
*/ |
static inline void eoi_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.eoi = %0\n" : : "r" (v)); |
asm volatile ("mov cr.eoi = %0\n" : : "r" (v)); |
} |
/** Read TPR (Task Priority Register). |
186,7 → 186,7 |
{ |
uint64_t v; |
__asm__ volatile ("mov %0 = cr.tpr\n" : "=r" (v)); |
asm volatile ("mov %0 = cr.tpr\n" : "=r" (v)); |
return v; |
} |
197,7 → 197,7 |
*/ |
static inline void tpr_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.tpr = %0\n" : : "r" (v)); |
asm volatile ("mov cr.tpr = %0\n" : : "r" (v)); |
} |
/** Disable interrupts. |
211,7 → 211,7 |
{ |
uint64_t v; |
__asm__ volatile ( |
asm volatile ( |
"mov %0 = psr\n" |
"rsm %1\n" |
: "=r" (v) |
232,7 → 232,7 |
{ |
uint64_t v; |
__asm__ volatile ( |
asm volatile ( |
"mov %0 = psr\n" |
"ssm %1\n" |
";;\n" |
270,7 → 270,7 |
/** Disable protection key checking. */ |
static inline void pk_disable(void) |
{ |
__asm__ volatile ("rsm %0\n" : : "i" (PSR_PK_MASK)); |
asm volatile ("rsm %0\n" : : "i" (PSR_PK_MASK)); |
} |
extern void cpu_halt(void); |
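interrupts_disable() above reads PSR and then clears PSR.i with rsm, returning the old PSR; interrupts_enable() does the same with ssm plus a stop (;;). The usual pairing, assuming the ipl_t-returning signatures HelenOS uses elsewhere:

void critical_work_sketch(void)
{
	ipl_t ipl = interrupts_disable();	/* mov %0 = psr; rsm PSR_I_MASK */

	/* ... code that must not be interrupted ... */

	interrupts_restore(ipl);	/* re-enables only if previously enabled */
}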
/trunk/kernel/arch/ia64/include/mm/page.h |
---|
194,7 → 194,7 |
{ |
uint64_t ret; |
__asm__ volatile ("thash %0 = %1\n" : "=r" (ret) : "r" (va)); |
asm volatile ("thash %0 = %1\n" : "=r" (ret) : "r" (va)); |
return ret; |
} |
212,7 → 212,7 |
{ |
uint64_t ret; |
__asm__ volatile ("ttag %0 = %1\n" : "=r" (ret) : "r" (va)); |
asm volatile ("ttag %0 = %1\n" : "=r" (ret) : "r" (va)); |
return ret; |
} |
227,7 → 227,7 |
{ |
uint64_t ret; |
ASSERT(i < REGION_REGISTERS); |
__asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT)); |
asm volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT)); |
return ret; |
} |
239,7 → 239,7 |
static inline void rr_write(index_t i, uint64_t v) |
{ |
ASSERT(i < REGION_REGISTERS); |
__asm__ volatile ( |
asm volatile ( |
"mov rr[%0] = %1\n" |
: |
: "r" (i << VRN_SHIFT), "r" (v) |
254,7 → 254,7 |
{ |
uint64_t ret; |
__asm__ volatile ("mov %0 = cr.pta\n" : "=r" (ret)); |
asm volatile ("mov %0 = cr.pta\n" : "=r" (ret)); |
return ret; |
} |
265,7 → 265,7 |
*/ |
static inline void pta_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.pta = %0\n" : : "r" (v)); |
asm volatile ("mov cr.pta = %0\n" : : "r" (v)); |
} |
extern void page_arch_init(void); |
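rr_write() takes the VRN-shifted region index and the new region register value, and IA-64 requires serialization before the new RID may be used. A sketch of installing a RID (the field positions, ps in bits 2..7 and rid in bits 8..31, follow the architecture manual; install_rid_sketch is hypothetical):

static inline void install_rid_sketch(index_t region, uint64_t rid)
{
	uint64_t rr = rr_read(region);

	rr = (rr & ~0xffffff00ULL) | (rid << 8);	/* rr.rid occupies bits 8..31 */
	rr_write(region, rr);
	srlz_d();	/* serialize data accesses */
	srlz_i();	/* ... and instruction fetch (see barrier.h) */
}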
/trunk/kernel/arch/ia64/include/barrier.h |
---|
41,12 → 41,12 |
#define CS_ENTER_BARRIER() memory_barrier() |
#define CS_LEAVE_BARRIER() memory_barrier() |
#define memory_barrier() __asm__ volatile ("mf\n" ::: "memory") |
#define memory_barrier() asm volatile ("mf\n" ::: "memory") |
#define read_barrier() memory_barrier() |
#define write_barrier() memory_barrier() |
#define srlz_i() __asm__ volatile (";; srlz.i ;;\n" ::: "memory") |
#define srlz_d() __asm__ volatile (";; srlz.d\n" ::: "memory") |
#define srlz_i() asm volatile (";; srlz.i ;;\n" ::: "memory") |
#define srlz_d() asm volatile (";; srlz.d\n" ::: "memory") |
#endif |
/trunk/kernel/arch/ia64/include/cpu.h |
---|
58,7 → 58,7 |
{ |
uint64_t v; |
__asm__ volatile ("mov %0 = cpuid[%1]\n" : "=r" (v) : "r" (n)); |
asm volatile ("mov %0 = cpuid[%1]\n" : "=r" (v) : "r" (n)); |
return v; |
} |
/trunk/kernel/arch/ia64/src/ia64.c |
---|
133,7 → 133,7 |
psr.ri = 0; /* start with instruction #0 */ |
psr.bn = 1; /* start in bank 1 */
__asm__ volatile ("mov %0 = ar.rsc\n" : "=r" (rsc.value)); |
asm volatile ("mov %0 = ar.rsc\n" : "=r" (rsc.value)); |
rsc.loadrs = 0; |
rsc.be = false; |
rsc.pl = PL_USER; |
/trunk/kernel/arch/ia64/src/ski/ski.c |
---|
69,7 → 69,7 |
*/ |
void ski_putchar(chardev_t *d, const char ch) |
{ |
__asm__ volatile ( |
asm volatile ( |
"mov r15 = %0\n" |
"mov r32 = %1\n" /* r32 is in0 */ |
"break 0x80000\n" /* modifies r8 */ |
95,7 → 95,7 |
{ |
uint64_t ch; |
__asm__ volatile ( |
asm volatile ( |
"mov r15 = %1\n" |
"break 0x80000;;\n" /* modifies r8 */ |
"mov %0 = r8;;\n" |
204,7 → 204,7 |
*/ |
void ski_init_console(void) |
{ |
__asm__ volatile ( |
asm volatile ( |
"mov r15 = %0\n" |
"break 0x80000\n" |
: |
/trunk/kernel/arch/ia64/src/proc/scheduler.c |
---|
73,7 → 73,7 |
* Record address of kernel stack to bank 0 r23. |
* These values will be found there after switch from userspace. |
*/ |
__asm__ volatile ( |
asm volatile ( |
"bsw.0\n" |
"mov r22 = %0\n" |
"mov r23 = %1\n" |
/trunk/kernel/arch/ia64/src/mm/tlb.c |
---|
72,7 → 72,7 |
for(i = 0; i < count1; i++) { |
for(j = 0; j < count2; j++) { |
__asm__ volatile ( |
asm volatile ( |
"ptc.e %0 ;;" |
: |
: "r" (adr) |
179,7 → 179,7 |
} |
/*cnt+=(page!=va);*/ |
for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) { |
__asm__ volatile ( |
asm volatile ( |
"ptc.l %0,%1;;" |
: |
: "r" (va), "r" (ps<<2) |
244,7 → 244,7 |
srlz_i(); |
} |
__asm__ volatile ( |
asm volatile ( |
"mov r8=psr;;\n" |
"rsm %0;;\n" /* PSR_IC_MASK */ |
"srlz.d;;\n" |
320,7 → 320,7 |
srlz_i(); |
} |
__asm__ volatile ( |
asm volatile ( |
"mov r8=psr;;\n" |
"rsm %0;;\n" /* PSR_IC_MASK */ |
"srlz.d;;\n" |
382,7 → 382,7 |
*/ |
void dtr_purge(uintptr_t page, count_t width) |
{ |
__asm__ volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2)); |
asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2)); |
} |
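ptr.d takes the purge address and, in bits 2..7 of its second operand, the log2 of the purged region, hence the width << 2 above. A call sketch purging a naturally aligned 16K (2^14) translation:

static void purge_example_sketch(uintptr_t va)
{
	dtr_purge(va & ~((1ULL << 14) - 1), 14);
}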
/trunk/kernel/arch/ia32xen/include/asm.h |
---|
59,13 → 59,13 |
#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \ |
{ \ |
unative_t res; \ |
__asm__ volatile ("movl %%" #reg ", %0" : "=r" (res) ); \ |
asm volatile ("movl %%" #reg ", %0" : "=r" (res) ); \ |
return res; \ |
} |
#define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \ |
{ \ |
__asm__ volatile ("movl %0, %%" #reg : : "r" (regn)); \ |
asm volatile ("movl %0, %%" #reg : : "r" (regn)); \ |
} |
GEN_READ_REG(cr0); |
92,7 → 92,7 |
* @param port Port to write to |
* @param val Value to write |
*/ |
static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); } |
static inline void outb(uint16_t port, uint8_t val) { asm volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); } |
/** Word to port |
* |
101,7 → 101,7 |
* @param port Port to write to |
* @param val Value to write |
*/ |
static inline void outw(uint16_t port, uint16_t val) { __asm__ volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port) ); } |
static inline void outw(uint16_t port, uint16_t val) { asm volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port) ); } |
/** Double word to port |
* |
110,7 → 110,7 |
* @param port Port to write to |
* @param val Value to write |
*/ |
static inline void outl(uint16_t port, uint32_t val) { __asm__ volatile ("outl %l0, %w1\n" : : "a" (val), "d" (port) ); } |
static inline void outl(uint16_t port, uint32_t val) { asm volatile ("outl %l0, %w1\n" : : "a" (val), "d" (port) ); } |
/** Byte from port |
* |
119,7 → 119,7 |
* @param port Port to read from |
* @return Value read |
*/ |
static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; } |
static inline uint8_t inb(uint16_t port) { uint8_t val; asm volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; } |
/** Word from port |
* |
128,7 → 128,7 |
* @param port Port to read from |
* @return Value read |
*/ |
static inline uint16_t inw(uint16_t port) { uint16_t val; __asm__ volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port) ); return val; } |
static inline uint16_t inw(uint16_t port) { uint16_t val; asm volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port) ); return val; } |
/** Double word from port |
* |
137,7 → 137,7 |
* @param port Port to read from |
* @return Value read |
*/ |
static inline uint32_t inl(uint16_t port) { uint32_t val; __asm__ volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) ); return val; } |
static inline uint32_t inl(uint16_t port) { uint32_t val; asm volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) ); return val; } |
/** Enable interrupts. |
* |
213,7 → 213,7 |
{ |
uintptr_t v; |
__asm__ volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1))); |
asm volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1))); |
return v; |
} |
223,7 → 223,7 |
{ |
uintptr_t *ip; |
__asm__ volatile ( |
asm volatile ( |
"mov %%eip, %0" |
: "=r" (ip) |
); |
236,7 → 236,7 |
*/ |
static inline void invlpg(uintptr_t addr) |
{ |
__asm__ volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr)); |
asm volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr)); |
} |
/** Load GDTR register from memory. |
245,7 → 245,7 |
*/ |
static inline void gdtr_load(ptr_16_32_t *gdtr_reg) |
{ |
__asm__ volatile ("lgdtl %0\n" : : "m" (*gdtr_reg)); |
asm volatile ("lgdtl %0\n" : : "m" (*gdtr_reg)); |
} |
/** Store GDTR register to memory. |
254,7 → 254,7 |
*/ |
static inline void gdtr_store(ptr_16_32_t *gdtr_reg) |
{ |
__asm__ volatile ("sgdtl %0\n" : : "m" (*gdtr_reg)); |
asm volatile ("sgdtl %0\n" : : "m" (*gdtr_reg)); |
} |
/** Load TR from descriptor table. |
263,7 → 263,7 |
*/ |
static inline void tr_load(uint16_t sel) |
{ |
__asm__ volatile ("ltr %0" : : "r" (sel)); |
asm volatile ("ltr %0" : : "r" (sel)); |
} |
#endif |
/trunk/kernel/arch/ia32xen/src/pm.c |
---|
132,7 → 132,7 |
/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */ |
static void clean_IOPL_NT_flags(void) |
{ |
// __asm__ volatile ( |
// asm volatile ( |
// "pushfl\n" |
// "pop %%eax\n" |
// "and $0xffff8fff, %%eax\n" |
145,7 → 145,7 |
/* Clean AM(18) flag in CR0 register */ |
static void clean_AM_flag(void) |
{ |
// __asm__ volatile ( |
// asm volatile ( |
// "mov %%cr0, %%eax\n" |
// "and $0xfffbffff, %%eax\n" |
// "mov %%eax, %%cr0\n" |
/trunk/kernel/arch/amd64/include/memstr.h |
---|
51,7 → 51,7 |
{ |
unative_t d0, d1, d2; |
__asm__ __volatile__( |
asm volatile( |
"rep movsq\n\t" |
"movq %4, %%rcx\n\t" |
"andq $7, %%rcx\n\t" |
82,7 → 82,7 |
unative_t d0, d1, d2; |
unative_t ret; |
__asm__ ( |
asm ( |
"repe cmpsb\n\t" |
"je 1f\n\t" |
"movq %3, %0\n\t" |
108,7 → 108,7 |
{ |
unative_t d0, d1; |
__asm__ __volatile__ ( |
asm volatile ( |
"rep stosw\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" ((unative_t)cnt), "2" (x) |
130,7 → 130,7 |
{ |
unative_t d0, d1; |
__asm__ __volatile__ ( |
asm volatile ( |
"rep stosb\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" ((unative_t)cnt), "2" (x) |
/trunk/kernel/arch/amd64/include/atomic.h |
---|
42,17 → 42,17 |
static inline void atomic_inc(atomic_t *val) { |
#ifdef CONFIG_SMP |
__asm__ volatile ("lock incq %0\n" : "=m" (val->count)); |
asm volatile ("lock incq %0\n" : "=m" (val->count)); |
#else |
__asm__ volatile ("incq %0\n" : "=m" (val->count)); |
asm volatile ("incq %0\n" : "=m" (val->count)); |
#endif /* CONFIG_SMP */ |
} |
static inline void atomic_dec(atomic_t *val) { |
#ifdef CONFIG_SMP |
__asm__ volatile ("lock decq %0\n" : "=m" (val->count)); |
asm volatile ("lock decq %0\n" : "=m" (val->count)); |
#else |
__asm__ volatile ("decq %0\n" : "=m" (val->count)); |
asm volatile ("decq %0\n" : "=m" (val->count)); |
#endif /* CONFIG_SMP */ |
} |
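The lock prefix above is emitted only under CONFIG_SMP; on a uniprocessor the plain inc/dec is already atomic with respect to interrupts. The xadd used in the following hunks exchanges and adds in one step, leaving the old value in the register operand, so the post-increment returns the pre-update count, handy for ticket-style numbering (take_ticket_sketch and the atomic_postinc name are assumptions here):

static inline long take_ticket_sketch(atomic_t *counter)
{
	return atomic_postinc(counter);	/* old value = this caller's ticket */
}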
60,7 → 60,7 |
{ |
long r = 1; |
__asm__ volatile ( |
asm volatile ( |
"lock xaddq %1, %0\n" |
: "=m" (val->count), "+r" (r) |
); |
72,7 → 72,7 |
{ |
long r = -1; |
__asm__ volatile ( |
asm volatile ( |
"lock xaddq %1, %0\n" |
: "=m" (val->count), "+r" (r) |
); |
86,7 → 86,7 |
static inline uint64_t test_and_set(atomic_t *val) { |
uint64_t v; |
__asm__ volatile ( |
asm volatile ( |
"movq $1, %0\n" |
"xchgq %0, %1\n" |
: "=r" (v),"=m" (val->count) |
102,7 → 102,7 |
uint64_t tmp; |
preemption_disable(); |
__asm__ volatile ( |
asm volatile ( |
"0:;" |
#ifdef CONFIG_HT |
"pause;" |
/trunk/kernel/arch/amd64/include/asm.h |
---|
52,7 → 52,7 |
{ |
uintptr_t v; |
__asm__ volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1))); |
asm volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1))); |
return v; |
} |
/trunk/kernel/arch/amd64/src/fpu_context.c |
---|
40,7 → 40,7 |
/** Save FPU (mmx, sse) context using fxsave instruction */ |
void fpu_context_save(fpu_context_t *fctx) |
{ |
__asm__ volatile ( |
asm volatile ( |
"fxsave %0" |
: "=m"(*fctx) |
); |
49,7 → 49,7 |
/** Restore FPU (mmx,sse) context using fxrstor instruction */ |
void fpu_context_restore(fpu_context_t *fctx) |
{ |
__asm__ volatile ( |
asm volatile ( |
"fxrstor %0" |
: "=m"(*fctx) |
); |
58,7 → 58,7 |
void fpu_init() |
{ |
/* TODO: Zero all SSE, MMX etc. registers */ |
__asm__ volatile ( |
asm volatile ( |
"fninit;" |
); |
} |
/trunk/kernel/arch/amd64/src/cpu/cpu.c |
---|
76,7 → 76,7 |
*/ |
void cpu_setup_fpu(void) |
{ |
__asm__ volatile ( |
asm volatile ( |
"movq %%cr0, %%rax;" |
"btsq $1, %%rax;" /* cr0.mp */ |
"btrq $2, %%rax;" /* cr0.em */ |
99,7 → 99,7 |
*/ |
void fpu_disable(void) |
{ |
__asm__ volatile ( |
asm volatile ( |
"mov %%cr0,%%rax;" |
"bts $3,%%rax;" |
"mov %%rax,%%cr0;" |
111,7 → 111,7 |
void fpu_enable(void) |
{ |
__asm__ volatile ( |
asm volatile ( |
"mov %%cr0,%%rax;" |
"btr $3,%%rax;" |
"mov %%rax,%%cr0;" |
/trunk/kernel/arch/amd64/src/userspace.c |
---|
54,7 → 54,7 |
/* Clear CF,PF,AF,ZF,SF,DF,OF */ |
ipl &= ~(0xcd4); |
__asm__ volatile ("" |
asm volatile ("" |
"pushq %0\n" |
"pushq %1\n" |
"pushq %2\n" |
/trunk/kernel/arch/mips32/include/atomic.h |
---|
55,7 → 55,7 |
{ |
long tmp, v; |
__asm__ volatile ( |
asm volatile ( |
"1:\n" |
" ll %0, %1\n" |
" addiu %0, %0, %3\n" /* same as addi, but never traps on overflow */ |
/trunk/kernel/arch/mips32/include/asm.h |
---|
43,7 → 43,7 |
static inline void cpu_sleep(void) |
{ |
/* Most of the simulators do not support the wait instruction: */
/* __asm__ volatile ("wait"); */ |
/* asm volatile ("wait"); */ |
} |
/** Return base address of current stack |
56,7 → 56,7 |
{ |
uintptr_t v; |
__asm__ volatile ("and %0, $29, %1\n" : "=r" (v) : "r" (~(STACK_SIZE-1))); |
asm volatile ("and %0, $29, %1\n" : "=r" (v) : "r" (~(STACK_SIZE-1))); |
return v; |
} |
/trunk/kernel/arch/mips32/include/mm/tlb.h |
---|
142,7 → 142,7 |
*/ |
static inline void tlbp(void) |
{ |
__asm__ volatile ("tlbp\n\t"); |
asm volatile ("tlbp\n\t"); |
} |
152,7 → 152,7 |
*/ |
static inline void tlbr(void) |
{ |
__asm__ volatile ("tlbr\n\t"); |
asm volatile ("tlbr\n\t"); |
} |
/** Write Indexed TLB Entry |
161,7 → 161,7 |
*/ |
static inline void tlbwi(void) |
{ |
__asm__ volatile ("tlbwi\n\t"); |
asm volatile ("tlbwi\n\t"); |
} |
/** Write Random TLB Entry |
170,7 → 170,7 |
*/ |
static inline void tlbwr(void) |
{ |
__asm__ volatile ("tlbwr\n\t"); |
asm volatile ("tlbwr\n\t"); |
} |
#define tlb_invalidate(asid) tlb_invalidate_asid(asid) |
/trunk/kernel/arch/mips32/include/barrier.h |
---|
38,12 → 38,12 |
/* |
* TODO: implement true MIPS memory barriers for macros below. |
*/ |
#define CS_ENTER_BARRIER() __asm__ volatile ("" ::: "memory") |
#define CS_LEAVE_BARRIER() __asm__ volatile ("" ::: "memory") |
#define CS_ENTER_BARRIER() asm volatile ("" ::: "memory") |
#define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory") |
#define memory_barrier() __asm__ volatile ("" ::: "memory") |
#define read_barrier() __asm__ volatile ("" ::: "memory") |
#define write_barrier() __asm__ volatile ("" ::: "memory") |
#define memory_barrier() asm volatile ("" ::: "memory") |
#define read_barrier() asm volatile ("" ::: "memory") |
#define write_barrier() asm volatile ("" ::: "memory") |
#endif |
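As the TODO above notes, these are compiler-only barriers: they stop the compiler from reordering accesses but emit no instruction. A true MIPS barrier would use the SYNC instruction, roughly:

#define memory_barrier_sketch() asm volatile ("sync\n" ::: "memory")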
/trunk/kernel/arch/ia32/include/cpuid.h |
---|
76,7 → 76,7 |
{ |
uint32_t val, ret; |
__asm__ volatile ( |
asm volatile ( |
"pushf\n" /* read flags */ |
"popl %0\n" |
"movl %0, %1\n" |
99,7 → 99,7 |
static inline void cpuid(uint32_t cmd, cpu_info_t *info) |
{ |
__asm__ volatile ( |
asm volatile ( |
"movl %4, %%eax\n" |
"cpuid\n" |
/trunk/kernel/arch/ia32/include/memstr.h |
---|
51,7 → 51,7 |
{ |
unative_t d0, d1, d2; |
__asm__ __volatile__( |
asm volatile( |
/* copy all full dwords */ |
"rep movsl\n\t" |
/* load count again */ |
88,7 → 88,7 |
uint32_t d0, d1, d2; |
int ret; |
__asm__ ( |
asm ( |
"repe cmpsb\n\t" |
"je 1f\n\t" |
"movl %3, %0\n\t" |
114,7 → 114,7 |
{ |
uint32_t d0, d1; |
__asm__ __volatile__ ( |
asm volatile ( |
"rep stosw\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" (cnt), "2" (x) |
136,7 → 136,7 |
{ |
uint32_t d0, d1; |
__asm__ __volatile__ ( |
asm volatile ( |
"rep stosb\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" (cnt), "2" (x) |
/trunk/kernel/arch/ia32/include/atomic.h |
---|
42,17 → 42,17 |
static inline void atomic_inc(atomic_t *val) { |
#ifdef CONFIG_SMP |
__asm__ volatile ("lock incl %0\n" : "=m" (val->count)); |
asm volatile ("lock incl %0\n" : "=m" (val->count)); |
#else |
__asm__ volatile ("incl %0\n" : "=m" (val->count)); |
asm volatile ("incl %0\n" : "=m" (val->count)); |
#endif /* CONFIG_SMP */ |
} |
static inline void atomic_dec(atomic_t *val) { |
#ifdef CONFIG_SMP |
__asm__ volatile ("lock decl %0\n" : "=m" (val->count)); |
asm volatile ("lock decl %0\n" : "=m" (val->count)); |
#else |
__asm__ volatile ("decl %0\n" : "=m" (val->count)); |
asm volatile ("decl %0\n" : "=m" (val->count)); |
#endif /* CONFIG_SMP */ |
} |
60,7 → 60,7 |
{ |
long r = 1; |
__asm__ volatile ( |
asm volatile ( |
"lock xaddl %1, %0\n" |
: "=m" (val->count), "+r" (r) |
); |
72,7 → 72,7 |
{ |
long r = -1; |
__asm__ volatile ( |
asm volatile ( |
"lock xaddl %1, %0\n" |
: "=m" (val->count), "+r"(r) |
); |
86,7 → 86,7 |
static inline uint32_t test_and_set(atomic_t *val) { |
uint32_t v; |
__asm__ volatile ( |
asm volatile ( |
"movl $1, %0\n" |
"xchgl %0, %1\n" |
: "=r" (v),"=m" (val->count) |
101,7 → 101,7 |
uint32_t tmp; |
preemption_disable(); |
__asm__ volatile ( |
asm volatile ( |
"0:;" |
#ifdef CONFIG_HT |
"pause;" /* Pentium 4's HT love this instruction */ |
/trunk/kernel/arch/ia32/include/asm.h |
---|
57,19 → 57,26 |
* |
* Halt the current CPU until interrupt event. |
*/ |
static inline void cpu_halt(void) { __asm__("hlt\n"); }; |
static inline void cpu_sleep(void) { __asm__("hlt\n"); }; |
static inline void cpu_halt(void) |
{ |
asm("hlt\n"); |
}; |
static inline void cpu_sleep(void) |
{ |
asm("hlt\n"); |
}; |
#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \ |
{ \ |
unative_t res; \ |
__asm__ volatile ("movl %%" #reg ", %0" : "=r" (res) ); \ |
asm volatile ("movl %%" #reg ", %0" : "=r" (res) ); \ |
return res; \ |
} |
#define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \ |
{ \ |
__asm__ volatile ("movl %0, %%" #reg : : "r" (regn)); \ |
asm volatile ("movl %0, %%" #reg : : "r" (regn)); \ |
} |
GEN_READ_REG(cr0); |
98,7 → 105,10 |
* @param port Port to write to |
* @param val Value to write |
*/ |
static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); } |
static inline void outb(uint16_t port, uint8_t val) |
{ |
asm volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); |
} |
/** Word to port |
* |
107,7 → 117,10 |
* @param port Port to write to |
* @param val Value to write |
*/ |
static inline void outw(uint16_t port, uint16_t val) { __asm__ volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port) ); } |
static inline void outw(uint16_t port, uint16_t val) |
{ |
asm volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port) ); |
} |
/** Double word to port |
* |
116,7 → 129,10 |
* @param port Port to write to |
* @param val Value to write |
*/ |
static inline void outl(uint16_t port, uint32_t val) { __asm__ volatile ("outl %l0, %w1\n" : : "a" (val), "d" (port) ); } |
static inline void outl(uint16_t port, uint32_t val) |
{ |
asm volatile ("outl %l0, %w1\n" : : "a" (val), "d" (port) ); |
} |
/** Byte from port |
* |
125,8 → 141,14 |
* @param port Port to read from |
* @return Value read |
*/ |
static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; } |
static inline uint8_t inb(uint16_t port) |
{ |
uint8_t val; |
asm volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); |
return val; |
} |
/** Word from port |
* |
* Get word from port |
134,8 → 156,14 |
* @param port Port to read from |
* @return Value read |
*/ |
static inline uint16_t inw(uint16_t port) { uint16_t val; __asm__ volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port) ); return val; } |
static inline uint16_t inw(uint16_t port) |
{ |
uint16_t val; |
asm volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port) ); |
return val; |
} |
/** Double word from port |
* |
* Get double word from port |
143,8 → 171,14 |
* @param port Port to read from |
* @return Value read |
*/ |
static inline uint32_t inl(uint16_t port) { uint32_t val; __asm__ volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) ); return val; } |
static inline uint32_t inl(uint16_t port) |
{ |
uint32_t val; |
asm volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) ); |
return val; |
} |
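The "a" and "d" constraints in these helpers pin the value and port number to %eax/%al and %dx, as the in/out encodings require. A usage sketch (0x60 being the i8042 keyboard data port on PC hardware is an assumption of the example, not something this header defines):

static inline uint8_t kbd_read_sketch(void)
{
	return inb(0x60);	/* fetch one scancode byte */
}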
/** Enable interrupts. |
* |
* Enable interrupts and return previous |
155,7 → 189,7 |
static inline ipl_t interrupts_enable(void) |
{ |
ipl_t v; |
__asm__ volatile ( |
asm volatile ( |
"pushf\n\t" |
"popl %0\n\t" |
"sti\n" |
174,7 → 208,7 |
static inline ipl_t interrupts_disable(void) |
{ |
ipl_t v; |
__asm__ volatile ( |
asm volatile ( |
"pushf\n\t" |
"popl %0\n\t" |
"cli\n" |
191,7 → 225,7 |
*/ |
static inline void interrupts_restore(ipl_t ipl) |
{ |
__asm__ volatile ( |
asm volatile ( |
"pushl %0\n\t" |
"popf\n" |
: : "r" (ipl) |
205,7 → 239,7 |
static inline ipl_t interrupts_read(void) |
{ |
ipl_t v; |
__asm__ volatile ( |
asm volatile ( |
"pushf\n\t" |
"popl %0\n" |
: "=r" (v) |
223,7 → 257,7 |
{ |
uintptr_t v; |
__asm__ volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1))); |
asm volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1))); |
return v; |
} |
233,7 → 267,7 |
{ |
uintptr_t *ip; |
__asm__ volatile ( |
asm volatile ( |
"mov %%eip, %0" |
: "=r" (ip) |
); |
246,7 → 280,7 |
*/ |
static inline void invlpg(uintptr_t addr) |
{ |
__asm__ volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr)); |
asm volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr)); |
} |
/** Load GDTR register from memory. |
255,7 → 289,7 |
*/ |
static inline void gdtr_load(ptr_16_32_t *gdtr_reg) |
{ |
__asm__ volatile ("lgdtl %0\n" : : "m" (*gdtr_reg)); |
asm volatile ("lgdtl %0\n" : : "m" (*gdtr_reg)); |
} |
/** Store GDTR register to memory. |
264,7 → 298,7 |
*/ |
static inline void gdtr_store(ptr_16_32_t *gdtr_reg) |
{ |
__asm__ volatile ("sgdtl %0\n" : : "m" (*gdtr_reg)); |
asm volatile ("sgdtl %0\n" : : "m" (*gdtr_reg)); |
} |
/** Load IDTR register from memory. |
273,7 → 307,7 |
*/ |
static inline void idtr_load(ptr_16_32_t *idtr_reg) |
{ |
__asm__ volatile ("lidtl %0\n" : : "m" (*idtr_reg)); |
asm volatile ("lidtl %0\n" : : "m" (*idtr_reg)); |
} |
/** Load TR from descriptor table. |
282,7 → 316,7 |
*/ |
static inline void tr_load(uint16_t sel) |
{ |
__asm__ volatile ("ltr %0" : : "r" (sel)); |
asm volatile ("ltr %0" : : "r" (sel)); |
} |
#endif |
/trunk/kernel/arch/ia32/include/barrier.h |
---|
46,12 → 46,12 |
* Provisions are made to prevent compiler from reordering instructions itself. |
*/ |
#define CS_ENTER_BARRIER() __asm__ volatile ("" ::: "memory") |
#define CS_LEAVE_BARRIER() __asm__ volatile ("" ::: "memory") |
#define CS_ENTER_BARRIER() asm volatile ("" ::: "memory") |
#define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory") |
static inline void cpuid_serialization(void) |
{ |
__asm__ volatile ( |
asm volatile ( |
"xorl %%eax, %%eax\n" |
"cpuid\n" |
::: "eax", "ebx", "ecx", "edx", "memory" |
59,20 → 59,20 |
} |
#ifdef CONFIG_FENCES_P4 |
# define memory_barrier() __asm__ volatile ("mfence\n" ::: "memory") |
# define read_barrier() __asm__ volatile ("lfence\n" ::: "memory") |
# define memory_barrier() asm volatile ("mfence\n" ::: "memory") |
# define read_barrier() asm volatile ("lfence\n" ::: "memory") |
# ifdef CONFIG_WEAK_MEMORY |
# define write_barrier() __asm__ volatile ("sfence\n" ::: "memory") |
# define write_barrier() asm volatile ("sfence\n" ::: "memory") |
# else |
# define write_barrier() __asm__ volatile( "" ::: "memory"); |
# define write_barrier() asm volatile( "" ::: "memory"); |
# endif |
#elif CONFIG_FENCES_P3 |
# define memory_barrier() cpuid_serialization() |
# define read_barrier() cpuid_serialization() |
# ifdef CONFIG_WEAK_MEMORY |
# define write_barrier() __asm__ volatile ("sfence\n" ::: "memory") |
# define write_barrier() asm volatile ("sfence\n" ::: "memory") |
# else |
# define write_barrier() __asm__ volatile( "" ::: "memory"); |
# define write_barrier() asm volatile( "" ::: "memory"); |
# endif |
#else |
# define memory_barrier() cpuid_serialization() |
80,7 → 80,7 |
# ifdef CONFIG_WEAK_MEMORY |
# define write_barrier() cpuid_serialization() |
# else |
# define write_barrier() __asm__ volatile( "" ::: "memory"); |
# define write_barrier() asm volatile( "" ::: "memory"); |
# endif |
#endif |
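The configuration ladder above picks the strongest primitive the CPU offers: mfence/lfence/sfence on P4, sfence plus a serializing cpuid on P3, and cpuid alone on older parts. Whichever is selected, the usage contract is the same, e.g. publishing data behind a flag (data_sketch and ready_sketch are hypothetical):

volatile int data_sketch;
volatile int ready_sketch;

static void publish_sketch(int value)
{
	data_sketch = value;
	write_barrier();	/* order the data store before the flag store */
	ready_sketch = 1;	/* consumers pair this with read_barrier() */
}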
/trunk/kernel/arch/ia32/src/fpu_context.c |
---|
43,7 → 43,7 |
static void fpu_context_f_save(fpu_context_t *fctx) |
{ |
__asm__ volatile ( |
asm volatile ( |
"fnsave %0" |
: "=m"(*fctx) |
); |
51,7 → 51,7 |
static void fpu_context_f_restore(fpu_context_t *fctx) |
{ |
__asm__ volatile ( |
asm volatile ( |
"frstor %0" |
: "=m"(*fctx) |
); |
59,7 → 59,7 |
static void fpu_context_fx_save(fpu_context_t *fctx) |
{ |
__asm__ volatile ( |
asm volatile ( |
"fxsave %0" |
: "=m"(*fctx) |
); |
67,7 → 67,7 |
static void fpu_context_fx_restore(fpu_context_t *fctx) |
{ |
__asm__ volatile ( |
asm volatile ( |
"fxrstor %0" |
: "=m"(*fctx) |
); |
103,7 → 103,7 |
void fpu_init() |
{ |
uint32_t help0 = 0, help1 = 0; |
__asm__ volatile ( |
asm volatile ( |
"fninit;\n" |
"stmxcsr %0\n" |
"mov %0,%1;\n" |
/trunk/kernel/arch/ia32/src/cpu/cpu.c |
---|
71,7 → 71,7 |
void fpu_disable(void) |
{ |
__asm__ volatile ( |
asm volatile ( |
"mov %%cr0,%%eax;" |
"or $8,%%eax;" |
"mov %%eax,%%cr0;" |
83,7 → 83,7 |
void fpu_enable(void) |
{ |
__asm__ volatile ( |
asm volatile ( |
"mov %%cr0,%%eax;" |
"and $0xffFFffF7,%%eax;" |
"mov %%eax,%%cr0;" |
/trunk/kernel/arch/ia32/src/pm.c |
---|
147,7 → 147,7 |
/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */ |
static void clean_IOPL_NT_flags(void) |
{ |
__asm__ volatile ( |
asm volatile ( |
"pushfl\n" |
"pop %%eax\n" |
"and $0xffff8fff, %%eax\n" |
160,7 → 160,7 |
/* Clean AM(18) flag in CR0 register */ |
static void clean_AM_flag(void) |
{ |
__asm__ volatile ( |
asm volatile ( |
"mov %%cr0, %%eax\n" |
"and $0xfffbffff, %%eax\n" |
"mov %%eax, %%cr0\n" |
/trunk/kernel/arch/ia32/src/userspace.c |
---|
51,7 → 51,7 |
ipl = interrupts_disable(); |
__asm__ volatile ( |
asm volatile ( |
/* |
* Clear nested task flag. |
*/ |
/trunk/uspace/libc/malloc/malloc.c |
---|
1569,7 → 1569,7 |
I = NTREEBINS-1;\ |
else {\ |
unsigned int K;\ |
__asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\ |
asm("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\ |
I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ |
}\ |
} |
1628,7 → 1628,7 |
#define compute_bit2idx(X, I)\ |
{\ |
unsigned int J;\ |
__asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\ |
asm("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\ |
I = (bindex_t)J;\ |
} |
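bsrl stores the index of the highest set bit (floor(log2 X)) and bsfl the index of the lowest; both are undefined for a zero input, which the surrounding macros rule out. Portable equivalents, for reference (the GCC builtins compile to these very instructions on ia32):

static inline unsigned int msb_index_sketch(unsigned int x)
{
	return 31 - __builtin_clz(x);	/* what "bsrl x" produces */
}

static inline unsigned int lsb_index_sketch(unsigned int x)
{
	return __builtin_ctz(x);	/* what "bsfl x" produces */
}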
/trunk/uspace/libc/arch/sparc64/include/syscall.h |
---|
46,7 → 46,7 |
register uint64_t a3 asm("o2") = p3; |
register uint64_t a4 asm("o3") = p4; |
__asm__ volatile ( |
asm volatile ( |
"ta %5\n" |
: "=r" (a1) |
: "r" (a1), "r" (a2), "r" (a3), "r" (a4), "i" (id) |
/trunk/uspace/libc/arch/sparc64/include/atomic.h |
---|
53,7 → 53,7 |
do { |
a = val->count; |
b = a + i; |
__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (b) : "r" (a)); |
asm volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (b) : "r" (a)); |
} while (a != b); |
return a; |
/trunk/uspace/libc/arch/sparc64/include/thread.h |
---|
45,7 → 45,7 |
static inline void __tcb_set(tcb_t *tcb) |
{ |
__asm__ volatile ("mov %0, %%g7\n" : : "r" (tcb) : "g7"); |
asm volatile ("mov %0, %%g7\n" : : "r" (tcb) : "g7"); |
} |
static inline tcb_t * __tcb_get(void) |
52,7 → 52,7 |
{ |
void *retval; |
__asm__ volatile ("mov %%g7, %0\n" : "=r" (retval)); |
asm volatile ("mov %%g7, %0\n" : "=r" (retval)); |
return retval; |
} |
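Each port reserves one register as the thread pointer: %g7 here, r13 on ia64, %fs:0 on amd64, %gs:0 on ia32, and $k1 (with an offset) on mips32. The "g7" clobber in __tcb_set tells GCC the register is taken; afterwards any libc code reaches its TCB in a single move:

static inline tcb_t *current_tcb_sketch(void)
{
	return __tcb_get();	/* one register read, no syscall */
}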
/trunk/uspace/libc/arch/ia64/include/atomic.h |
---|
46,7 → 46,7 |
{ |
long v; |
__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm)); |
asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm)); |
return v; |
} |
/trunk/uspace/libc/arch/ia64/include/thread.h |
---|
45,7 → 45,7 |
static inline void __tcb_set(tcb_t *tcb) |
{ |
__asm__ volatile ("mov r13 = %0\n" : : "r" (tcb) : "r13"); |
asm volatile ("mov r13 = %0\n" : : "r" (tcb) : "r13"); |
} |
static inline tcb_t *__tcb_get(void) |
52,7 → 52,7 |
{ |
void *retval; |
__asm__ volatile ("mov %0 = r13\n" : "=r" (retval)); |
asm volatile ("mov %0 = r13\n" : "=r" (retval)); |
return retval; |
} |
/trunk/uspace/libc/arch/ppc32/include/atomic.h |
---|
39,7 → 39,7 |
{ |
long tmp; |
asm __volatile__ ( |
asm volatile ( |
"1:\n" |
"lwarx %0, 0, %2\n" |
"addic %0, %0, 1\n" |
54,7 → 54,7 |
{ |
long tmp; |
asm __volatile__( |
asm volatile ( |
"1:\n" |
"lwarx %0, 0, %2\n" |
"addic %0, %0, -1\n" |
/trunk/uspace/libc/arch/amd64/include/atomic.h |
---|
38,11 → 38,11 |
#define LIBC_amd64_ATOMIC_H_ |
static inline void atomic_inc(atomic_t *val) { |
__asm__ volatile ("lock incq %0\n" : "=m" (val->count)); |
asm volatile ("lock incq %0\n" : "=m" (val->count)); |
} |
static inline void atomic_dec(atomic_t *val) { |
__asm__ volatile ("lock decq %0\n" : "=m" (val->count)); |
asm volatile ("lock decq %0\n" : "=m" (val->count)); |
} |
static inline long atomic_postinc(atomic_t *val) |
49,7 → 49,7 |
{ |
long r; |
__asm__ volatile ( |
asm volatile ( |
"movq $1, %0\n" |
"lock xaddq %0, %1\n" |
: "=r" (r), "=m" (val->count) |
62,7 → 62,7 |
{ |
long r; |
__asm__ volatile ( |
asm volatile ( |
"movq $-1, %0\n" |
"lock xaddq %0, %1\n" |
: "=r" (r), "=m" (val->count) |
/trunk/uspace/libc/arch/amd64/include/thread.h |
---|
51,7 → 51,7 |
{ |
void * retval; |
__asm__ ("movq %%fs:0, %0" : "=r"(retval)); |
asm ("movq %%fs:0, %0" : "=r"(retval)); |
return retval; |
} |
/trunk/uspace/libc/arch/ppc64/include/atomic.h |
---|
39,7 → 39,7 |
{ |
long tmp; |
asm __volatile__ ( |
asm volatile ( |
"1:\n" |
"lwarx %0, 0, %2\n" |
"addic %0, %0, 1\n" |
54,7 → 54,7 |
{ |
long tmp; |
asm __volatile__( |
asm volatile ( |
"1:\n" |
"lwarx %0, 0, %2\n" |
"addic %0, %0, -1\n" |
/trunk/uspace/libc/arch/mips32/include/atomic.h |
---|
56,7 → 56,7 |
{ |
long tmp, v; |
__asm__ volatile ( |
asm volatile ( |
"1:\n" |
" ll %0, %1\n" |
" addiu %0, %0, %3\n" /* same as addi, but never traps on overflow */ |
/trunk/uspace/libc/arch/mips32/include/thread.h |
---|
61,7 → 61,7 |
void *tp = tcb; |
tp += MIPS_TP_OFFSET + sizeof(tcb_t); |
__asm__ volatile ("add $27, %0, $0" : : "r"(tp)); /* Move tls to K1 */ |
asm volatile ("add $27, %0, $0" : : "r"(tp)); /* Move tls to K1 */ |
} |
static inline tcb_t * __tcb_get(void) |
68,7 → 68,7 |
{ |
void * retval; |
__asm__ volatile("add %0, $27, $0" : "=r"(retval)); |
asm volatile("add %0, $27, $0" : "=r"(retval)); |
return (tcb_t *)(retval - MIPS_TP_OFFSET - sizeof(tcb_t)); |
} |
/trunk/uspace/libc/arch/ia32/include/atomic.h |
---|
36,11 → 36,11 |
#define LIBC_ia32_ATOMIC_H_ |
static inline void atomic_inc(atomic_t *val) { |
__asm__ volatile ("lock incl %0\n" : "=m" (val->count)); |
asm volatile ("lock incl %0\n" : "=m" (val->count)); |
} |
static inline void atomic_dec(atomic_t *val) { |
__asm__ volatile ("lock decl %0\n" : "=m" (val->count)); |
asm volatile ("lock decl %0\n" : "=m" (val->count)); |
} |
static inline long atomic_postinc(atomic_t *val) |
47,7 → 47,7 |
{ |
long r; |
__asm__ volatile ( |
asm volatile ( |
"movl $1, %0\n" |
"lock xaddl %0, %1\n" |
: "=r" (r), "=m" (val->count) |
60,7 → 60,7 |
{ |
long r; |
__asm__ volatile ( |
asm volatile ( |
"movl $-1, %0\n" |
"lock xaddl %0, %1\n" |
: "=r" (r), "=m" (val->count) |
/trunk/uspace/libc/arch/ia32/include/thread.h |
---|
51,7 → 51,7 |
{ |
void * retval; |
__asm__ ("movl %%gs:0, %0" : "=r"(retval)); |
asm ("movl %%gs:0, %0" : "=r"(retval)); |
return retval; |
} |
/trunk/boot/arch/sparc64/loader/ofwarch.c |
---|
69,7 → 69,7 |
uint64_t current_mid; |
__asm__ volatile ("ldxa [%1] %2, %0\n" : "=r" (current_mid) : "r" (0), "i" (ASI_UPA_CONFIG)); |
asm volatile ("ldxa [%1] %2, %0\n" : "=r" (current_mid) : "r" (0), "i" (ASI_UPA_CONFIG)); |
current_mid >>= UPA_CONFIG_MID_SHIFT; |
current_mid &= UPA_CONFIG_MID_MASK; |