41,17 → 41,29

static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
-    asm volatile ("lock incl %0\n" : "+m" (val->count));
+    asm volatile (
+        "lock incl %[count]\n"
+        : [count] "+m" (val->count)
+    );
#else
-    asm volatile ("incl %0\n" : "+m" (val->count));
+    asm volatile (
+        "incl %[count]\n"
+        : [count] "+m" (val->count)
+    );
#endif /* CONFIG_SMP */
}

static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
-    asm volatile ("lock decl %0\n" : "+m" (val->count));
+    asm volatile (
+        "lock decl %[count]\n"
+        : [count] "+m" (val->count)
+    );
#else
-    asm volatile ("decl %0\n" : "+m" (val->count));
+    asm volatile (
+        "decl %[count]\n"
+        : [count] "+m" (val->count)
+    );
#endif /* CONFIG_SMP */
}
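For readers unfamiliar with named operands, here is a minimal, self-contained sketch of the syntax the patch switches to. Everything around the asm statement (the atomic_t layout, the 32-bit counter, the demo main) is an assumption made only to get a standalone example that compiles with GCC or Clang on x86/x86-64; only the asm statement itself mirrors the patch.

#include <stdint.h>
#include <stdio.h>

typedef struct {
    volatile int32_t count;    /* assumed layout, for illustration only */
} atomic_t;

static inline void atomic_inc(atomic_t *val)
{
    /* %[count] refers to the operand declared as [count] in the
     * constraint list, not to a positional index such as %0. */
    asm volatile (
        "lock incl %[count]\n"
        : [count] "+m" (val->count)
    );
}

int main(void)
{
    atomic_t a = { .count = 41 };
    atomic_inc(&a);
    printf("%d\n", (int) a.count);    /* prints 42 */
    return 0;
}

The benefit is exactly what the hunks above show: the template names its operands, so it no longer depends on their position in the constraint list and survives operands being added or reordered.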
60,8 → 72,8

    long r = 1;

    asm volatile (
-        "lock xaddl %1, %0\n"
-        : "+m" (val->count), "+r" (r)
+        "lock xaddl %[r], %[count]\n"
+        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;
72,8 → 84,8

    long r = -1;

    asm volatile (
-        "lock xaddl %1, %0\n"
-        : "+m" (val->count), "+r"(r)
+        "lock xaddl %[r], %[count]\n"
+        : [count] "+m" (val->count), [r] "+r"(r)
    );

    return r;
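The two hunks above rely on the semantics of xadd: lock xaddl adds the register to the memory operand and leaves the previous memory value in the register, so r ends up holding the counter value from before the addition, which is what gets returned. Below is a sketch of a complete wrapper built the same way; the function name, the atomic_t layout, and the return type are assumptions, since the hunks only show the bodies.

#include <stdint.h>

typedef struct {
    volatile int32_t count;    /* assumed layout, for illustration only */
} atomic_t;

/* Hypothetical post-increment wrapper: lock xaddl performs
 * count += r atomically and stores the old value of count in r,
 * so the caller receives the pre-increment value. */
static inline int32_t atomic_postinc(atomic_t *val)
{
    int32_t r = 1;

    asm volatile (
        "lock xaddl %[r], %[count]\n"
        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;
}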
86,9 → 98,9

    uint32_t v;

    asm volatile (
-        "movl $1, %0\n"
-        "xchgl %0, %1\n"
-        : "=r" (v),"+m" (val->count)
+        "movl $1, %[v]\n"
+        "xchgl %[v], %[count]\n"
+        : [v] "=r" (v), [count] "+m" (val->count)
    );

    return v;
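This hunk is a test-and-set: movl loads 1 into a register and xchgl swaps it with the lock word, leaving the previous value in v, so a zero result means the caller is the one who set the lock. xchgl with a memory operand is implicitly locked, which is why no lock prefix appears. Below is a rough sketch of a trylock built on the same sequence; the spinlock_t type, the function name, and the "memory" clobber (added as a compiler barrier) are assumptions, not part of the patch.

#include <stdint.h>

typedef struct {
    volatile uint32_t count;    /* assumed: 0 = unlocked, 1 = locked */
} spinlock_t;

/* Hypothetical trylock: xchgl is implicitly locked when one operand
 * is in memory, so the swap is atomic without a lock prefix.
 * The "memory" clobber is added here as a compiler barrier so that
 * critical-section accesses are not hoisted above the acquire. */
static inline int spinlock_trylock(spinlock_t *lock)
{
    uint32_t v;

    asm volatile (
        "movl $1, %[v]\n"
        "xchgl %[v], %[count]\n"
        : [v] "=r" (v), [count] "+m" (lock->count)
        :
        : "memory"
    );

    return v == 0;    /* a zero old value means the lock was free */
}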
105,15 → 117,15

#ifdef CONFIG_HT
        "pause\n" /* Pentium 4's HT loves this instruction */
#endif
-        "mov %0, %1\n"
-        "testl %1, %1\n"
+        "mov %[count], %[tmp]\n"
+        "testl %[tmp], %[tmp]\n"
        "jnz 0b\n" /* lightweight looping on a locked spinlock */

-        "incl %1\n" /* now use the atomic operation */
-        "xchgl %0, %1\n"
-        "testl %1, %1\n"
+        "incl %[tmp]\n" /* now use the atomic operation */
+        "xchgl %[count], %[tmp]\n"
+        "testl %[tmp], %[tmp]\n"
        "jnz 0b\n"
-        : "+m" (val->count), "=&r"(tmp)
+        : [count] "+m" (val->count), [tmp] "=&r" (tmp)
    );
    /*
     * Prevent critical section code from bleeding out this way up.