35,17 → 35,17 |
|
/*
 * Atomically increment *val.
 *
 * The "+m" constraint marks the memory operand as read-modify-write,
 * which is what incl actually does; a write-only "=m" constraint would
 * let the compiler discard the previous value.  On SMP the lock prefix
 * makes the increment atomic with respect to other CPUs; on UP a single
 * incl instruction is already atomic with respect to interrupts.
 */
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
    __asm__ volatile ("lock incl %0\n" : "+m" (*val));
#else
    __asm__ volatile ("incl %0\n" : "+m" (*val));
#endif /* CONFIG_SMP */
}
|
/*
 * Atomically decrement *val.
 *
 * Mirror of atomic_inc(): decl is a read-modify-write instruction, so
 * the operand must use the "+m" constraint, not write-only "=m".  The
 * lock prefix is only needed (and only emitted) on SMP builds.
 */
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
    __asm__ volatile ("lock decl %0\n" : "+m" (*val));
#else
    __asm__ volatile ("decl %0\n" : "+m" (*val));
#endif /* CONFIG_SMP */
}
|
55,7 → 55,7 |
__asm__ volatile ( |
"movl $1,%0;" |
"lock xaddl %0,%1;" |
: "=r"(r), "+m" (*val) |
: "=r"(r), "=m" (*val) |
); |
return r; |
} |
68,7 → 68,7 |
__asm__ volatile ( |
"movl $-1,%0;" |
"lock xaddl %0,%1;" |
: "=r"(r), "+m" (*val) |
: "=r"(r), "=m" (*val) |
); |
return r; |
} |
84,7 → 84,7 |
__asm__ volatile ( |
"movl $1, %0\n" |
"xchgl %0, %1\n" |
: "=r" (v),"+m" (*val) |
: "=r" (v),"=m" (*val) |
); |
|
return v; |