 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia32
 * @{
 */
/** @file
 */

...

#include <arch/barrier.h>
#include <preemption.h>

static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
    asm volatile (
        "lock incl %[count]\n"
        : [count] "+m" (val->count)
    );
#else
    asm volatile (
        "incl %[count]\n"
        : [count] "+m" (val->count)
    );
#endif /* CONFIG_SMP */
}

static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
    asm volatile (
        "lock decl %[count]\n"
        : [count] "+m" (val->count)
    );
#else
    asm volatile (
        "decl %[count]\n"
        : [count] "+m" (val->count)
    );
#endif /* CONFIG_SMP */
}

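/*
 * Illustrative sketch only, not part of the original header: roughly the
 * semantics that the SMP variants above ("lock incl" / "lock decl") provide,
 * expressed with GCC's __atomic builtins. The *_sketch names are hypothetical
 * helpers that exist purely to show the intent of the hand-written asm.
 */
static inline void atomic_inc_sketch(atomic_t *val)
{
    /* Atomic read-modify-write increment, comparable to "lock incl". */
    __atomic_fetch_add(&val->count, 1, __ATOMIC_SEQ_CST);
}

static inline void atomic_dec_sketch(atomic_t *val)
{
    /* Atomic read-modify-write decrement, comparable to "lock decl". */
    __atomic_fetch_sub(&val->count, 1, __ATOMIC_SEQ_CST);
}
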
static inline long atomic_postinc(atomic_t *val)
{
    long r = 1;

    asm volatile (
        "lock xaddl %[r], %[count]\n"
        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;
}

static inline long atomic_postdec(atomic_t *val)
{
    long r = -1;

    asm volatile (
        "lock xaddl %[r], %[count]\n"
        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;
}

#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)

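/*
 * Illustrative sketch only, not part of the original header: xadd hands back
 * the value *before* the operation, so atomic_postdec() returns the old count
 * and atomic_predec() the new one, which is convenient e.g. for reference
 * counting. refcount_put_sketch is a hypothetical name used for illustration.
 */
static inline int refcount_put_sketch(atomic_t *refcount)
{
    /* Non-zero only for the caller that dropped the last reference. */
    return atomic_predec(refcount) == 0;
}
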
static inline uint32_t test_and_set(atomic_t *val) {
    uint32_t v;

    asm volatile (
        "movl $1, %[v]\n"
        "xchgl %[v], %[count]\n"
        : [v] "=r" (v), [count] "+m" (val->count)
    );

    return v;
}

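/*
 * Illustrative sketch only, not part of the original header: how
 * test_and_set() is typically consumed. xchgl is implicitly locked on ia32,
 * so the caller that sees 0 returned is the one that atomically flipped the
 * word from 0 to 1 and thus owns the lock. trylock_sketch is a hypothetical
 * name used for illustration.
 */
static inline int trylock_sketch(atomic_t *val)
{
    /* Succeeds (returns non-zero) only if the lock word was previously 0. */
    return test_and_set(val) == 0;
}
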
/** ia32 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
    uint32_t tmp;

    preemption_disable();
    asm volatile (
        "0:\n"
#ifdef CONFIG_HT
        "pause\n"        /* Pentium 4's HT loves this instruction */
#endif
        "mov %[count], %[tmp]\n"
        "testl %[tmp], %[tmp]\n"
        "jnz 0b\n"       /* lightweight looping on locked spinlock */

        "incl %[tmp]\n"  /* now use the atomic operation */
        "xchgl %[count], %[tmp]\n"
        "testl %[tmp], %[tmp]\n"
        "jnz 0b\n"
        : [count] "+m" (val->count), [tmp] "=&r" (tmp)
    );
    /*
     * Prevent critical section code from bleeding out above this point.
     */
    CS_ENTER_BARRIER();