--- Rev 3343
+++ Rev 3397
@@ -35,10 +35,11 @@
 #ifndef KERN_sparc64_ATOMIC_H_
 #define KERN_sparc64_ATOMIC_H_
 
 #include <arch/barrier.h>
 #include <arch/types.h>
+#include <preemption.h>
 
 /** Atomic add operation.
  *
  * Use atomic compare and swap operation to atomically add signed value.
  *
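The only change in this hunk is the new #include <preemption.h>; it provides the preemption_disable() call that rev 3397 adds to atomic_lock_arch() in the last hunk below.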
@@ -54,10 +55,11 @@
 	do {
 		volatile uintptr_t x = (uint64_t) &val->count;
 
 		a = *((uint64_t *) x);
 		b = a + i;
-		asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a));
+		asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)),
+		    "+r" (b) : "r" (a));
 	} while (a != b);
 
 	return a;
 }
 
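Only the line wrapping of the asm statement changes here; the logic is the same in both revisions. casx compares the value at the given address with %2 (the expected value a) and, if they match, stores %1 (the desired value b); in either case %1 receives the value that was actually in memory, so the loop repeats until no other CPU modified the counter between the plain load and the casx. For illustration only, a minimal sketch of the same retry pattern in portable C11 atomics (the function name is hypothetical; the real header of course uses the inline casx above):

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical portable rendering of the casx retry loop above. */
static inline uint64_t atomic_add_sketch(_Atomic uint64_t *count, int64_t i)
{
	uint64_t a = atomic_load(count);

	/* On failure, atomic_compare_exchange_weak() reloads 'a' with the
	 * value currently in memory, so the next iteration retries with a
	 * fresh expected value, exactly what the a != b check does. */
	while (!atomic_compare_exchange_weak(count, &a, a + i))
		;

	return a;	/* the value before the addition */
}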
@@ -95,11 +97,12 @@
 static inline long test_and_set(atomic_t *val)
 {
 	uint64_t v = 1;
 	volatile uintptr_t x = (uint64_t) &val->count;
 
-	asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (v) : "r" (0));
+	asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)),
+	    "+r" (v) : "r" (0));
 
 	return v;
 }
 
 static inline void atomic_lock_arch(atomic_t *val)
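test_and_set() uses the same casx pattern with fixed operands: it stores 1 into val->count only if the count was 0, and returns the previous count, so a return value of 0 means the caller now owns the word. A hedged usage sketch (the wrapper name is made up; atomic_lock_arch() below is the real consumer, which adds barriers and preemption handling):

/* Hypothetical busy-wait acquire built on test_and_set(); the real
 * kernel uses atomic_lock_arch() instead. */
static inline void spin_acquire_sketch(atomic_t *lock)
{
	/* A nonzero return means the word was already 1 (held), so keep
	 * retrying until we are the CPU that flips it from 0 to 1. */
	while (test_and_set(lock) != 0)
		;
}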
@@ -107,10 +110,12 @@
 	uint64_t tmp1 = 1;
 	uint64_t tmp2 = 0;
 
 	volatile uintptr_t x = (uint64_t) &val->count;
 
+	preemption_disable();
+
 	asm volatile (
 		"0:\n"
 		"casx %0, %3, %1\n"
 		"brz %1, 2f\n"
 		"nop\n"