Rev 3380 (lines marked -) → Rev 4017 (lines marked +)
Line 24...
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 /** @addtogroup amd64
  * @{
  */
 /** @file
  */
 
Line 39...
 #include <arch/barrier.h>
 #include <preemption.h>
 
 static inline void atomic_inc(atomic_t *val) {
 #ifdef CONFIG_SMP
-    asm volatile ("lock incq %0\n" : "+m" (val->count));
+    asm volatile (
+        "lock incq %[count]\n"
+        : [count] "+m" (val->count)
+    );
 #else
-    asm volatile ("incq %0\n" : "+m" (val->count));
+    asm volatile (
+        "incq %[count]\n"
+        : [count] "+m" (val->count)
+    );
 #endif /* CONFIG_SMP */
 }
 
 static inline void atomic_dec(atomic_t *val) {
 #ifdef CONFIG_SMP
-    asm volatile ("lock decq %0\n" : "+m" (val->count));
+    asm volatile (
+        "lock decq %[count]\n"
+        : [count] "+m" (val->count)
+    );
 #else
-    asm volatile ("decq %0\n" : "+m" (val->count));
+    asm volatile (
+        "decq %[count]\n"
+        : [count] "+m" (val->count)
+    );
 #endif /* CONFIG_SMP */
 }
 
 static inline long atomic_postinc(atomic_t *val)
 {
     long r = 1;
 
     asm volatile (
-        "lock xaddq %1, %0\n"
-        : "+m" (val->count), "+r" (r)
+        "lock xaddq %[r], %[count]\n"
+        : [count] "+m" (val->count), [r] "+r" (r)
     );
 
     return r;
 }
 
 static inline long atomic_postdec(atomic_t *val)
 {
     long r = -1;
 
     asm volatile (
-        "lock xaddq %1, %0\n"
-        : "+m" (val->count), "+r" (r)
+        "lock xaddq %[r], %[count]\n"
+        : [count] "+m" (val->count), [r] "+r" (r)
     );
 
     return r;
 }
 
 #define atomic_preinc(val) (atomic_postinc(val) + 1)
 #define atomic_predec(val) (atomic_postdec(val) - 1)
 
 static inline uint64_t test_and_set(atomic_t *val) {
     uint64_t v;
 
     asm volatile (
-        "movq $1, %0\n"
-        "xchgq %0, %1\n"
-        : "=r" (v), "+m" (val->count)
+        "movq $1, %[v]\n"
+        "xchgq %[v], %[count]\n"
+        : [v] "=r" (v), [count] "+m" (val->count)
     );
 
     return v;
 }
 
 
 /** amd64 specific fast spinlock */
 static inline void atomic_lock_arch(atomic_t *val)
 {
     uint64_t tmp;
 
     preemption_disable();
     asm volatile (
         "0:\n"
 #ifdef CONFIG_HT
         "pause\n"
 #endif
-        "mov %0, %1\n"
-        "testq %1, %1\n"
+        "mov %[count], %[tmp]\n"
+        "testq %[tmp], %[tmp]\n"
         "jnz 0b\n"      /* lightweight looping on locked spinlock */
 
-        "incq %1\n"     /* now use the atomic operation */
-        "xchgq %0, %1\n"
-        "testq %1, %1\n"
+        "incq %[tmp]\n"     /* now use the atomic operation */
+        "xchgq %[count], %[tmp]\n"
+        "testq %[tmp], %[tmp]\n"
         "jnz 0b\n"
-        : "+m" (val->count), "=&r" (tmp)
+        : [count] "+m" (val->count), [tmp] "=&r" (tmp)
     );
     /*
      * Prevent critical section code from bleeding out this way up.
      */
     CS_ENTER_BARRIER();
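
The change between the two revisions is purely syntactic: every operand the inline assembly referenced positionally (%0, %1) in Rev 3380 is referenced by a symbolic name (%[count], %[r], %[v], %[tmp]) in Rev 4017, and the one-line asm statements are reflowed with one operand per line. As a rough user-space sketch of the two spellings side by side (built with GCC or Clang on x86-64; my_atomic_t, inc_positional, inc_named and test_and_set_named are illustrative stand-ins, not part of this header):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's atomic_t. */
typedef struct {
    volatile uint64_t count;
} my_atomic_t;

/* Positional operands, Rev 3380 style: %0 is simply the first operand. */
static inline void inc_positional(my_atomic_t *val)
{
    asm volatile ("lock incq %0\n" : "+m" (val->count));
}

/* Named operands, Rev 4017 style: %[count] refers to the operand declared
 * as [count] "+m" (val->count), regardless of where it sits in the list. */
static inline void inc_named(my_atomic_t *val)
{
    asm volatile (
        "lock incq %[count]\n"
        : [count] "+m" (val->count)
    );
}

/* test_and_set in the named style: atomically stores 1 and returns the
 * previous value, so a return of 0 means the flag was just acquired. */
static inline uint64_t test_and_set_named(my_atomic_t *val)
{
    uint64_t v;

    asm volatile (
        "movq $1, %[v]\n"
        "xchgq %[v], %[count]\n"
        : [v] "=r" (v), [count] "+m" (val->count)
    );

    return v;
}

int main(void)
{
    my_atomic_t counter = { 0 };
    my_atomic_t flag = { 0 };

    inc_positional(&counter);
    inc_named(&counter);
    printf("counter = %llu\n", (unsigned long long) counter.count);  /* 2 */

    printf("first  test_and_set = %llu\n",
        (unsigned long long) test_and_set_named(&flag));  /* 0: acquired */
    printf("second test_and_set = %llu\n",
        (unsigned long long) test_and_set_named(&flag));  /* 1: already set */

    return 0;
}

The usual motivation for the named form is robustness and readability: the template keeps working if the operand list is reordered or extended, and %[count] says what is being touched where %0 does not.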
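
The xadd-based helpers rely on the fact that lock xaddq stores the sum in the memory operand and leaves the memory operand's previous value in the register operand, so atomic_postinc/atomic_postdec return the value the counter had before the update, and the atomic_preinc/atomic_predec macros only have to apply the same delta once more. A minimal user-space cross-check against the compiler's __atomic builtins (again with illustrative stand-ins, not the kernel types) might look like this:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real atomic_t and atomic_postinc() are the
 * kernel versions shown in the diff above. */
typedef struct {
    volatile uint64_t count;
} my_atomic_t;

static inline long my_postinc(my_atomic_t *val)
{
    long r = 1;

    asm volatile (
        "lock xaddq %[r], %[count]\n"
        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;  /* xaddq left the counter's previous value in r */
}

#define my_preinc(val)  (my_postinc(val) + 1)

int main(void)
{
    my_atomic_t a = { 41 };
    uint64_t b = 41;

    /* Both return the value the counter had *before* the increment (41). */
    printf("postinc: %ld vs %llu\n", my_postinc(&a),
        (unsigned long long) __atomic_fetch_add(&b, 1, __ATOMIC_SEQ_CST));

    /* Pre-increment is just the post-increment result plus one (43). */
    printf("preinc:  %ld vs %llu\n", my_preinc(&a),
        (unsigned long long) __atomic_add_fetch(&b, 1, __ATOMIC_SEQ_CST));

    return 0;
}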