Diff between Rev 1024 and Rev 1100 of the AMD64 atomic operations header:
@@ Rev 1024 line 28 / Rev 1100 line 28 @@
 
 #ifndef __amd64_ATOMIC_H__
 #define __amd64_ATOMIC_H__
 
 #include <arch/types.h>
+#include <arch/barrier.h>
+#include <preemption.h>
 
 typedef struct { volatile __u64 count; } atomic_t;
 
 static inline void atomic_set(atomic_t *val, __u64 i)
 {
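
The hunk ends at the opening brace of atomic_set(), so its body is not part of this diff. For orientation only, a setter over this kind of volatile counter is normally a plain store; the body below is an illustrative assumption, not the revision's actual code:

	static inline void atomic_set(atomic_t *val, __u64 i)
	{
		/* Plain store to the volatile counter; no locked instruction is
		 * needed for a simple assignment (assumed body, not from the diff). */
		val->count = i;
	}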
@@ Rev 1024 line 99 / Rev 1100 line 101 @@
 
 	return v;
 }
 
 
-extern void spinlock_arch(volatile int *val);
+/** AMD64 specific fast spinlock */
+static inline void atomic_lock_arch(atomic_t *val)
+{
+	__u64 tmp;
+
+	preemption_disable();
+	__asm__ volatile (
+		"0:;"
+#ifdef CONFIG_HT
+		"pause;"		/* Pentium 4's HT siblings love this instruction */
+#endif
+		"mov %0, %1;"
+		"testq %1, %1;"
+		"jnz 0b;"		/* lightweight looping on a locked spinlock */
+
+		"incq %1;"		/* now use the atomic operation */
+		"xchgq %0, %1;"
+		"testq %1, %1;"
+		"jnz 0b;"
+		: "=m" (val->count), "=r" (tmp)
+	);
+	/*
+	 * Prevent critical section code from bleeding out above this point.
+	 */
+	CS_ENTER_BARRIER();
+}
 
 #endif
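
The replacement of the external spinlock_arch() declaration with the inline atomic_lock_arch() gives AMD64 a test-and-test-and-set lock: the first loop spins on plain loads (with a PAUSE hint on hyper-threaded CPUs under CONFIG_HT) until the lock word reads zero, and only then is the more expensive atomic XCHGQ attempted; if another CPU wins the exchange, control falls back to the read-only loop. The sketch below restates the same idea with GCC atomic builtins purely for illustration; the demo_spin_lock name and the builtins are not part of this header, and it deliberately omits the preemption_disable() call that the kernel version performs first.

	#include <stdint.h>

	/* Illustrative test-and-test-and-set lock, mirroring the inline assembly above. */
	static inline void demo_spin_lock(volatile uint64_t *lock)
	{
		for (;;) {
			/* Lightweight read-only spin while the lock word is non-zero. */
			while (__atomic_load_n(lock, __ATOMIC_RELAXED) != 0)
				__builtin_ia32_pause();	/* same role as the "pause" hint */

			/* Attempt the atomic exchange; reading back zero means we own the lock. */
			if (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE) == 0)
				return;
		}
	}

The __ATOMIC_ACQUIRE ordering on the winning exchange plays the same role as the CS_ENTER_BARRIER() call in the diff: it keeps critical-section accesses from being reordered above the point where the lock is taken. The matching unlock side is not shown in this hunk; presumably it is a release-ordered store of zero followed by re-enabling preemption.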