ia32 atomic.h: diff between Rev 1024 and Rev 1100
@@ Rev 1024 line 28 / Rev 1100 line 28 @@
 
 #ifndef __ia32_ATOMIC_H__
 #define __ia32_ATOMIC_H__
 
 #include <arch/types.h>
+#include <arch/barrier.h>
+#include <preemption.h>
 
 typedef struct { volatile __u32 count; } atomic_t;
 
 static inline void atomic_set(atomic_t *val, __u32 i)
 {
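
The first hunk adds the two headers needed by the spinlock introduced in the second hunk below: preemption.h for preemption_disable() and barrier.h for CS_ENTER_BARRIER(). The atomic_t type and atomic_set() declaration are unchanged context. As a point of reference only, a minimal caller-side sketch of the type as declared here; the variable and function names are illustrative and not part of this header:

/* Illustration only: ready_cpus and example_init() are hypothetical names.
 * atomic_t and atomic_set() are the type and setter declared above. */
static atomic_t ready_cpus;

void example_init(void)
{
	/* store the initial value into the volatile counter */
	atomic_set(&ready_cpus, 0);
}
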
@@ Rev 1024 line 98 / Rev 1100 line 100 @@
 	);
 
 	return v;
 }
 
-extern void spinlock_arch(volatile int *val);
+/** Ia32 specific fast spinlock */
+static inline void atomic_lock_arch(atomic_t *val)
+{
+	__u32 tmp;
+
+	preemption_disable();
+	__asm__ volatile (
+		"0:;"
+#ifdef CONFIG_HT
+		"pause;"        /* Pentium 4's HT loves this instruction */
+#endif
+		"mov %0, %1;"
+		"testl %1, %1;"
+		"jnz 0b;"       /* Lightweight looping on a locked spinlock */
+
+		"incl %1;"      /* now use the atomic operation */
+		"xchgl %0, %1;"
+		"testl %1, %1;"
+		"jnz 0b;"
+		: "=m" (val->count), "=r" (tmp)
+	);
+	/*
+	 * Prevent critical section code from bleeding out this way up.
+	 */
+	CS_ENTER_BARRIER();
+}
 
 #endif
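
The second hunk replaces the external spinlock_arch() declaration with an inline atomic_lock_arch(): it disables preemption, spins read-only (with pause under CONFIG_HT) until the lock word looks free, only then attempts the atomic xchg, and finishes with CS_ENTER_BARRIER() so critical-section code cannot be hoisted above the lock. A rough portable sketch of the same test-then-exchange idea using C11 atomics; the names spin_lock_t, spin_lock() and spin_unlock() are illustrative only and not part of this header:

#include <stdatomic.h>

typedef struct { atomic_uint count; } spin_lock_t;   /* 0 = free, nonzero = held */

static inline void spin_lock(spin_lock_t *l)
{
	for (;;) {
		/* Cheap read-only spin while the lock is held, so contended
		 * CPUs keep the cache line shared instead of bouncing it. */
		while (atomic_load_explicit(&l->count, memory_order_relaxed) != 0) {
			/* an x86 "pause" hint would go here */
		}

		/* Now try the atomic operation; exchange returns the old value. */
		if (atomic_exchange_explicit(&l->count, 1, memory_order_acquire) == 0)
			return;   /* acquire ordering keeps the critical section below */
	}
}

static inline void spin_unlock(spin_lock_t *l)
{
	atomic_store_explicit(&l->count, 0, memory_order_release);
}

Spinning on a plain load before the exchange is the same trick the assembly uses: the expensive locked operation is attempted only when the lock appears free, which keeps bus traffic low under contention.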