Diff of the ia32 atomic.h header, Rev 1100 → Rev 1104
 /*
  * Copyright (C) 2001-2004 Jakub Jermar
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * - Redistributions of source code must retain the above copyright
  *   notice, this list of conditions and the following disclaimer.
  * - Redistributions in binary form must reproduce the above copyright
  *   notice, this list of conditions and the following disclaimer in the
  *   documentation and/or other materials provided with the distribution.
  * - The name of the author may not be used to endorse or promote products
  *   derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef __ia32_ATOMIC_H__
 #define __ia32_ATOMIC_H__
 
 #include <arch/types.h>
 #include <arch/barrier.h>
 #include <preemption.h>
-
-typedef struct { volatile __u32 count; } atomic_t;
-
-static inline void atomic_set(atomic_t *val, __u32 i)
-{
-	val->count = i;
-}
-
-static inline __u32 atomic_get(atomic_t *val)
-{
-	return val->count;
-}
+#include <typedefs.h>
 
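This hunk drops the ia32-local definition of atomic_t and its trivial atomic_set()/atomic_get() accessors in favor of including <typedefs.h>, so the type presumably now comes from a shared header. For reference, a minimal sketch of how the removed accessors were used; the caller below is hypothetical, not part of the diff:

	atomic_t refcount;

	atomic_set(&refcount, 1);              /* plain volatile store */
	__u32 n = atomic_get(&refcount);       /* plain volatile load  */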
 static inline void atomic_inc(atomic_t *val) {
 #ifdef CONFIG_SMP
 	__asm__ volatile ("lock incl %0\n" : "=m" (val->count));
 #else
 	__asm__ volatile ("incl %0\n" : "=m" (val->count));
 #endif /* CONFIG_SMP */
 }
 
 static inline void atomic_dec(atomic_t *val) {
 #ifdef CONFIG_SMP
 	__asm__ volatile ("lock decl %0\n" : "=m" (val->count));
 #else
 	__asm__ volatile ("decl %0\n" : "=m" (val->count));
 #endif /* CONFIG_SMP */
 }
 
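atomic_inc()/atomic_dec() need the x86 `lock` prefix only under CONFIG_SMP: on a uniprocessor a memory-destination `incl` is already atomic with respect to interrupts, since interrupts are taken on instruction boundaries, while on SMP the `lock` prefix is required to make the read-modify-write atomic across CPUs. As a rough modern equivalent, a sketch using a GCC atomic builtin (not what this kernel uses):

	/* Sketch: the same increment via a compiler builtin instead of inline asm. */
	static inline void atomic_inc_builtin(volatile unsigned int *count)
	{
		__atomic_fetch_add(count, 1, __ATOMIC_RELAXED);
	}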
-static inline count_t atomic_postinc(atomic_t *val)
+static inline long atomic_postinc(atomic_t *val)
 {
-	count_t r;
+	long r;
 
 	__asm__ volatile (
 		"movl $1, %0\n"
 		"lock xaddl %0, %1\n"
 		: "=r" (r), "=m" (val->count)
 	);
 
 	return r;
 }
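Because `xaddl` exchanges as it adds, the register output r receives the counter's value from before the increment, which is what gives atomic_postinc() its post-increment semantics. A hypothetical caller (not from this header) handing out unique tickets:

	atomic_t next_ticket;   /* assume it starts at 0 */

	long my_ticket = atomic_postinc(&next_ticket);
	/* concurrent callers each observe a distinct value: 0, 1, 2, ... */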
 
-static inline count_t atomic_postdec(atomic_t *val)
+static inline long atomic_postdec(atomic_t *val)
 {
-	count_t r;
+	long r;
 
 	__asm__ volatile (
 		"movl $-1, %0\n"
 		"lock xaddl %0, %1\n"
 		: "=r" (r), "=m" (val->count)
 	);
 
 	return r;
 }
 
 #define atomic_preinc(val) (atomic_postinc(val)+1)
 #define atomic_predec(val) (atomic_postdec(val)-1)
 
 static inline __u32 test_and_set(atomic_t *val) {
 	__u32 v;
 
 	__asm__ volatile (
 		"movl $1, %0\n"
 		"xchgl %0, %1\n"
 		: "=r" (v),"=m" (val->count)
 	);
 
 	return v;
 }
 
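test_and_set() unconditionally stores 1 with `xchgl` (which is implicitly locked on x86 when its operand is in memory) and returns the previous value, so a return of 0 means the caller won the lock. A minimal sketch of a busy-wait lock built on it; the helper is hypothetical, not part of the header:

	static inline void spinlock_acquire_sketch(atomic_t *lock)
	{
		while (test_and_set(lock))
			;  /* spin until the 0 -> 1 transition was ours */
	}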
-/** Ia32 specific fast spinlock */
+/** ia32 specific fast spinlock */
 static inline void atomic_lock_arch(atomic_t *val)
 {
 	__u32 tmp;
 
 	preemption_disable();
 	__asm__ volatile (
 		"0:;"
 #ifdef CONFIG_HT
 		"pause;" /* Pentium 4's HT love this instruction */
 #endif
 		"mov %0, %1;"
 		"testl %1, %1;"
-		"jnz 0b;"  /* Leightweight looping on locked spinlock */
+		"jnz 0b;"  /* Lightweight looping on locked spinlock */
 
 		"incl %1;" /* now use the atomic operation */
 		"xchgl %0, %1;"
 		"testl %1, %1;"
 		"jnz 0b;"
 		: "=m"(val->count),"=r"(tmp)
 	);
 	/*
 	 * Prevent critical section code from bleeding out this way up.
 	 */
 	CS_ENTER_BARRIER();
 }
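The loop spins on plain loads (`mov` + `testl`) and only attempts the bus-locking `xchgl` once the lock word looks free, which keeps a contended lock bouncing in the local cache instead of hammering the bus; `pause` is the spin-loop hint recommended for hyper-threaded Pentium 4 cores, as the comment notes. A sketch of the matching release path, assuming CS_LEAVE_BARRIER() and preemption_enable() are the counterparts of the primitives used above:

	static inline void atomic_unlock_arch_sketch(atomic_t *val)
	{
		CS_LEAVE_BARRIER();   /* keep critical section stores from leaking past the release */
		val->count = 0;       /* hand the lock back */
		preemption_enable();  /* assumed counterpart of preemption_disable() */
	}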
 
 #endif
 