/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __amd64_ATOMIC_H__
#define __amd64_ATOMIC_H__

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <typedefs.h>

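/**
 * atomic_inc() and atomic_dec() adjust val->count by one. On SMP the lock
 * prefix makes the read-modify-write atomic with respect to other CPUs; on
 * a uniprocessor build a plain incq/decq suffices, since a single
 * instruction cannot be split by an interrupt.
 */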
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
	/* val->count is both read and written, hence the "+m" constraint. */
	__asm__ volatile ("lock incq %0\n" : "+m" (val->count));
#else
	__asm__ volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
	__asm__ volatile ("lock decq %0\n" : "+m" (val->count));
#else
	__asm__ volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

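/**
 * atomic_postinc() and atomic_postdec() return the value val->count held
 * before the operation: xaddq stores the original memory value in the
 * register operand and writes the sum back to memory.
 */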
static inline long atomic_postinc(atomic_t *val)
{
	long r = 1;

	/*
	 * r must be an in/out operand: xaddq overwrites it with the old
	 * value of val->count, which is what we return.
	 */
	__asm__ volatile (
		"lock xaddq %0, %1\n"
		: "+r" (r), "+m" (val->count)
	);

	return r;
}

static inline long atomic_postdec(atomic_t *val)
{
	long r = -1;

	__asm__ volatile (
		"lock xaddq %0, %1\n"
		: "+r" (r), "+m" (val->count)
	);

	return r;
}

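/* Pre-increment/pre-decrement are derived from the post variants by adjusting the returned value. */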
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)

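/**
 * Atomically set val->count to 1 and return its previous value.
 * A return value of 0 means the caller is the one who set it.
 */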
static inline __u64 test_and_set(atomic_t *val) {
	__u64 v;

	__asm__ volatile (
		"movq $1, %0\n"
		"xchgq %0, %1\n"
		: "=r" (v), "+m" (val->count)
	);

	return v;
}


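/*
 * Illustrative sketch only (not part of the original header): test_and_set()
 * is the natural building block for a trivial busy-wait lock. The names
 * example_lock()/example_unlock() are hypothetical and exist purely to show
 * the intended use of the primitive.
 */
static inline void example_lock(atomic_t *val)
{
	/* Spin until the previous value was 0, i.e. we were the one to set it. */
	while (test_and_set(val))
		;
}

static inline void example_unlock(atomic_t *val)
{
	/* A real unlock would also issue a barrier before the store. */
	val->count = 0;
}
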
/** amd64 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
	__u64 tmp;

	preemption_disable();
	__asm__ volatile (
		"0:;"
#ifdef CONFIG_HT
		"pause;"
#endif
		"mov %0, %1;"
		"testq %1, %1;"
		"jnz 0b;"       /* Lightweight looping on locked spinlock */

		"incq %1;"      /* now use the atomic operation */
		"xchgq %0, %1;"
		"testq %1, %1;"
		"jnz 0b;"
		: "+m" (val->count), "=r" (tmp)
	);
	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}

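/*
 * Illustrative counterpart (not part of the original header): releasing a
 * lock taken with atomic_lock_arch() could look like the sketch below. The
 * name atomic_unlock_arch_example() is hypothetical; CS_LEAVE_BARRIER() and
 * preemption_enable() are assumed to be provided by <arch/barrier.h> and
 * <preemption.h> as counterparts of the calls used above.
 */
static inline void atomic_unlock_arch_example(atomic_t *val)
{
	/* Prevent critical section code from bleeding out past the unlock. */
	CS_LEAVE_BARRIER();
	val->count = 0;
	preemption_enable();
}
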
#endif