/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64
 * @{
 */
/** @file
 */

#ifndef KERN_amd64_ATOMIC_H_
#define KERN_amd64_ATOMIC_H_

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>

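/** Atomically increment a value.
 *
 * @param val Atomic variable to be incremented.
 */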
static inline void atomic_inc(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile ("lock incq %0\n" : "+m" (val->count));
#else
	asm volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

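/** Atomically decrement a value.
 *
 * @param val Atomic variable to be decremented.
 */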
static inline void atomic_dec(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile ("lock decq %0\n" : "+m" (val->count));
#else
	asm volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

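/** Atomically increment a value and return its original contents.
 *
 * XADD exchanges the register operand with the memory operand and stores
 * the sum in memory, leaving the original value in the register.
 *
 * @param val Atomic variable to be incremented.
 *
 * @return Value of val before the increment.
 */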
static inline long atomic_postinc(atomic_t *val)
{
	long r = 1;

	asm volatile (
		"lock xaddq %1, %0\n"
		: "+m" (val->count), "+r" (r)
	);

	return r;
}

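/** Atomically decrement a value and return its original contents.
 *
 * @param val Atomic variable to be decremented.
 *
 * @return Value of val before the decrement.
 */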
static inline long atomic_postdec(atomic_t *val)
{
	long r = -1;

	asm volatile (
		"lock xaddq %1, %0\n"
		: "+m" (val->count), "+r" (r)
	);

	return r;
}

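/* Pre-increment and pre-decrement, built on top of the post- variants. */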
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)

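/** Atomically set a value to 1 and return its original contents.
 *
 * XCHG with a memory operand locks the bus implicitly, so no lock
 * prefix is needed even on SMP.
 *
 * @param val Atomic variable to be tested and set.
 *
 * @return Value of val before it was set to 1; zero means the caller
 *         observed the variable unset.
 */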
static inline uint64_t test_and_set(atomic_t *val)
{
	uint64_t v;

	asm volatile (
		"movq $1, %0\n"
		"xchgq %0, %1\n"
		: "=r" (v), "+m" (val->count)
	);

	return v;
}

/** amd64 specific fast spinlock.
 *
 * Spins in a test-and-test-and-set loop: the lock word is read with
 * plain loads until it appears free and only then grabbed with XCHG.
 */
static inline void atomic_lock_arch(atomic_t *val)
{
	uint64_t tmp;

	preemption_disable();
	asm volatile (
		"0:\n"
#ifdef CONFIG_HT
		"pause\n"	/* Relax the pipeline while spinning */
#endif
		"mov %0, %1\n"
		"testq %1, %1\n"
		"jnz 0b\n"	/* Lightweight looping on locked spinlock */

		"incq %1\n"	/* Now use the atomic operation */
		"xchgq %0, %1\n"
		"testq %1, %1\n"
		"jnz 0b\n"
		: "+m" (val->count), "=r" (tmp)
	);
	/*
	 * Prevent the critical section code from bleeding out above the
	 * lock acquisition.
	 */
	CS_ENTER_BARRIER();
}

#endif

/** @}
 */