Rev 1902 → Rev 1903 (lines from Rev 1902 that were changed are marked "-", their Rev 1903 replacements and additions "+"; all other lines are identical in both revisions)
/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64
 * @{
 */
/** @file
 */

#ifndef KERN_sparc64_ATOMIC_H_
#define KERN_sparc64_ATOMIC_H_

#include <arch/barrier.h>
#include <arch/types.h>
#include <typedefs.h>

/** Atomic add operation.
 *
 * Uses the atomic compare-and-swap instruction (casx) to atomically add
 * a signed value.
 *
 * @param val Atomic variable.
 * @param i   Signed value to be added.
 *
 * @return Value of the atomic variable as it existed before the addition.
 */
static inline long atomic_add(atomic_t *val, int i)
{
	uint64_t a, b;

	do {
		volatile uintptr_t x = (uint64_t) &val->count;

		a = *((uint64_t *) x);
		b = a + i;
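		/*
		 * casx compares the 64-bit word at %0 with a (%2); if they
		 * are equal, it atomically stores b (%1) there. Either way,
		 * %1 is overwritten with the value read from memory, so
		 * b == a afterwards exactly when the swap succeeded without
		 * another CPU racing in between.
		 */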
		__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (b) : "r" (a));
	} while (a != b);

	return a;
}
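
/*
 * Usage sketch (editorial illustration, not part of the original header):
 * atomic_add() behaves like fetch-and-add, returning the pre-addition value.
 * Assuming an atomic_t whose count field starts at zero:
 *
 *	atomic_t cnt = { 0 };
 *	long old = atomic_add(&cnt, 3);		// old == 0, cnt.count == 3
 *	old = atomic_add(&cnt, -1);		// old == 3, cnt.count == 2
 */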

static inline long atomic_preinc(atomic_t *val)
{
	return atomic_add(val, 1) + 1;
}

static inline long atomic_postinc(atomic_t *val)
{
	return atomic_add(val, 1);
}

static inline long atomic_predec(atomic_t *val)
{
	return atomic_add(val, -1) - 1;
}

static inline long atomic_postdec(atomic_t *val)
{
	return atomic_add(val, -1);
}
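
/*
 * Note (editorial): as with the C ++/-- operators, the pre- variants return
 * the new value and the post- variants the old one. For a counter holding 5,
 * atomic_preinc() returns 6 and atomic_postinc() returns 5; both leave the
 * counter at 6.
 */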

static inline void atomic_inc(atomic_t *val)
{
	(void) atomic_add(val, 1);
}

static inline void atomic_dec(atomic_t *val)
{
	(void) atomic_add(val, -1);
}

static inline long test_and_set(atomic_t *val)
{
	uint64_t v = 1;
+	volatile uintptr_t x = (uint64_t) &val->count;

-	__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (v) : "r" (0));
+	__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (v) : "r" (0));

	return v;
}
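
/*
 * Usage sketch (editorial illustration): test_and_set() attempts to flip the
 * variable from 0 to 1 with a single casx and returns the value it observed
 * beforehand, so a return of 0 means the caller acquired the lock:
 *
 *	if (test_and_set(&lock) == 0) {
 *		// lock acquired, enter the critical section
 *	}
 */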

static inline void atomic_lock_arch(atomic_t *val)
{
	uint64_t tmp1 = 1;
	uint64_t tmp2;

+	volatile uintptr_t x = (uint64_t) &val->count;
+
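	/*
	 * Classic test-and-test-and-set loop: label 0 attempts the casx and,
	 * if tmp1 comes back as 0, the lock was free and now belongs to us
	 * (branch to label 2). Otherwise we spin at label 1, re-reading the
	 * lock word with a plain ldx until it reads 0, and only then retry
	 * the more expensive casx.
	 */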
	__asm__ volatile (
	"0:\n"
	"casx %0, %3, %1\n"
	"brz %1, 2f\n"
	"nop\n"
	"1:\n"
	"ldx %0, %2\n"
	"brz %2, 0b\n"
	"nop\n"
	"ba 1b\n"
	"nop\n"
	"2:\n"
-	: "+m" (*val), "+r" (tmp1), "+r" (tmp2) : "r" (0)
+	: "+m" (*((uint64_t *) x)), "+r" (tmp1), "+r" (tmp2) : "r" (0)
	);

	/*
	 * Prevent the critical section code from bleeding out upwards past
	 * the lock acquisition.
	 */
	CS_ENTER_BARRIER();
}
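
/*
 * Usage sketch (editorial illustration): atomic_lock_arch() is the acquire
 * half of a spinlock. The matching release is not defined in this header;
 * a typical counterpart would issue the leave barrier before clearing the
 * lock word (CS_LEAVE_BARRIER() is assumed here to be the companion of
 * CS_ENTER_BARRIER() from <arch/barrier.h>):
 *
 *	atomic_lock_arch(&lock);
 *	// ... critical section ...
 *	CS_LEAVE_BARRIER();
 *	lock.count = 0;	// hypothetical release; the real one lives elsewhere
 */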

#endif

/** @}
 */