/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64
 * @{
 */
/** @file
 */

#ifndef KERN_sparc64_ATOMIC_H_
#define KERN_sparc64_ATOMIC_H_

#include <arch/barrier.h>
#include <arch/types.h>
#include <typedefs.h>

/** Atomic add operation.
 *
 * Use the atomic compare-and-swap (casx) instruction to atomically add a
 * signed value.
 *
 * @param val Atomic variable.
 * @param i Signed value to be added.
 *
 * @return Value of the atomic variable as it existed before the addition.
 */
static inline long atomic_add(atomic_t *val, int i)
{
	uint64_t a, b;

	do {
		volatile uintptr_t x = (uint64_t) &val->count;

		a = *((uint64_t *) x);
		b = a + i;
		__asm__ volatile ("casx %0, %2, %1\n"
		    : "+m" (*((uint64_t *) x)), "+r" (b) : "r" (a));
	} while (a != b);

	return a;
}

static inline long atomic_preinc(atomic_t *val)
{
	return atomic_add(val, 1) + 1;
}

static inline long atomic_postinc(atomic_t *val)
{
	return atomic_add(val, 1);
}

static inline long atomic_predec(atomic_t *val)
{
	return atomic_add(val, -1) - 1;
}

static inline long atomic_postdec(atomic_t *val)
{
	return atomic_add(val, -1);
}

static inline void atomic_inc(atomic_t *val)
{
	(void) atomic_add(val, 1);
}

static inline void atomic_dec(atomic_t *val)
{
	(void) atomic_add(val, -1);
}

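/*
 * Illustration only, not part of the original header: return value
 * conventions of the helpers above. The pre* variants return the value
 * after the operation, the post* variants the value before it. The
 * atomic_set() initializer comes from the generic kernel atomic API and
 * is assumed here.
 *
 *	atomic_t cnt;
 *	atomic_set(&cnt, 0);
 *	long a = atomic_add(&cnt, 5);    // a == 0, counter is now 5
 *	long b = atomic_preinc(&cnt);    // b == 6, value after the increment
 *	long c = atomic_postinc(&cnt);   // c == 6, value before the increment
 *	atomic_dec(&cnt);                // counter is 6 again, result discarded
 */
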
/** Atomically set the variable to 1 and return its previous value. */
static inline long test_and_set(atomic_t *val)
{
	uint64_t v = 1;

	__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (v) : "r" (0));

	return v;
}

/** Spin until the atomic variable is atomically changed from 0 to 1. */
static inline void atomic_lock_arch(atomic_t *val)
{
	uint64_t tmp1 = 1;
	uint64_t tmp2;

	__asm__ volatile (
	"0:\n"
	"casx %0, %3, %1\n"
	"brz %1, 2f\n"
	"nop\n"
	"1:\n"
	"ldx %0, %2\n"
	"brz %2, 0b\n"
	"nop\n"
	"ba 1b\n"
	"nop\n"
	"2:\n"
	: "+m" (*val), "+r" (tmp1), "+r" (tmp2) : "r" (0)
	);

	/*
	 * Prevent critical section code from bleeding out above this point.
	 */
	CS_ENTER_BARRIER();
}

#endif

/** @}
 */