/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __amd64_ATOMIC_H__
#define __amd64_ATOMIC_H__

#include <arch/types.h>

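/*
 * Atomic 64-bit counter. The volatile qualifier forces the compiler
 * to re-read the value from memory on every access instead of caching
 * it in a register.
 */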
typedef struct { volatile __u64 count; } atomic_t;

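/* Set the counter. A plain store suffices: aligned 64-bit stores are atomic on amd64. */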
static inline void atomic_set(atomic_t *val, __u64 i)
{
	val->count = i;
}

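/* Read the counter. A plain load suffices: aligned 64-bit loads are atomic on amd64. */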
static inline __u64 atomic_get(atomic_t *val)
{
	return val->count;
}

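/*
 * Atomically increment the counter. The lock prefix is needed only on
 * SMP, where other processors may access the same memory concurrently;
 * on a uniprocessor a plain incq suffices.
 */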
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
	__asm__ volatile ("lock incq %0\n" : "+m" (val->count));
#else
	__asm__ volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

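/* Atomically decrement the counter; see atomic_inc() for the SMP/lock rationale. */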
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
	__asm__ volatile ("lock decq %0\n" : "+m" (val->count));
#else
	__asm__ volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

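/*
 * Atomically increment the counter and return its original value.
 * xaddq adds the register into memory and loads the old memory value
 * back into the register, so %0 ends up holding the pre-increment value.
 */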
static inline count_t atomic_postinc(atomic_t *val)
{
	count_t r;

	__asm__ volatile (
		"movq $1, %0\n"
		"lock xaddq %0, %1\n"
		: "=&r" (r), "+m" (val->count)
	);

	return r;
}

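/* Atomically decrement the counter and return its original (pre-decrement) value. */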
static inline count_t atomic_postdec(atomic_t *val)
{
	count_t r;

	__asm__ volatile (
		"movq $-1, %0\n"
		"lock xaddq %0, %1\n"
		: "=&r" (r), "+m" (val->count)
	);

	return r;
}

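/* Pre-increment/pre-decrement: adjust the post- versions' result to yield the new value. */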
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)

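/*
 * Atomically write 1 to the counter and return its previous value.
 * xchgq with a memory operand is implicitly locked, so no lock prefix
 * is needed even on SMP.
 */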
static inline __u64 test_and_set(atomic_t *val) {
	__u64 v;

	__asm__ volatile (
		"movq $1, %0\n"
		"xchgq %0, %1\n"
		: "=&r" (v), "+m" (val->count)
	);

	return v;
}

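/*
 * A minimal sketch (illustrative only, not part of this header) of how
 * test_and_set() can back a busy-wait lock; the kernel's own locking
 * path is spinlock_arch(), declared below. The names my_lock/my_unlock
 * are hypothetical:
 *
 *	void my_lock(atomic_t *l)
 *	{
 *		while (test_and_set(l) != 0)
 *			;			// spin until the old value was 0
 *	}
 *
 *	void my_unlock(atomic_t *l)
 *	{
 *		atomic_set(l, 0);		// release: store 0 (unlocked)
 *	}
 */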
extern void spinlock_arch(volatile int *val);

#endif