/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __amd64_ATOMIC_H__
#define __amd64_ATOMIC_H__

#include <arch/types.h>

/* 64-bit atomic counter. The volatile qualifier forces every access to go
 * through memory rather than a cached register copy. */
typedef struct { volatile __u64 count; } atomic_t;

/* Plain stores and loads suffice for set/get: aligned 64-bit accesses are
 * atomic on amd64, so no lock prefix is needed here. */
static inline void atomic_set(atomic_t *val, __u64 i)
{
	val->count = i;
}

static inline __u64 atomic_get(atomic_t *val)
{
	return val->count;
}

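/* Usage sketch (illustrative addition, not part of the original header):
 * the intended initialize/read pattern. The counter name is hypothetical.
 *
 *	atomic_t active_threads;
 *
 *	atomic_set(&active_threads, 0);		// initialize before first use
 *	...
 *	__u64 n = atomic_get(&active_threads);	// snapshot only; the value
 *						// may change right after
 */
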
static inline void atomic_inc(atomic_t *val)
{
#ifdef CONFIG_SMP
	/* The lock prefix makes the read-modify-write atomic across CPUs.
	 * The "+m" constraint (rather than "=m") tells the compiler the old
	 * value is read as well as written. */
	__asm__ volatile ("lock incq %0\n" : "+m" (val->count));
#else
	/* On a uniprocessor, incq alone cannot be interrupted mid-instruction. */
	__asm__ volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

static inline void atomic_dec(atomic_t *val)
{
#ifdef CONFIG_SMP
	__asm__ volatile ("lock decq %0\n" : "+m" (val->count));
#else
	__asm__ volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

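/* Usage sketch (illustrative addition): a reference count kept consistent
 * across CPUs. The names are hypothetical, not part of this header.
 *
 *	atomic_t refcount;
 *
 *	atomic_set(&refcount, 1);	// creator holds one reference
 *	atomic_inc(&refcount);		// another holder appears
 *	atomic_dec(&refcount);		// one holder drops out
 *
 * Note that atomic_dec() returns nothing; to act on the new value (e.g.
 * free at zero), use atomic_dec_post() defined below instead.
 */
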
/* Atomically add 1 to the counter and return its previous value. */
static inline count_t atomic_inc_pre(atomic_t *val)
{
	count_t r = 1;

	/* xaddq atomically exchanges and adds: %0 receives the old count,
	 * count receives count + 1. Loading r in C and using "+r" avoids the
	 * original "=r" constraint, which allowed the register for %0 to
	 * alias a register still needed to address %1. */
	__asm__ volatile (
		"lock xaddq %0, %1\n"
		: "+r" (r), "+m" (val->count)
	);

	return r;
}

/* Atomically subtract 1 from the counter and return its previous value. */
static inline count_t atomic_dec_pre(atomic_t *val)
{
	count_t r = -1;

	__asm__ volatile (
		"lock xaddq %0, %1\n"
		: "+r" (r), "+m" (val->count)
	);

	return r;
}

#define atomic_inc_post(val) (atomic_inc_pre(val) + 1)
#define atomic_dec_post(val) (atomic_dec_pre(val) - 1)

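/* Semantics sketch (illustrative addition): with a hypothetical counter c
 * where c.count == 5,
 *
 *	atomic_inc_pre(&c);	// returns 5, count becomes 6
 *	atomic_inc_post(&c);	// returns 7, count becomes 7
 *	atomic_dec_pre(&c);	// returns 7, count becomes 6
 *	atomic_dec_post(&c);	// returns 5, count becomes 5
 *
 * i.e. the _pre variants return the value before the operation, while the
 * _post macros derive the value after it from the same xaddq.
 */
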
/* Atomically set the counter to 1 and return its previous value. A zero
 * return means the caller set the flag first; nonzero means it was already
 * set by someone else. */
static inline __u64 test_and_set(atomic_t *val)
{
	__u64 v = 1;

	/* xchgq with a memory operand is implicitly locked, so no explicit
	 * lock prefix is needed even on SMP. */
	__asm__ volatile (
		"xchgq %0, %1\n"
		: "+r" (v), "+m" (val->count)
	);

	return v;
}


extern void spinlock_arch(volatile int *val);

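/* Sketch (illustrative addition; spinlock_arch() itself is implemented
 * elsewhere): a minimal spin loop over test_and_set could look like the
 * following, assuming the lock word uses 0 = free, 1 = held. The helper
 * name is hypothetical.
 *
 *	void spin_until_acquired(atomic_t *lock)
 *	{
 *		// Retry until the previous value was 0, i.e. we set it first.
 *		while (test_and_set(lock) != 0)
 *			;	// busy-wait; a real lock might pause/back off
 *	}
 */
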
#endif