/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __ia32_ATOMIC_H__
#define __ia32_ATOMIC_H__

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>

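/** Atomic counter. Manipulate it only through the operations below. */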
typedef struct { volatile __u32 count; } atomic_t;

static inline void atomic_set(atomic_t *val, __u32 i)
{
	val->count = i;
}

static inline __u32 atomic_get(atomic_t *val)
{
	return val->count;
}

static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
	__asm__ volatile ("lock incl %0\n" : "+m" (val->count));
#else
	__asm__ volatile ("incl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
	__asm__ volatile ("lock decl %0\n" : "+m" (val->count));
#else
	__asm__ volatile ("decl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

static inline count_t atomic_postinc(atomic_t *val)
{
	count_t r;

	__asm__ volatile (
		"movl $1, %0\n"
		"lock xaddl %0, %1\n"
		: "=r" (r), "+m" (val->count)
	);

	return r;
}

static inline count_t atomic_postdec(atomic_t *val)
{
	count_t r;

	__asm__ volatile (
		"movl $-1, %0\n"
		"lock xaddl %0, %1\n"
		: "=r" (r), "+m" (val->count)
	);

	return r;
}

#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)

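/**
 * Atomically set val to 1 and return its previous value. A return value
 * of 0 means the caller has taken the lock; xchgl with a memory operand
 * is implicitly locked, so no lock prefix is needed.
 */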
static inline __u32 test_and_set(atomic_t *val) {
	__u32 v;

	__asm__ volatile (
		"movl $1, %0\n"
		"xchgl %0, %1\n"
		: "=r" (v), "+m" (val->count)
	);

	return v;
}

/** ia32-specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
	__u32 tmp;

	preemption_disable();
	__asm__ volatile (
		"0:;"
#ifdef CONFIG_HT
		"pause;"		/* relieves the sibling hyper-thread on Pentium 4 */
#endif
		"movl %0, %1;"
		"testl %1, %1;"
		"jnz 0b;"		/* lightweight looping while the spinlock is held */

		"incl %1;"		/* tmp is 0 here; make it 1 and try to grab the lock */
		"xchgl %0, %1;"		/* atomic swap; xchgl with memory is implicitly locked */
		"testl %1, %1;"
		"jnz 0b;"		/* somebody else grabbed the lock first, spin again */
		: "+m" (val->count), "=r" (tmp)
	);
	/*
	 * Prevent instructions from the critical section from being
	 * reordered upwards, before the lock is acquired.
	 */
	CS_ENTER_BARRIER();
}
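
/*
 * Usage sketch (illustrative only, hence disabled): a minimal spinlock
 * built on the primitives above. The spinlock_example_* names are
 * hypothetical; CS_LEAVE_BARRIER() and preemption_enable() are assumed to
 * be the counterparts of the barrier and preemption calls used by
 * atomic_lock_arch() above.
 */
#if 0
static inline void spinlock_example_lock(atomic_t *val)
{
	preemption_disable();
	while (test_and_set(val))
		;			/* spin until the previous value was 0 */
	CS_ENTER_BARRIER();		/* keep the critical section below the lock */
}

static inline void spinlock_example_unlock(atomic_t *val)
{
	CS_LEAVE_BARRIER();		/* keep the critical section above the unlock */
	atomic_set(val, 0);		/* release the lock */
	preemption_enable();
}
#endif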

#endif