/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __amd64_ATOMIC_H__
#define __amd64_ATOMIC_H__

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>

/** Atomic 64-bit counter. */
typedef struct { volatile __u64 count; } atomic_t;

static inline void atomic_set(atomic_t *val, __u64 i)
{
        val->count = i;
}

static inline __u64 atomic_get(atomic_t *val)
{
        return val->count;
}

static inline void atomic_inc(atomic_t *val)
{
#ifdef CONFIG_SMP
        /* The lock prefix makes the increment atomic across CPUs. */
        __asm__ volatile ("lock incq %0\n" : "+m" (val->count));
#else
        __asm__ volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

static inline void atomic_dec(atomic_t *val)
{
#ifdef CONFIG_SMP
        __asm__ volatile ("lock decq %0\n" : "+m" (val->count));
#else
        __asm__ volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

static inline count_t atomic_postinc(atomic_t *val)
{
        count_t r;

        /*
         * xaddq exchanges its operands and then adds, so %0 ends up
         * holding the value the counter had before the increment.
         */
        __asm__ volatile (
                "movq $1, %0\n"
                "lock xaddq %0, %1\n"
                : "=r" (r), "+m" (val->count)
        );

        return r;
}

static inline count_t atomic_postdec(atomic_t *val)
{
        count_t r;

        __asm__ volatile (
                "movq $-1, %0\n"
                "lock xaddq %0, %1\n"
                : "=r" (r), "+m" (val->count)
        );

        return r;
}

/* Unlike the post-variants above, these return the new value. */
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)

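/*
 * Usage sketch (illustrative, not from the original file): because the
 * pre-decrement form returns the new value, it makes "last reference
 * dropped" checks natural. The helper name is hypothetical.
 */
static inline int example_refcount_release(atomic_t *refcnt)
{
        /* True only for the caller that released the final reference. */
        return atomic_predec(refcnt) == 0;
}
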
static inline __u64 test_and_set(atomic_t *val)
{
        __u64 v;

        /*
         * xchgq is implicitly locked; the old value is returned so the
         * caller can tell whether it was the one to set the flag.
         */
        __asm__ volatile (
                "movq $1, %0\n"
                "xchgq %0, %1\n"
                : "=r" (v), "+m" (val->count)
        );

        return v;
}

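/*
 * Illustrative sketch (not part of the original header): a naive spin
 * lock built directly on test_and_set(). The generic kernel spinlock is
 * expected to look roughly like this; the names are hypothetical.
 */
static inline void example_spin_lock(atomic_t *lock)
{
        /* Keep exchanging 1 in until the previous value reads back as 0. */
        while (test_and_set(lock) != 0)
                ;
}

static inline void example_spin_unlock(atomic_t *lock)
{
        /* A plain store of 0 releases the lock. */
        atomic_set(lock, 0);
}
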
/** AMD64-specific fast spinlock. */
static inline void atomic_lock_arch(atomic_t *val)
{
        __u64 tmp;

        preemption_disable();
        __asm__ volatile (
                "0:;"
#ifdef CONFIG_HT
                "pause;"        /* hyper-threaded Pentium 4s love this instruction */
#endif
                "mov %0, %1;"
                "testq %1, %1;"
                "jnz 0b;"       /* lightweight looping on a locked spinlock */

                "incq %1;"      /* now use the atomic operation */
                "xchgq %0, %1;"
                "testq %1, %1;"
                "jnz 0b;"
                : "+m" (val->count), "=r" (tmp)
        );
        /*
         * Prevent critical section code from bleeding out above this point.
         */
        CS_ENTER_BARRIER();
}
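
/*
 * Illustrative counterpart (an assumption, not in the original file):
 * releasing a lock taken with atomic_lock_arch(). CS_LEAVE_BARRIER()
 * and preemption_enable() are assumed to exist in <arch/barrier.h> and
 * <preemption.h>, mirroring the calls used above.
 */
static inline void example_atomic_unlock_arch(atomic_t *val)
{
        /* Keep critical section code from bleeding out below the release. */
        CS_LEAVE_BARRIER();
        atomic_set(val, 0);
        /* Balance the preemption_disable() performed by atomic_lock_arch(). */
        preemption_enable();
}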

#endif