Subversion Repositories HelenOS-historic

atomic.h (amd64): diff between Rev 1100 and Rev 1104
@@ -30,22 +30,11 @@
 #define __amd64_ATOMIC_H__
 
 #include <arch/types.h>
 #include <arch/barrier.h>
 #include <preemption.h>
-
-typedef struct { volatile __u64 count; } atomic_t;
-
-static inline void atomic_set(atomic_t *val, __u64 i)
-{
-    val->count = i;
-}
-
-static inline __u64 atomic_get(atomic_t *val)
-{
-    return val->count;
-}
+#include <typedefs.h>
 
 static inline void atomic_inc(atomic_t *val) {
 #ifdef CONFIG_SMP
     __asm__ volatile ("lock incq %0\n" : "=m" (val->count));
 #else
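The first hunk removes the locally defined atomic_t and its trivial atomic_set()/atomic_get() accessors, pulling the type in through the new #include <typedefs.h> instead, so this header keeps only the operations that genuinely need amd64 assembly. A sketch of what the shared header presumably now provides; this is simply the code removed above, assumed to have moved unchanged, not copied from Rev 1104:

/* Assumed shape of the shared definitions after the move; not the
 * verified contents of <typedefs.h>. */
typedef struct { volatile __u64 count; } atomic_t;

static inline void atomic_set(atomic_t *val, __u64 i)
{
    val->count = i;
}

static inline __u64 atomic_get(atomic_t *val)
{
    return val->count;
}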
@@ -59,26 +48,26 @@
 #else
     __asm__ volatile ("decq %0\n" : "=m" (val->count));
 #endif /* CONFIG_SMP */
 }
 
-static inline count_t atomic_postinc(atomic_t *val)
+static inline long atomic_postinc(atomic_t *val)
 {
-    count_t r;
+    long r;
 
     __asm__ volatile (
         "movq $1, %0\n"
         "lock xaddq %0, %1\n"
         : "=r" (r), "=m" (val->count)
     );
 
     return r;
 }
 
-static inline count_t atomic_postdec(atomic_t *val)
+static inline long atomic_postdec(atomic_t *val)
 {
-    count_t r;
+    long r;
 
     __asm__ volatile (
         "movq $-1, %0\n"
         "lock xaddq %0, %1\n"
         : "=r" (r), "=m" (val->count)
@@ -101,11 +90,11 @@
 
     return v;
 }
 
 
-/** AMD64 specific fast spinlock */
+/** amd64 specific fast spinlock */
 static inline void atomic_lock_arch(atomic_t *val)
 {
     __u64 tmp;
 
     preemption_disable();
@@ -114,11 +103,11 @@
 #ifdef CONFIG_HT
         "pause;" /* Pentium 4's HT love this instruction */
 #endif
         "mov %0, %1;"
         "testq %1, %1;"
-        "jnz 0b;"       /* Leightweight looping on locked spinlock */
+        "jnz 0b;"       /* Lightweight looping on locked spinlock */
 
         "incq %1;"      /* now use the atomic operation */
         "xchgq %0, %1;"
         "testq %1, %1;"
         "jnz 0b;"