/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64
 * @{
 */
/** @file
 */

#ifndef __amd64_ATOMIC_H__
#define __amd64_ATOMIC_H__

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <typedefs.h>
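
/*
 * Note: atomic_t itself comes from the generic headers included above;
 * this file only assumes that it wraps a single 64-bit counter
 * accessible as val->count.
 */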

/** Atomic increment.
 *
 * @param val Variable to be incremented.
 */
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
    /* The LOCK prefix makes the read-modify-write atomic across CPUs. */
    __asm__ volatile ("lock incq %0\n" : "+m" (val->count));
#else
    /* On UP, a single INC cannot be interrupted mid-instruction. */
    __asm__ volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

/** Atomic decrement.
 *
 * @param val Variable to be decremented.
 */
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
    __asm__ volatile ("lock decq %0\n" : "+m" (val->count));
#else
    __asm__ volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

/** Atomic post-increment.
 *
 * @param val Variable to be incremented.
 *
 * @return Value of the variable before the increment.
 */
static inline long atomic_postinc(atomic_t *val)
{
    long r = 1;

    /* XADD stores the sum in the count and the old count in r. */
    __asm__ volatile (
        "lock xaddq %1, %0\n"
        : "+m" (val->count), "+r" (r)
    );

    return r;
}

/** Atomic post-decrement.
 *
 * @param val Variable to be decremented.
 *
 * @return Value of the variable before the decrement.
 */
static inline long atomic_postdec(atomic_t *val)
{
    long r = -1;

    __asm__ volatile (
        "lock xaddq %1, %0\n"
        : "+m" (val->count), "+r" (r)
    );

    return r;
}

/* Pre-increment/pre-decrement, derived from the post- variants. */
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)
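
/*
 * Illustrative use of the counter API (a sketch, not part of this
 * header; the refcnt variable is made up):
 *
 *     atomic_t refcnt;
 *
 *     atomic_inc(&refcnt);
 *     ...
 *     if (atomic_predec(&refcnt) == 0) {
 *         ... last reference dropped ...
 *     }
 */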

/** Atomic test-and-set.
 *
 * Atomically writes 1 to the count and returns its previous value.
 *
 * @param val Variable to be tested and set.
 *
 * @return Value of the variable before the operation.
 */
static inline uint64_t test_and_set(atomic_t *val) {
    uint64_t v;

    /* XCHG with a memory operand is implicitly locked. */
    __asm__ volatile (
        "movq $1, %0\n"
        "xchgq %0, %1\n"
        : "=r" (v), "+m" (val->count)
    );

    return v;
}
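
/*
 * Illustrative use (a sketch; the lock variable is made up):
 * test_and_set() returns the previous value of the count, so a
 * non-blocking trylock can be built directly on top of it:
 *
 *     if (test_and_set(&lock) == 0) {
 *         ... lock acquired ...
 *     }
 */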

/** amd64 specific fast spinlock.
 *
 * Busy waits on the count with plain loads and only attempts the
 * atomic exchange once it has seen the lock free (test and
 * test-and-set). Disables preemption for the duration of the lock.
 *
 * @param val Spinlock count; zero means unlocked.
 */
static inline void atomic_lock_arch(atomic_t *val)
{
    uint64_t tmp;

    preemption_disable();
    __asm__ volatile (
        "0:;"
#ifdef CONFIG_HT
        "pause;"        /* Spin-wait hint; eases pressure on a
                           hyper-threaded sibling. */
#endif
        "mov %0, %1;"
        "testq %1, %1;"
        "jnz 0b;"       /* Lightweight looping on locked spinlock */

        "incq %1;"      /* now use the atomic operation */
        "xchgq %0, %1;"
        "testq %1, %1;"
        "jnz 0b;"
        : "+m" (val->count), "=r" (tmp)
    );
    /*
     * Prevent critical section code from bleeding out this way up.
     */
    CS_ENTER_BARRIER();
}
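
/*
 * The release side is not amd64 specific and lives in the generic
 * spinlock code; conceptually it is the mirror image of the above
 * (a sketch, assuming the generic atomic_set() helper):
 *
 *     CS_LEAVE_BARRIER();
 *     atomic_set(val, 0);
 *     preemption_enable();
 */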

#endif

/** @}
 */