Subversion Repositories HelenOS

Rev 4133 → Rev 4402
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia32
 * @{
 */
/** @file
 */

#ifndef KERN_ia32_ATOMIC_H_
#define KERN_ia32_ATOMIC_H_

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
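
/*
 * atomic_t itself comes from <arch/types.h>. Judging only by the way the
 * routines below access val->count, it is assumed to wrap a single
 * volatile integer counter, roughly (sketch, not the authoritative
 * definition):
 *
 *     typedef struct {
 *         volatile long count;
 *     } atomic_t;
 */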

static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
    asm volatile (
        "lock incl %[count]\n"
        : [count] "+m" (val->count)
    );
#else
    asm volatile (
        "incl %[count]\n"
        : [count] "+m" (val->count)
    );
#endif /* CONFIG_SMP */
}

static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
    asm volatile (
        "lock decl %[count]\n"
        : [count] "+m" (val->count)
    );
#else
    asm volatile (
        "decl %[count]\n"
        : [count] "+m" (val->count)
    );
#endif /* CONFIG_SMP */
}
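
/*
 * Usage sketch (hypothetical, not part of this header): a reference
 * counter built on the primitives above. The names refcnt, object_grab()
 * and object_put() are illustrative only.
 *
 *     static atomic_t refcnt;
 *
 *     static void object_grab(void)
 *     {
 *         atomic_inc(&refcnt);
 *     }
 *
 *     static void object_put(void)
 *     {
 *         atomic_dec(&refcnt);
 *     }
 *
 * On CONFIG_SMP builds the lock prefix makes the whole read-modify-write
 * cycle atomic across CPUs; uniprocessor builds can use plain incl/decl,
 * which is already atomic with respect to interrupts on a single processor.
 */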

static inline long atomic_postinc(atomic_t *val)
{
    long r = 1;

    asm volatile (
        "lock xaddl %[r], %[count]\n"
        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;
}

static inline long atomic_postdec(atomic_t *val)
{
    long r = -1;

    asm volatile (
        "lock xaddl %[r], %[count]\n"
        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;
}

#define atomic_preinc(val)  (atomic_postinc(val) + 1)
#define atomic_predec(val)  (atomic_postdec(val) - 1)
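
/*
 * atomic_postinc() is a fetch-and-add built on lock xaddl: it returns the
 * value the counter held before the increment, which makes it a natural
 * fit for handing out unique sequence numbers. Illustrative sketch only;
 * next_ticket() is not part of this header:
 *
 *     static atomic_t ticket;   // assumed to start with count == 0
 *
 *     static long next_ticket(void)
 *     {
 *         return atomic_postinc(&ticket);   // yields 0, 1, 2, ...
 *     }
 *
 * The atomic_preinc()/atomic_predec() macros above obtain the new value
 * simply by adjusting the returned old value by one.
 */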

static inline uint32_t test_and_set(atomic_t *val) {
    uint32_t v;

    asm volatile (
        "movl $1, %[v]\n"
        "xchgl %[v], %[count]\n"
        : [v] "=r" (v), [count] "+m" (val->count)
    );

    return v;
}
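
/*
 * test_and_set() stores 1 into the counter with xchgl (implicitly locked
 * on x86 when a memory operand is involved) and returns the previous
 * value, so a return of 0 means the caller won the flag. A naive
 * test-and-set spinlock could be built on it (sketch only; the kernel's
 * optimized fast path is atomic_lock_arch() below):
 *
 *     static void naive_lock(atomic_t *lock)
 *     {
 *         while (test_and_set(lock))
 *             ;   // spin until the previous value was 0
 *     }
 */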

/** ia32-specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
    uint32_t tmp;

    preemption_disable();
    asm volatile (
        "0:\n"
-#ifdef CONFIG_HT
        "pause\n"        /* Pentium 4's HT loves this instruction */
-#endif
        "mov %[count], %[tmp]\n"
        "testl %[tmp], %[tmp]\n"
        "jnz 0b\n"       /* lightweight looping on locked spinlock */

        "incl %[tmp]\n"  /* now use the atomic operation */
        "xchgl %[count], %[tmp]\n"
        "testl %[tmp], %[tmp]\n"
        "jnz 0b\n"
        : [count] "+m" (val->count), [tmp] "=&r" (tmp)
    );
    /*
     * Prevent the critical section code from bleeding out above this point.
     */
    CS_ENTER_BARRIER();
}
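
/*
 * The loop above first spins on plain reads (with pause to play nicely
 * with hyper-threaded CPUs) and attempts the locked xchgl only after it
 * has observed count == 0, which keeps bus traffic low while the lock is
 * held. The matching release is not defined in this header; a sketch of
 * what it would have to do, mirroring CS_ENTER_BARRIER() with its
 * CS_LEAVE_BARRIER() counterpart (hypothetical helper):
 *
 *     static inline void atomic_unlock_arch(atomic_t *val)
 *     {
 *         CS_LEAVE_BARRIER();   // keep the critical section above the store
 *         val->count = 0;       // an aligned 32-bit store is atomic on ia32
 *         preemption_enable();
 *     }
 */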

#endif

/** @}
 */