Subversion Repositories: HelenOS

Diff of the sparc64 atomic.h kernel header, rev 4327 against rev 4718. The two
revisions differ in a single line of atomic_lock_arch(): the inline-assembly
branch "ba %xcc, 1b" is corrected to "ba %%xcc, 1b" so that the register-prefix
"%" is properly escaped in the GCC asm template. The listing below shows the
corrected (rev 4718) text.
/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64
 * @{
 */
/** @file
 */

#ifndef KERN_sparc64_ATOMIC_H_
#define KERN_sparc64_ATOMIC_H_

#include <arch/barrier.h>
#include <arch/types.h>
#include <preemption.h>

/** Atomic add operation.
 *
 * Use the atomic compare-and-swap instruction (casx) to atomically add a
 * signed value.
 *
 * @param val Atomic variable.
 * @param i   Signed value to be added.
 *
 * @return Value of the atomic variable as it existed before the addition.
 */
static inline long atomic_add(atomic_t *val, int i)
{
    uint64_t a, b;

    do {
        volatile uintptr_t x = (uint64_t) &val->count;

        /* Read the current value and compute the desired new value. */
        a = *((uint64_t *) x);
        b = a + i;

        /*
         * casx compares the memory word with a and, if they match, swaps
         * it with b; b always receives the previous memory value. The
         * loop therefore exits only when no other CPU intervened between
         * the read and the swap.
         */
        asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)),
            "+r" (b) : "r" (a));
    } while (a != b);

    return a;
}
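
/*
 * Illustrative usage sketch, not part of the original header. It shows
 * atomic_add() used as a fetch-and-add; the function name and the quota
 * scenario are hypothetical.
 */
#if 0
static inline long example_charge_quota(atomic_t *used, int amount)
{
    /* Returns how much quota had been consumed before this charge. */
    return atomic_add(used, amount);
}
#endif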

/** Atomically increment and return the new value. */
static inline long atomic_preinc(atomic_t *val)
{
    return atomic_add(val, 1) + 1;
}

/** Atomically increment and return the previous value. */
static inline long atomic_postinc(atomic_t *val)
{
    return atomic_add(val, 1);
}

/** Atomically decrement and return the new value. */
static inline long atomic_predec(atomic_t *val)
{
    return atomic_add(val, -1) - 1;
}

/** Atomically decrement and return the previous value. */
static inline long atomic_postdec(atomic_t *val)
{
    return atomic_add(val, -1);
}

/** Atomically increment, discarding the result. */
static inline void atomic_inc(atomic_t *val)
{
    (void) atomic_add(val, 1);
}

/** Atomically decrement, discarding the result. */
static inline void atomic_dec(atomic_t *val)
{
    (void) atomic_add(val, -1);
}
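
/*
 * Illustrative usage sketch, not part of the original header. The pre-
 * variants return the value after the operation and the post- variants
 * the value before it, mirroring C's ++i versus i++. The reference-
 * counting helper below is hypothetical.
 */
#if 0
static inline int example_refcount_drop(atomic_t *refcnt)
{
    /* Nonzero when this caller released the last reference. */
    return atomic_predec(refcnt) == 0;
}
#endif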

/** Atomically set the variable to 1 and return its previous value. */
static inline long test_and_set(atomic_t *val)
{
    uint64_t v = 1;
    volatile uintptr_t x = (uint64_t) &val->count;

    /*
     * casx: if the memory word equals 0, store 1 into it; v always
     * receives the previous memory value.
     */
    asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)),
        "+r" (v) : "r" (0));

    return v;
}
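
/*
 * Illustrative usage sketch, not part of the original header. A return
 * value of 0 from test_and_set() means the caller changed the variable
 * from 0 to 1 and therefore owns it; the try-lock wrapper is hypothetical.
 */
#if 0
static inline int example_trylock(atomic_t *lock)
{
    return test_and_set(lock) == 0;
}
#endif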

/** Spin until the atomic variable is grabbed (changed from 0 to nonzero). */
static inline void atomic_lock_arch(atomic_t *val)
{
    uint64_t tmp1 = 1;
    uint64_t tmp2 = 0;

    volatile uintptr_t x = (uint64_t) &val->count;

    preemption_disable();

    asm volatile (
    /* Attempt to grab the lock: if the word is 0, atomically store 1. */
    "0:\n"
        "casx %0, %3, %1\n"
        "brz %1, 2f\n"          /* old value was 0: lock acquired */
        "nop\n"
    /* The lock is held: spin on plain loads until it reads as free. */
    "1:\n"
        "ldx %0, %2\n"
        "brz %2, 0b\n"          /* reads as free: retry the casx */
        "nop\n"
        "ba %%xcc, 1b\n"
        "nop\n"
    "2:\n"
        : "+m" (*((uint64_t *) x)), "+r" (tmp1), "+r" (tmp2) : "r" (0)
    );

    /*
     * Prevent the critical section code from bleeding out above the lock
     * acquisition.
     */
    CS_ENTER_BARRIER();
}
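
/*
 * Illustrative usage sketch, not part of the original header.
 * atomic_lock_arch() disables preemption and spins until it owns *val;
 * a matching unlock path (outside this file) would issue
 * CS_LEAVE_BARRIER(), store 0 back, and re-enable preemption. The
 * critical-section example below is hypothetical.
 */
#if 0
static inline void example_locked_update(atomic_t *lock, uint64_t *shared)
{
    atomic_lock_arch(lock);
    (*shared)++;    /* protected by the lock */
    /* ... release through the matching spinlock unlock path ... */
}
#endif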

#endif

/** @}
 */