/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia64
 * @{
 */
/** @file
 */

#ifndef KERN_ia64_ATOMIC_H_
#define KERN_ia64_ATOMIC_H_

/** Atomic addition.
 *
 * @param val       Atomic value.
 * @param imm       Value to add.
 *
 * @return      Value before addition.
 */
static inline long atomic_add(atomic_t *val, int imm)
{
    long v;

    asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v),
        "+m" (val->count) : "i" (imm));

    return v;
}

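/** Atomic test-and-set.
 *
 * Atomically writes one to the value using the xchg8 exchange
 * instruction and returns what was stored there before.
 *
 * @param val       Atomic value.
 *
 * @return      Value before the exchange.
 */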
static inline uint64_t test_and_set(atomic_t *val)
{
    uint64_t v;

    asm volatile (
        "movl %0 = 0x1;;\n"
        "xchg8 %0 = %1, %0;;\n"
        : "=r" (v), "+m" (val->count)
    );

    return v;
}

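/** Spin until the lock is acquired.
 *
 * Busy-waits (reading only) while the value is non-zero, then attempts
 * to grab the lock with test_and_set(); the outer loop retries if
 * another processor took the lock in the meantime.
 *
 * @param val       Atomic lock value.
 */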
static inline void atomic_lock_arch(atomic_t *val)
{
    do {
        while (val->count)
            ;
    } while (test_and_set(val));
}

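/** Atomic increment.
 *
 * @param val       Atomic value.
 */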
static inline void atomic_inc(atomic_t *val)
{
    atomic_add(val, 1);
}

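/** Atomic decrement.
 *
 * @param val       Atomic value.
 */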
static inline void atomic_dec(atomic_t *val)
{
    atomic_add(val, -1);
}

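/** Atomic pre-increment.
 *
 * @param val       Atomic value.
 *
 * @return      Value after addition.
 */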
static inline long atomic_preinc(atomic_t *val)
{
    return atomic_add(val, 1) + 1;
}

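/** Atomic pre-decrement.
 *
 * @param val       Atomic value.
 *
 * @return      Value after subtraction.
 */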
static inline long atomic_predec(atomic_t *val)
{
    return atomic_add(val, -1) - 1;
}

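/** Atomic post-increment.
 *
 * @param val       Atomic value.
 *
 * @return      Value before addition.
 */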
static inline long atomic_postinc(atomic_t *val)
{
    return atomic_add(val, 1);
}

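/** Atomic post-decrement.
 *
 * @param val       Atomic value.
 *
 * @return      Value before subtraction.
 */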
static inline long atomic_postdec(atomic_t *val)
{
    return atomic_add(val, -1);
}

#endif

/** @}
 */