/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
/** @addtogroup amd64
 * @{
 */
/** @file
 */
 
#ifndef __amd64_ATOMIC_H__
#define __amd64_ATOMIC_H__

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <typedefs.h>
 
/** Atomically increment the value.
 *
 * The lock prefix is needed only when other processors may touch the
 * same memory, hence the CONFIG_SMP distinction. The "+m" constraint
 * marks the operand as read-write; incq both reads and writes it.
 */
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
    __asm__ volatile ("lock incq %0\n" : "+m" (val->count));
#else
    __asm__ volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
/** Atomically decrement the value. */
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
    __asm__ volatile ("lock decq %0\n" : "+m" (val->count));
#else
    __asm__ volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
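/* Usage sketch (hypothetical caller, not part of this header), assuming
 * the generic atomic layer provides atomic_set()/atomic_get():
 *
 *     atomic_t pending;
 *
 *     atomic_set(&pending, 0);
 *     atomic_inc(&pending);    // one more request in flight
 *     atomic_dec(&pending);    // request finished
 */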
 
/** Atomically increment the value; return the value before the increment.
 *
 * xaddq adds the register to memory and leaves the old memory value in
 * the register, so a single locked instruction yields fetch-and-add.
 */
static inline long atomic_postinc(atomic_t *val)
{
    long r = 1;

    __asm__ volatile (
        "lock xaddq %1, %0\n"
        : "+m" (val->count), "+r" (r)
    );

    return r;
}
 
/** Atomically decrement the value; return the value before the decrement. */
static inline long atomic_postdec(atomic_t *val)
{
    long r = -1;

    __asm__ volatile (
        "lock xaddq %1, %0\n"
        : "+m" (val->count), "+r" (r)
    );

    return r;
}
 
/** Pre-increment and pre-decrement, derived from the post variants. */
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
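/* The pre variants return the new value, which makes them a natural fit
 * for reference counting. A minimal sketch (hypothetical object layout
 * and release() helper, not part of this header):
 *
 *     if (atomic_predec(&obj->refcnt) == 0)
 *         release(obj);
 */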
 
/** Atomically set the value to 1 and return the previous value. */
static inline uint64_t test_and_set(atomic_t *val) {
    uint64_t v;

    __asm__ volatile (
        "movq $1, %0\n"
        "xchgq %0, %1\n"
        : "=r" (v), "+m" (val->count)
    );

    return v;
}
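/* test_and_set() is the building block for try-style locking: the lock
 * is acquired iff the previous value was 0. A sketch, assuming the
 * convention that 0 means unlocked and 1 means locked:
 *
 *     static inline int spinlock_trylock_sketch(atomic_t *val)
 *     {
 *         return (test_and_set(val) == 0);
 *     }
 */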
 
/** amd64 specific fast spinlock.
 *
 * Classic test-and-test-and-set: spin on plain reads until the lock
 * looks free, then attempt to actually grab it with the atomic xchgq.
 */
static inline void atomic_lock_arch(atomic_t *val)
{
    uint64_t tmp;

    preemption_disable();
    __asm__ volatile (
        "0:;"
#ifdef CONFIG_HT
        "pause;"
#endif
        "mov %0, %1;"
        "testq %1, %1;"
        "jnz 0b;"       /* Lightweight looping on locked spinlock */

        "incq %1;"      /* now use the atomic operation */
        "xchgq %0, %1;"
        "testq %1, %1;"
        "jnz 0b;"
        : "+m" (val->count), "=r" (tmp)
    );
    /*
     * Prevent critical section code from bleeding out this way up.
     */
    CS_ENTER_BARRIER();
}
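/* The matching unlock is expected to live in the generic spinlock layer;
 * conceptually it is a barrier followed by a plain store of 0 and
 * re-enabling preemption. A sketch of the assumed shape (CS_LEAVE_BARRIER
 * is assumed to be the <arch/barrier.h> counterpart of CS_ENTER_BARRIER):
 *
 *     CS_LEAVE_BARRIER();
 *     val->count = 0;
 *     preemption_enable();
 */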
 
#endif

/** @}
 */