/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __amd64_ATOMIC_H__
#define __amd64_ATOMIC_H__

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <typedefs.h>

static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
	/* The lock prefix makes the read-modify-write atomic across CPUs. */
	__asm__ volatile ("lock incq %0\n" : "+m" (val->count));
#else
	/* On UP there is no other CPU to race with; a plain incq suffices. */
	__asm__ volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
	__asm__ volatile ("lock decq %0\n" : "+m" (val->count));
#else
	__asm__ volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
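
/*
 * Illustrative sketch (not part of the original header): using the
 * increment/decrement primitives on a shared counter. The counter name
 * is hypothetical.
 *
 *	atomic_t active_tasks;
 *
 *	atomic_inc(&active_tasks);	// task becomes active
 *	...
 *	atomic_dec(&active_tasks);	// task goes to sleep
 */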

/** Atomic post-increment; returns the value of the counter before the increment. */
static inline long atomic_postinc(atomic_t *val)
{
	long r;

	__asm__ volatile (
		"movq $1, %0\n"
		"lock xaddq %0, %1\n"	/* atomically: r = count, count += 1 */
		: "=r" (r), "+m" (val->count)
	);

	return r;
}

/** Atomic post-decrement; returns the value of the counter before the decrement. */
static inline long atomic_postdec(atomic_t *val)
{
	long r;

	__asm__ volatile (
		"movq $-1, %0\n"
		"lock xaddq %0, %1\n"	/* atomically: r = count, count -= 1 */
		: "=r" (r), "+m" (val->count)
	);

	return r;
}

/* Pre-increment/pre-decrement: derived from the post- variants by adjusting the returned value. */
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
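
/*
 * Illustrative sketch (not part of the original header): the difference
 * between the post- and pre- variants, assuming the counter starts at 0.
 *
 *	atomic_t cnt;			// cnt.count == 0
 *
 *	long a = atomic_postinc(&cnt);	// a == 0, cnt.count == 1
 *	long b = atomic_preinc(&cnt);	// b == 2, cnt.count == 2
 */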

/** Atomically set the counter to 1; returns the previous value. */
static inline __u64 test_and_set(atomic_t *val) {
	__u64 v;

	__asm__ volatile (
		"movq $1, %0\n"
		"xchgq %0, %1\n"	/* xchg with a memory operand is implicitly locked */
		: "=r" (v), "+m" (val->count)
	);

	return v;
}
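
/*
 * Illustrative sketch (not part of the original header): a minimal
 * test-and-set spinlock built on test_and_set(), assuming count == 0
 * means unlocked and count == 1 means locked. The function names are
 * hypothetical; HelenOS's real spinlock lives in the generic kernel code.
 *
 *	static inline void example_lock(atomic_t *lock)
 *	{
 *		// spin until the previous value was 0, i.e. we took a free lock
 *		while (test_and_set(lock))
 *			;
 *	}
 *
 *	static inline void example_unlock(atomic_t *lock)
 *	{
 *		lock->count = 0;	// a real unlock also needs a memory barrier
 *	}
 */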

/** amd64 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
	__u64 tmp;

	preemption_disable();
	__asm__ volatile (
		"0:;"
#ifdef CONFIG_HT
		"pause;"	/* Pentium 4 processors with hyperthreading benefit from pause in busy-wait loops */
#endif
		"movq %0, %1;"
		"testq %1, %1;"
		"jnz 0b;"	/* lightweight looping while the spinlock is held */

		"incq %1;"	/* now grab it with the atomic operation */
		"xchgq %0, %1;"
		"testq %1, %1;"
		"jnz 0b;"	/* somebody else got there first; start over */
		: "+m" (val->count), "=r" (tmp)
	);
	/*
	 * Prevent the critical section code from being reordered above this point.
	 */
	CS_ENTER_BARRIER();
}
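
/*
 * Illustrative sketch (not part of the original header): how the fast
 * spinlock pairs with an unlock. The sequence shown mirrors what a generic
 * spinlock_unlock would do, under the assumption that atomic_lock_arch()
 * left preemption disabled.
 *
 *	atomic_lock_arch(&lock);	// spins, disables preemption, CS_ENTER_BARRIER()
 *	// ... critical section ...
 *	CS_LEAVE_BARRIER();		// keep the critical section from bleeding down
 *	lock.count = 0;			// release the lock
 *	preemption_enable();		// balance the preemption_disable() above
 */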

#endif /* __amd64_ATOMIC_H__ */