/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __sparc64_ASM_H__
#define __sparc64_ASM_H__

#include <typedefs.h>
#include <arch/types.h>
#include <arch/register.h>
#include <config.h>

/** Read Processor State register.
 *
 * @return Value of PSTATE register.
 */
static inline __u64 pstate_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%pstate, %0\n" : "=r" (v));

    return v;
}

/** Write Processor State register.
 *
 * @param v New value of PSTATE register.
 */
static inline void pstate_write(__u64 v)
{
    __asm__ volatile ("wrpr %0, %1, %%pstate\n" : : "r" (v), "i" (0));
}

/** Read TICK_compare register.
 *
 * @return Value of TICK_compare register.
 */
static inline __u64 tick_compare_read(void)
{
    __u64 v;

    __asm__ volatile ("rd %%tick_cmpr, %0\n" : "=r" (v));

    return v;
}

/** Write TICK_compare register.
 *
 * @param v New value of TICK_compare register.
 */
static inline void tick_compare_write(__u64 v)
{
    __asm__ volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0));
}

/** Read TICK register.
 *
 * @return Value of TICK register.
 */
static inline __u64 tick_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%tick, %0\n" : "=r" (v));

    return v;
}

/** Write TICK register.
 *
 * @param v New value of TICK register.
 */
static inline void tick_write(__u64 v)
{
    __asm__ volatile ("wrpr %0, %1, %%tick\n" : : "r" (v), "i" (0));
}
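
/*
 * Illustrative sketch only (not part of the original header): the TICK and
 * TICK_compare accessors above are typically combined to program the next
 * timer interrupt. CLOCK_DELTA is a hypothetical constant standing for the
 * number of ticks between two interrupts.
 *
 *     tick_compare_write(tick_read() + CLOCK_DELTA);
 */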

/** Enable interrupts.
 *
 * Enable interrupts and return previous
 * value of IPL.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_enable(void) {
    pstate_reg_t pstate;
    __u64 value;

    value = pstate_read();
    pstate.value = value;
    pstate.ie = true;
    pstate_write(pstate.value);

    return (ipl_t) value;
}

/** Disable interrupts.
 *
 * Disable interrupts and return previous
 * value of IPL.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_disable(void) {
    pstate_reg_t pstate;
    __u64 value;

    value = pstate_read();
    pstate.value = value;
    pstate.ie = false;
    pstate_write(pstate.value);

    return (ipl_t) value;
}

/** Restore interrupt priority level.
 *
 * Restore IPL.
 *
 * @param ipl Saved interrupt priority level.
 */
static inline void interrupts_restore(ipl_t ipl) {
    pstate_reg_t pstate;

    pstate.value = pstate_read();
    pstate.ie = ((pstate_reg_t) ipl).ie;
    pstate_write(pstate.value);
}

/** Return interrupt priority level.
 *
 * Return IPL.
 *
 * @return Current interrupt priority level.
 */
static inline ipl_t interrupts_read(void) {
    return (ipl_t) pstate_read();
}
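
/*
 * Usage sketch (illustrative, not part of the original header): the
 * interrupts_* functions are normally paired to guard a critical section.
 * The body of the critical section below is hypothetical.
 *
 *     ipl_t ipl = interrupts_disable();
 *     ... access data shared with interrupt handlers ...
 *     interrupts_restore(ipl);
 */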

/** Return base address of current stack.
 *
 * Return the base address of the current stack.
 * The stack is assumed to be STACK_SIZE bytes long.
 * The stack must start on a page boundary.
 */
static inline __address get_stack_base(void)
{
    __address v;

    __asm__ volatile ("and %%sp, %1, %0\n" : "=r" (v) : "r" (~(STACK_SIZE-1)));

    return v;
}
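
/*
 * Worked example (illustrative; assumes STACK_SIZE is a power of two, which
 * the masking above requires): with STACK_SIZE == 0x2000, a stack pointer of
 * 0xffff2e48 is ANDed with ~0x1fff, yielding the stack base 0xffff2000.
 */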

/** Read Version Register.
 *
 * @return Value of VER register.
 */
static inline __u64 ver_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%ver, %0\n" : "=r" (v));

    return v;
}

/** Read Trap Base Address register.
 *
 * @return Current value in TBA.
 */
static inline __u64 tba_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%tba, %0\n" : "=r" (v));

    return v;
}

/** Write Trap Base Address register.
 *
 * @param v New value of TBA.
 */
static inline void tba_write(__u64 v)
{
    __asm__ volatile ("wrpr %0, %1, %%tba\n" : : "r" (v), "i" (0));
}
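
/*
 * Illustrative sketch only: tba_write() is how a kernel would install its
 * trap table during bootstrap. The symbol name trap_table is an assumption
 * made for this example; it is not defined by this header.
 *
 *     tba_write((__u64) trap_table);
 */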

/** Load __u64 from alternate space.
 *
 * @param asi ASI determining the alternate space.
 * @param va Virtual address within the ASI.
 *
 * @return Value read from the virtual address in the specified address space.
 */
static inline __u64 asi_u64_read(asi_t asi, __address va)
{
    __u64 v;

    __asm__ volatile ("ldxa [%1] %2, %0\n" : "=r" (v) : "r" (va), "i" (asi));

    return v;
}

/** Store __u64 to alternate space.
 *
 * @param asi ASI determining the alternate space.
 * @param va Virtual address within the ASI.
 * @param v Value to be written.
 */
static inline void asi_u64_write(asi_t asi, __address va, __u64 v)
{
    __asm__ volatile ("stxa %0, [%1] %2\n" : :  "r" (v), "r" (va), "i" (asi) : "memory");
}
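
/*
 * Usage sketch (illustrative): reading and writing a 64-bit word in an
 * alternate address space. ASI_EXAMPLE and the virtual address 0x0 are
 * hypothetical placeholders; real ASI numbers come from the UltraSPARC
 * manuals. Note that the "i" constraint requires the ASI to be a
 * compile-time constant.
 *
 *     __u64 data = asi_u64_read(ASI_EXAMPLE, 0x0);
 *     asi_u64_write(ASI_EXAMPLE, 0x0, data | 1);
 */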

void cpu_halt(void);
void cpu_sleep(void);
void asm_delay_loop(__u32 t);

#endif