Subversion Repositories HelenOS-historic

Rev 664
/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __sparc64_ASM_H__
#define __sparc64_ASM_H__

#include <typedefs.h>
#include <arch/types.h>
#include <arch/register.h>
#include <config.h>

/** Read Processor State register.
 *
 * @return Value of PSTATE register.
 */
static inline __u64 pstate_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%pstate, %0\n" : "=r" (v));

    return v;
}

/** Write Processor State register.
 *
 * @param v New value of PSTATE register.
 */
static inline void pstate_write(__u64 v)
{
    __asm__ volatile ("wrpr %0, %1, %%pstate\n" : : "r" (v), "i" (0));
}
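
/*
 * Note: the SPARC V9 wr/wrpr instructions write the XOR of their two source
 * operands to the destination register, so the constant 0 supplied through
 * the "i" constraint in these write wrappers makes each of them a plain move
 * of v.
 */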

/** Read TICK_compare Register.
 *
 * @return Value of TICK_compare register.
 */
static inline __u64 tick_compare_read(void)
{
    __u64 v;

    __asm__ volatile ("rd %%tick_cmpr, %0\n" : "=r" (v));

    return v;
}

/** Write TICK_compare Register.
 *
 * @param v New value of TICK_compare register.
 */
static inline void tick_compare_write(__u64 v)
{
    __asm__ volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0));
}

/** Read TICK Register.
 *
 * @return Value of TICK register.
 */
static inline __u64 tick_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%tick, %0\n" : "=r" (v));

    return v;
}

/** Write TICK Register.
 *
 * @param v New value of TICK register.
 */
static inline void tick_write(__u64 v)
{
    __asm__ volatile ("wrpr %0, %1, %%tick\n" : : "r" (v), "i" (0));
}

/** Read SOFTINT Register.
 *
 * @return Value of SOFTINT register.
 */
static inline __u64 softint_read(void)
{
    __u64 v;

    __asm__ volatile ("rd %%softint, %0\n" : "=r" (v));

    return v;
}

/** Write SOFTINT Register.
 *
 * @param v New value of SOFTINT register.
 */
static inline void softint_write(__u64 v)
{
    __asm__ volatile ("wr %0, %1, %%softint\n" : : "r" (v), "i" (0));
}

/** Enable interrupts.
 *
 * Enable interrupts and return previous
 * value of IPL.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_enable(void) {
    pstate_reg_t pstate;
    __u64 value;

    value = pstate_read();
    pstate.value = value;
    pstate.ie = true;
    pstate_write(pstate.value);

    return (ipl_t) value;
}

/** Disable interrupts.
 *
 * Disable interrupts and return previous
 * value of IPL.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_disable(void) {
    pstate_reg_t pstate;
    __u64 value;

    value = pstate_read();
    pstate.value = value;
    pstate.ie = false;
    pstate_write(pstate.value);

    return (ipl_t) value;
}

/** Restore interrupt priority level.
 *
 * Restore IPL.
 *
 * @param ipl Saved interrupt priority level.
 */
static inline void interrupts_restore(ipl_t ipl) {
    pstate_reg_t pstate;

    pstate.value = pstate_read();
    pstate.ie = ((pstate_reg_t) ipl).ie;
    pstate_write(pstate.value);
}

/** Return interrupt priority level.
 *
 * Return IPL.
 *
 * @return Current interrupt priority level.
 */
static inline ipl_t interrupts_read(void) {
    return (ipl_t) pstate_read();
}
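
/*
 * Minimal usage sketch of the disable/restore pair (illustrative only; the
 * critical-section body is a placeholder):
 *
 *     ipl_t ipl;
 *
 *     ipl = interrupts_disable();
 *     // ... code that must not be interrupted ...
 *     interrupts_restore(ipl);
 *
 * Because interrupts_restore() copies back the saved PSTATE.IE bit rather
 * than forcing it on, interrupts stay disabled if they were already disabled
 * before the matching interrupts_disable().
 */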

/** Return base address of current stack.
 *
 * Return the base address of the current stack.
 * The stack is assumed to be STACK_SIZE bytes long.
 * The stack must start on a page boundary.
 */
static inline __address get_stack_base(void)
{
    __address v;

    __asm__ volatile ("and %%sp, %1, %0\n" : "=r" (v) : "r" (~(STACK_SIZE-1)));

    return v;
}

/** Read Version Register.
 *
 * @return Value of VER register.
 */
static inline __u64 ver_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%ver, %0\n" : "=r" (v));

    return v;
}

/** Read Trap Base Address register.
 *
 * @return Current value in TBA.
 */
static inline __u64 tba_read(void)
{
    __u64 v;

    __asm__ volatile ("rdpr %%tba, %0\n" : "=r" (v));

    return v;
}

/** Write Trap Base Address register.
 *
 * @param v New value of TBA.
 */
static inline void tba_write(__u64 v)
{
    __asm__ volatile ("wrpr %0, %1, %%tba\n" : : "r" (v), "i" (0));
}

/** Load __u64 from alternate space.
 *
 * @param asi ASI determining the alternate space.
 * @param va Virtual address within the ASI.
 *
 * @return Value read from the virtual address in the specified address space.
 */
static inline __u64 asi_u64_read(asi_t asi, __address va)
{
    __u64 v;

    __asm__ volatile ("ldxa [%1] %2, %0\n" : "=r" (v) : "r" (va), "i" (asi));

    return v;
}

/** Store __u64 to alternate space.
 *
 * @param asi ASI determining the alternate space.
 * @param va Virtual address within the ASI.
 * @param v Value to be written.
 */
static inline void asi_u64_write(asi_t asi, __address va, __u64 v)
{
    __asm__ volatile ("stxa %0, [%1] %2\n" : : "r" (v), "r" (va), "i" (asi) : "memory");
}
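
/*
 * Note: the "i" constraint requires asi to be a compile-time constant,
 * because these wrappers use the immediate-ASI forms of ldxa/stxa. A
 * hypothetical call (the ASI constant name is illustrative only):
 *
 *     __u64 data = asi_u64_read(ASI_EXAMPLE, va);
 */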
242
 
263
 
243
 
264
 
244
 
265
 
245
void cpu_halt(void);
266
void cpu_halt(void);
246
void cpu_sleep(void);
267
void cpu_sleep(void);
247
void asm_delay_loop(__u32 t);
268
void asm_delay_loop(__u32 t);
248
 
269
 
249
#endif
270
#endif
250
 
271