Subversion Repositories HelenOS-historic

Diff of Rev 921 → Rev 962 (removed lines are marked with -, added lines with +)
#
# Copyright (C) 2005 Jakub Vana
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#include <arch/stack.h>
#include <arch/register.h>
#include <arch/mm/page.h>
#include <align.h>

-#define STACK_ITEMS		18
+#define STACK_ITEMS		19
#define STACK_FRAME_SIZE	ALIGN_UP((STACK_ITEMS*STACK_ITEM_SIZE) + STACK_SCRATCH_AREA_SIZE, STACK_ALIGNMENT)

#if (STACK_ITEMS % 2 == 0)
#	define STACK_FRAME_BIAS	8
#else
#	define STACK_FRAME_BIAS	16
#endif
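
/*
 * Why the bias differs (a sketch, assuming STACK_SCRATCH_AREA_SIZE is a
 * multiple of 16, as it is on ia64): the handler stores STACK_ITEMS slots
 * of 8 bytes downwards from r12 - STACK_FRAME_BIAS and reloads them
 * upwards from r12 - STACK_FRAME_SIZE + STACK_SCRATCH_AREA_SIZE. For both
 * walks to cover the same slots, the bias must equal 8 plus the padding
 * that ALIGN_UP() adds: 8 for an even number of items, 16 for an odd one.
 */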

/** Partitioning of bank 0 registers. */
#define R_OFFS 		r16
#define R_HANDLER	r17
#define R_RET		r18
#define R_TMP		r19
#define R_KSTACK_BSP	r22	/* keep in sync with before_thread_runs_arch() */
#define R_KSTACK	r23	/* keep in sync with before_thread_runs_arch() */

/** Heavyweight interrupt handler
 *
 * This macro roughly follows steps 1 through 19 described in the
 * Intel Itanium Architecture Software Developer's Manual, Section 3.4.2.
 *
 * The HEAVYWEIGHT_HANDLER macro must fit into 16 bundles (48 instructions).
 * This goal is achieved by using procedure calls after the RSE becomes
 * operational.
 *
 * Some steps are skipped (enabling and disabling interrupts).
 * Some steps are not fully supported yet (e.g. dealing with floating-point
 * context).
 *
 * @param offs Offset from the beginning of IVT.
 * @param handler Interrupt handler address.
 */
.macro HEAVYWEIGHT_HANDLER offs, handler=universal_handler
    .org ivt + \offs
	mov R_OFFS = \offs
	movl R_HANDLER = \handler ;;
	br heavyweight_handler
.endm
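
/*
 * The 16-bundle budget follows from the IVT layout below: vectors at
 * offsets below 0x5000 get 64 bundles (0x400 bytes) each, but those at
 * 0x5000 and above only 16 bundles (0x100 bytes), so the entry stub must
 * fit the smaller slot.
 */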

.global heavyweight_handler
heavyweight_handler:
    /* 1. copy interrupt registers into bank 0 */

	/*
	 * Note that r24-r31 from bank 0 can be used only as long as PSR.ic = 0.
	 */
	mov r24 = cr.iip
	mov r25 = cr.ipsr
	mov r26 = cr.iipa
	mov r27 = cr.isr
	mov r28 = cr.ifa

    /* 2. preserve predicate registers in bank 0 */
	mov r29 = pr ;;

    /* 3. switch to kernel memory stack */
    	mov r30 = cr.ipsr
	shr.u r31 = r12, VRN_SHIFT ;;

	shr.u r30 = r30, PSR_CPL_SHIFT ;;
	and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;

	/*
	 * Set p3 to true if the interrupted context executed in kernel mode.
	 * Set p4 to true if the interrupted context didn't execute in kernel mode.
	 */
	cmp.eq p3, p4 = r30, r0 ;;
	cmp.eq p1, p2 = r30, r0 ;;	/* remember IPSR setting in p1 and p2 */

	/*
	 * Set p3 to true if the stack register references kernel address space.
	 * Set p4 to true if the stack register doesn't reference kernel address space.
	 */
(p3)	cmp.eq p3, p4 = VRN_KERNEL, r31 ;;

	/*
	 * Now, p4 is true iff the stack needs to be switched to the kernel stack.
	 */
	mov r30 = r12
(p4)	mov r12 = R_KSTACK ;;

	add r31 = -STACK_FRAME_BIAS, r12 ;;
	add r12 = -STACK_FRAME_SIZE, r12
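
	/*
	 * Resulting kernel stack frame layout (r30 still holds the old r12):
	 *
	 *   old r12 - STACK_FRAME_BIAS        <- r31 (first save slot)
	 *   ...  STACK_ITEMS slots of 8 bytes each, growing down  ...
	 *   new r12 + STACK_SCRATCH_AREA_SIZE <- last save slot
	 *   new r12                           <- scratch area for callees
	 */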

    /* 4. save registers in bank 0 into memory stack */

	/*
	 * If this is the break_instruction handler,
	 * copy input parameters to the stack.
	 */
    	mov R_TMP = 0x2c00 ;;
	cmp.eq p6,p5 = R_OFFS, R_TMP ;;
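
	/*
	 * 0x2c00 is the IVT offset of the break_instruction vector (see the
	 * vector table at the end of this file).
	 */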

	/*
	 * From now on, if this is the break_instruction handler, p6 is true and p5 is false.
	 * Otherwise p6 is false and p5 is true.
	 * Note that p5 is a preserved predicate register and we make use of it.
	 */

+(p6)	st8 [r31] = r36, -8 ;;		/* save in4 */
(p6)	st8 [r31] = r35, -8 ;;		/* save in3 */
(p6) 	st8 [r31] = r34, -8 ;;		/* save in2 */
(p6)	st8 [r31] = r33, -8 ;;		/* save in1 */
(p6)	st8 [r31] = r32, -8 ;;		/* save in0 */
-(p5)	add r31 = -32, r31 ;;
+(p5)	add r31 = -40, r31 ;;
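
	/*
	 * The five slots above preserve the stacked input registers in0-in4,
	 * presumably the arguments passed via the break instruction (syscalls).
	 * On the non-break path, p5 skips the same 5 * 8 = 40 bytes so the
	 * frame layout stays identical; Rev 962 added the in4 slot, which is
	 * why STACK_ITEMS grew from 18 to 19 and the skip from 32 to 40 bytes.
	 */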

	st8 [r31] = r30, -8 ;;		/* save old stack pointer */

	st8 [r31] = r29, -8 ;;		/* save predicate registers */

	st8 [r31] = r24, -8 ;;		/* save cr.iip */
	st8 [r31] = r25, -8 ;;		/* save cr.ipsr */
	st8 [r31] = r26, -8 ;;		/* save cr.iipa */
	st8 [r31] = r27, -8 ;;		/* save cr.isr */
	st8 [r31] = r28, -8 ;;		/* save cr.ifa */

    /* 5. RSE switch from interrupted context */
	mov r24 = ar.rsc
	mov r25 = ar.pfs
	cover
	mov r26 = cr.ifs

	st8 [r31] = r24, -8 ;;		/* save ar.rsc */
	st8 [r31] = r25, -8 ;;		/* save ar.pfs */
	st8 [r31] = r26, -8		/* save cr.ifs */

	and r24 = ~(RSC_PL_MASK), r24 ;;
	and r30 = ~(RSC_MODE_MASK), r24 ;;
	mov ar.rsc = r30 ;;		/* update RSE state */
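
	/*
	 * Clearing RSC.mode puts the RSE into enforced lazy mode, preventing
	 * spontaneous spills and fills while ar.bspstore is switched below;
	 * r24, with only RSC.pl cleared (privilege level 0), is written back
	 * to ar.rsc at the end of this step.
	 */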

	mov r27 = ar.rnat
	mov r28 = ar.bspstore ;;

	/*
	 * Inspect BSPSTORE to figure out whether it is necessary to switch to kernel BSPSTORE.
	 */
(p1)	shr.u r30 = r28, VRN_SHIFT ;;
(p1)	cmp.eq p1, p2 = VRN_KERNEL, r30 ;;

	/*
	 * If BSPSTORE needs to be switched, p1 is false and p2 is true.
	 */
(p1)	mov r30 = r28
(p2)	mov r30 = R_KSTACK_BSP ;;
(p2)	mov ar.bspstore = r30 ;;

	mov r29 = ar.bsp

	st8 [r31] = r27, -8 ;;		/* save ar.rnat */
	st8 [r31] = r30, -8 ;;		/* save new value written to ar.bspstore */
	st8 [r31] = r28, -8 ;;		/* save ar.bspstore */
	st8 [r31] = r29, -8 		/* save ar.bsp */

	mov ar.rsc = r24		/* restore RSE's setting + kernel privileges */

    /* steps 6 - 15 are done by heavyweight_handler_inner() */
	mov R_RET = b0 			/* save b0 belonging to interrupted context */
	br.call.sptk.many b0 = heavyweight_handler_inner
0:	mov b0 = R_RET			/* restore b0 belonging to the interrupted context */

    /* 16. RSE switch to interrupted context */
	cover				/* allocate zero-size frame (step 1 from the Intel docs) */

	add r31 = STACK_SCRATCH_AREA_SIZE, r12 ;;

	ld8 r30 = [r31], +8 ;;		/* load ar.bsp */
	ld8 r29 = [r31], +8 ;;   	/* load ar.bspstore */
	ld8 r28 = [r31], +8 ;;   	/* load ar.bspstore_new */
	sub r27 = r30 , r28 ;;		/* calculate loadrs (step 2) */
	shl r27 = r27, 16
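
	/*
	 * ar.bsp - ar.bspstore (as saved above) is the number of bytes the
	 * RSE holds dirty on the kernel backing store; shifted left by 16 it
	 * lands in the RSC.loadrs field (bits 16-29), telling the loadrs
	 * instruction below how much to reload beneath the current frame.
	 */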

	mov r24 = ar.rsc ;;
	and r30 = ~3, r24 ;;
	or  r24 = r30 , r27 ;;
	mov ar.rsc = r24 ;;		/* place RSE in enforced lazy mode */

	loadrs 				/* (step 3) */

	ld8 r27 = [r31], +8 ;;		/* load ar.rnat */
	ld8 r26 = [r31], +8 ;;		/* load cr.ifs */
	ld8 r25 = [r31], +8 ;;		/* load ar.pfs */
	ld8 r24 = [r31], +8 ;;		/* load ar.rsc */

	mov ar.bspstore = r29 ;;	/* (step 4) */
	mov ar.rnat = r27		/* (step 5) */

	mov ar.pfs = r25		/* (step 6) */
	mov cr.ifs = r26

	mov ar.rsc = r24		/* (step 7) */

    /* 17. restore interruption state from memory stack */
	ld8 r28 = [r31], +8 ;;		/* load cr.ifa */
	ld8 r27 = [r31], +8 ;;		/* load cr.isr */
	ld8 r26 = [r31], +8 ;;		/* load cr.iipa */
	ld8 r25 = [r31], +8 ;;		/* load cr.ipsr */
	ld8 r24 = [r31], +8 ;;		/* load cr.iip */

	mov cr.iip = r24
	mov cr.ipsr = r25
	mov cr.iipa = r26
	mov cr.isr = r27
	mov cr.ifa = r28

    /* 18. restore predicate registers from memory stack */
	ld8 r29 = [r31], +8 ;;		/* load predicate registers */
	mov pr = r29

    /* 19. return from interruption */
    	ld8 r12 = [r31]			/* load stack pointer */
	rfi ;;

.global heavyweight_handler_inner
heavyweight_handler_inner:
	/*
	 * From this point, the rest of the interrupted context
	 * will be preserved in stacked registers and backing store.
	 */
	alloc loc0 = ar.pfs, 0, 47, 2, 0 ;;
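
	/*
	 * alloc operands: 0 inputs, 47 locals (loc0-loc46), 2 outputs
	 * (out0, out1), 0 rotating registers; the previous function state
	 * (ar.pfs) is kept in loc0 and restored before returning.
	 */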

	/* bank 0 is going to be shadowed, copy essential data from there */
	mov loc1 = R_RET	/* b0 belonging to interrupted context */
	mov loc2 = R_HANDLER
	mov out0 = R_OFFS

	add out1 = STACK_SCRATCH_AREA_SIZE, r12

    /* 6. switch to bank 1 and reenable PSR.ic */
	ssm PSR_IC_MASK
	bsw.1 ;;
	srlz.d
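
	/*
	 * ssm only queues the PSR.ic update; srlz.d serializes it so that
	 * interruption collection is observably on before execution goes any
	 * further.
	 */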

    /* 7. preserve branch and application registers */
    	mov loc3 = ar.unat
	mov loc4 = ar.lc
	mov loc5 = ar.ec
	mov loc6 = ar.ccv
	mov loc7 = ar.csd
	mov loc8 = ar.ssd

	mov loc9 = b0
	mov loc10 = b1
	mov loc11 = b2
	mov loc12 = b3
	mov loc13 = b4
	mov loc14 = b5
	mov loc15 = b6
	mov loc16 = b7

    /* 8. preserve general and floating-point registers */
	/* TODO: save floating-point context */
	mov loc17 = r1
	mov loc18 = r2
	mov loc19 = r3
	mov loc20 = r4
	mov loc21 = r5
	mov loc22 = r6
	mov loc23 = r7
(p5)	mov loc24 = r8		/* only if not in break_instruction handler */
	mov loc25 = r9
	mov loc26 = r10
	mov loc27 = r11
	/* skip r12 (stack pointer) */
	mov loc28 = r13
	mov loc29 = r14
	mov loc30 = r15
	mov loc31 = r16
	mov loc32 = r17
	mov loc33 = r18
	mov loc34 = r19
	mov loc35 = r20
	mov loc36 = r21
	mov loc37 = r22
	mov loc38 = r23
	mov loc39 = r24
	mov loc40 = r25
	mov loc41 = r26
	mov loc42 = r27
	mov loc43 = r28
	mov loc44 = r29
	mov loc45 = r30
	mov loc46 = r31

    /* 9. skipped (will not enable interrupts) */
	/*
    	 * ssm PSR_I_MASK
	 * ;;
	 * srlz.d
	 */

    /* 10. call handler */
    	movl r1 = _hardcoded_load_address
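
	/*
	 * r1 serves as the global pointer (gp); it is presumably set to the
	 * kernel load address here so that the C handler invoked through b1
	 * below can address kernel global data.
	 */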

    	mov b1 = loc2
	br.call.sptk.many b0 = b1

    /* 11. return from handler */
0:

    /* 12. skipped (will not disable interrupts) */
	/*
    	 * rsm PSR_I_MASK
	 * ;;
	 * srlz.d
	 */

    /* 13. restore general and floating-point registers */
	/* TODO: restore floating-point context */
	mov r1 = loc17
	mov r2 = loc18
	mov r3 = loc19
	mov r4 = loc20
	mov r5 = loc21
	mov r6 = loc22
	mov r7 = loc23
(p5)	mov r8 = loc24		/* only if not in break_instruction handler */
	mov r9 = loc25
	mov r10 = loc26
	mov r11 = loc27
	/* skip r12 (stack pointer) */
	mov r13 = loc28
	mov r14 = loc29
	mov r15 = loc30
	mov r16 = loc31
	mov r17 = loc32
	mov r18 = loc33
	mov r19 = loc34
	mov r20 = loc35
	mov r21 = loc36
	mov r22 = loc37
	mov r23 = loc38
	mov r24 = loc39
	mov r25 = loc40
	mov r26 = loc41
	mov r27 = loc42
	mov r28 = loc43
	mov r29 = loc44
	mov r30 = loc45
	mov r31 = loc46

    /* 14. restore branch and application registers */
    	mov ar.unat = loc3
	mov ar.lc = loc4
	mov ar.ec = loc5
	mov ar.ccv = loc6
	mov ar.csd = loc7
	mov ar.ssd = loc8

	mov b0 = loc9
	mov b1 = loc10
	mov b2 = loc11
	mov b3 = loc12
	mov b4 = loc13
	mov b5 = loc14
	mov b6 = loc15
	mov b7 = loc16

    /* 15. disable PSR.ic and switch to bank 0 */
	rsm PSR_IC_MASK
	bsw.0 ;;
	srlz.d

	mov R_RET = loc1
	mov ar.pfs = loc0
	br.ret.sptk.many b0

.global ivt
.align 32768
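
/*
 * The 32 KB alignment is architectural: cr.iva, which holds the IVT base
 * address, must point to a 32 KB aligned area.
 */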
ivt:
	HEAVYWEIGHT_HANDLER 0x0000
	HEAVYWEIGHT_HANDLER 0x0400
	HEAVYWEIGHT_HANDLER 0x0800
	HEAVYWEIGHT_HANDLER 0x0c00 alternate_instruction_tlb_fault
	HEAVYWEIGHT_HANDLER 0x1000 alternate_data_tlb_fault
	HEAVYWEIGHT_HANDLER 0x1400 data_nested_tlb_fault
	HEAVYWEIGHT_HANDLER 0x1800
	HEAVYWEIGHT_HANDLER 0x1c00
	HEAVYWEIGHT_HANDLER 0x2000 data_dirty_bit_fault
	HEAVYWEIGHT_HANDLER 0x2400 instruction_access_bit_fault
	HEAVYWEIGHT_HANDLER 0x2800 data_access_bit_fault
	HEAVYWEIGHT_HANDLER 0x2c00 break_instruction
	HEAVYWEIGHT_HANDLER 0x3000 external_interrupt	/* For external interrupts, the heavyweight handler is used. */
	HEAVYWEIGHT_HANDLER 0x3400
	HEAVYWEIGHT_HANDLER 0x3800
	HEAVYWEIGHT_HANDLER 0x3c00
	HEAVYWEIGHT_HANDLER 0x4000
	HEAVYWEIGHT_HANDLER 0x4400
	HEAVYWEIGHT_HANDLER 0x4800
	HEAVYWEIGHT_HANDLER 0x4c00

	HEAVYWEIGHT_HANDLER 0x5000 page_not_present
	HEAVYWEIGHT_HANDLER 0x5100
	HEAVYWEIGHT_HANDLER 0x5200
	HEAVYWEIGHT_HANDLER 0x5300
	HEAVYWEIGHT_HANDLER 0x5400 general_exception
	HEAVYWEIGHT_HANDLER 0x5500
	HEAVYWEIGHT_HANDLER 0x5600
	HEAVYWEIGHT_HANDLER 0x5700
	HEAVYWEIGHT_HANDLER 0x5800
	HEAVYWEIGHT_HANDLER 0x5900
	HEAVYWEIGHT_HANDLER 0x5a00
	HEAVYWEIGHT_HANDLER 0x5b00
	HEAVYWEIGHT_HANDLER 0x5c00
	HEAVYWEIGHT_HANDLER 0x5d00
	HEAVYWEIGHT_HANDLER 0x5e00
	HEAVYWEIGHT_HANDLER 0x5f00

	HEAVYWEIGHT_HANDLER 0x6000
	HEAVYWEIGHT_HANDLER 0x6100
	HEAVYWEIGHT_HANDLER 0x6200
	HEAVYWEIGHT_HANDLER 0x6300
	HEAVYWEIGHT_HANDLER 0x6400
	HEAVYWEIGHT_HANDLER 0x6500
	HEAVYWEIGHT_HANDLER 0x6600
	HEAVYWEIGHT_HANDLER 0x6700
	HEAVYWEIGHT_HANDLER 0x6800
	HEAVYWEIGHT_HANDLER 0x6900
	HEAVYWEIGHT_HANDLER 0x6a00
	HEAVYWEIGHT_HANDLER 0x6b00
	HEAVYWEIGHT_HANDLER 0x6c00
	HEAVYWEIGHT_HANDLER 0x6d00
	HEAVYWEIGHT_HANDLER 0x6e00
	HEAVYWEIGHT_HANDLER 0x6f00

	HEAVYWEIGHT_HANDLER 0x7000
	HEAVYWEIGHT_HANDLER 0x7100
	HEAVYWEIGHT_HANDLER 0x7200
	HEAVYWEIGHT_HANDLER 0x7300
	HEAVYWEIGHT_HANDLER 0x7400
	HEAVYWEIGHT_HANDLER 0x7500
	HEAVYWEIGHT_HANDLER 0x7600
	HEAVYWEIGHT_HANDLER 0x7700
	HEAVYWEIGHT_HANDLER 0x7800
	HEAVYWEIGHT_HANDLER 0x7900
	HEAVYWEIGHT_HANDLER 0x7a00
	HEAVYWEIGHT_HANDLER 0x7b00
	HEAVYWEIGHT_HANDLER 0x7c00
	HEAVYWEIGHT_HANDLER 0x7d00
	HEAVYWEIGHT_HANDLER 0x7e00
	HEAVYWEIGHT_HANDLER 0x7f00
475