Subversion Repositories HelenOS

#
# Copyright (C) 2005 Jakub Vana
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#include <arch/stack.h>
#include <arch/register.h>

#define STACK_ITEMS		12
#define STACK_FRAME_SIZE	((STACK_ITEMS*STACK_ITEM_SIZE) + STACK_SCRATCH_AREA_SIZE)
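
/*
 * The 12 stack items correspond to the registers saved by steps 4 and 5
 * below: pr, cr.iip, cr.ipsr, cr.iipa, cr.isr, cr.ifa, ar.rsc, ar.pfs,
 * cr.ifs, ar.rnat, ar.bspstore and ar.bsp.
 */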

#if (STACK_FRAME_SIZE % STACK_ALIGNMENT != 0)
#error Memory stack must be 16-byte aligned.
#endif

/** Heavyweight interrupt handler
 *
 * This macro roughly follows steps 1 through 19 described in
 * Intel Itanium Architecture Software Developer's Manual, Chapter 3.4.2.
 *
 * The HEAVYWEIGHT_HANDLER macro must fit into 16 bundles (48 instructions).
 * This goal is achieved by using procedure calls after the RSE becomes operational.
 *
 * Some steps are skipped (enabling and disabling interrupts).
 * Some steps are not fully supported yet (e.g. interruptions
 * from userspace and floating-point context).
 *
 * @param offs Offset from the beginning of the IVT.
 * @param handler Interrupt handler address.
 */
.macro HEAVYWEIGHT_HANDLER offs, handler=universal_handler
    .org ivt + \offs
	mov r24 = \offs
	movl r25 = \handler ;;
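	/*
	 * Pass the vector offset and the handler address to the common
	 * heavyweight_handler code in the kernel-reserved application
	 * registers ar.k0 and ar.k1; they are read back into r26 and r25
	 * before heavyweight_handler_inner() is called.
	 */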
	mov ar.k0 = r24
	mov ar.k1 = r25
	br heavyweight_handler
.endm

.global heavyweight_handler
heavyweight_handler:
    /* 1. copy interrupt registers into bank 0 */
	mov r24 = cr.iip
	mov r25 = cr.ipsr
	mov r26 = cr.iipa
	mov r27 = cr.isr
	mov r28 = cr.ifa
	
    /* 2. preserve predicate register into bank 0 */
	mov r29 = pr ;;
	
    /* 3. switch to kernel memory stack */
	/* TODO: support interruptions from userspace */
	/* assume kernel stack */
	
	add r31 = -8, r12 ;;
	add r12 = -STACK_FRAME_SIZE, r12
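	/*
	 * r31 now points to the highest 8-byte slot of the new frame;
	 * the descending st8 [r31] = ..., -8 stores below lay out the
	 * STACK_ITEMS saved registers above the scratch area.
	 */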

    /* 4. save registers in bank 0 into memory stack */
	st8 [r31] = r29, -8 ;;	/* save predicate registers */

	st8 [r31] = r24, -8 ;;	/* save cr.iip */
	st8 [r31] = r25, -8 ;;	/* save cr.ipsr */
	st8 [r31] = r26, -8 ;;	/* save cr.iipa */
	st8 [r31] = r27, -8 ;;	/* save cr.isr */
	st8 [r31] = r28, -8	/* save cr.ifa */

    /* 5. RSE switch from interrupted context */
	mov r24 = ar.rsc
	mov r25 = ar.pfs
	cover
	mov r26 = cr.ifs
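	/*
	 * With PSR.ic still off, cover has copied the interrupted frame's
	 * marker into cr.ifs, so the size of the interrupted register
	 * frame is preserved along with the rest of the RSE state below.
	 */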
	
	st8 [r31] = r24, -8;;	/* save ar.rsc */
	st8 [r31] = r25, -8;;	/* save ar.pfs */
	st8 [r31] = r26, -8	/* save cr.ifs */
	
	and r30 = ~3, r24 ;;
	mov ar.rsc = r30 ;;	/* place RSE in enforced lazy mode */
	
	mov r27 = ar.rnat
	mov r28 = ar.bspstore ;;
	
	/* assume kernel backing store */
	mov ar.bspstore = r28 ;;
	
	mov r29 = ar.bsp
	
	st8 [r31] = r27, -8 ;;	/* save ar.rnat */
	st8 [r31] = r28, -8 ;;	/* save ar.bspstore */
	st8 [r31] = r29, -8	/* save ar.bsp */
	
	mov ar.rsc = r24	/* restore RSE's setting */
	
    /* steps 6 - 15 are done by heavyweight_handler_inner() */
	mov r24 = b0 		/* save b0 belonging to interrupted context */
	mov r26 = ar.k0
	mov r25 = ar.k1
	br.call.sptk.many rp = heavyweight_handler_inner
0:	mov b0 = r24		/* restore b0 belonging to the interrupted context */

    /* 16. RSE switch to interrupted context */
	cover			/* allocate zero-size frame (step 1 from Intel docs) */

	add r31 = STACK_SCRATCH_AREA_SIZE, r12 ;;

	mov r28 = ar.bspstore   	/* calculate loadrs (step 2) */
	ld8 r29 = [r31], +8 ;;		/* load ar.bsp */
	sub r27 = r29 , r28 ;;
	shl r27 = r27, 16
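	/*
	 * The number of dirty bytes (saved ar.bsp minus current ar.bspstore)
	 * is shifted into the ar.rsc.loadrs field (starting at bit 16) so
	 * that the loadrs instruction below reloads exactly that many bytes
	 * from the backing store.
	 */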

	mov r24 = ar.rsc ;;
	and r30 = ~3, r24 ;;
	or  r24 = r30 , r27 ;;
	mov ar.rsc = r24 ;;		/* place RSE in enforced lazy mode */

	loadrs 				/* (step 3) */

	ld8 r28 = [r31], +8 ;;    	/* load ar.bspstore */
	ld8 r27 = [r31], +8 ;;		/* load ar.rnat */
	ld8 r26 = [r31], +8 ;;		/* load cr.ifs */
	ld8 r25 = [r31], +8 ;;		/* load ar.pfs */
	ld8 r24 = [r31], +8 ;;		/* load ar.rsc */

	mov ar.bspstore = r28 ;;	/* (step 4) */
	mov ar.rnat = r27		/* (step 5) */

	mov ar.pfs = r25		/* (step 6) */
	mov cr.ifs = r26

	mov ar.rsc = r24		/* (step 7) */

    /* 17. restore interruption state from memory stack */
	ld8 r28 = [r31], +8 ;;	/* load cr.ifa */
	ld8 r27 = [r31], +8 ;;	/* load cr.isr */
	ld8 r26 = [r31], +8 ;;	/* load cr.iipa */
	ld8 r25 = [r31], +8 ;;	/* load cr.ipsr */
	ld8 r24 = [r31], +8 ;;	/* load cr.iip */

	mov cr.iip = r24
	mov cr.ipsr = r25
	mov cr.iipa = r26
	mov cr.isr = r27
	mov cr.ifa = r28

    /* 18. restore predicate registers from memory stack */
	ld8 r29 = [r31] , -8 ;;	/* load predicate registers */
	mov pr = r29
	
    /* 19. return from interruption */
    	add r12 = STACK_FRAME_SIZE, r12
	rfi ;;

.global heavyweight_handler_inner
heavyweight_handler_inner:
	/*
	 * From this point, the rest of the interrupted context
	 * will be preserved in stacked registers and backing store.
	 */
	alloc loc0 = ar.pfs, 0, 47, 2, 0 ;;
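	/*
	 * The new register frame has 0 inputs, 47 locals (loc0-loc46),
	 * 2 outputs (out0, out1) and no rotating registers; the previous
	 * function state (ar.pfs) is kept in loc0.
	 */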
	
	/* bank 0 is going to be shadowed, copy essential data from there */
	mov loc1 = r24	/* b0 belonging to interrupted context */
	mov loc2 = r25
	mov out0 = r26
	
	add out1 = STACK_SCRATCH_AREA_SIZE, r12

    /* 6. switch to bank 1 and reenable PSR.ic */
	ssm PSR_IC_MASK
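	/* PSR_IC_MASK is the PSR.ic bit, i.e. bit 13 (0x2000) */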
	bsw.1 ;;
	srlz.d
	
    /* 7. preserve branch and application registers */
    	mov loc3 = ar.unat
	mov loc4 = ar.lc
	mov loc5 = ar.ec
	mov loc6 = ar.ccv
	mov loc7 = ar.csd
	mov loc8 = ar.ssd
	
	mov loc9 = b0
	mov loc10 = b1
	mov loc11 = b2
	mov loc12 = b3
	mov loc13 = b4
	mov loc14 = b5
	mov loc15 = b6
	mov loc16 = b7
	
    /* 8. preserve general and floating-point registers */
	/* TODO: save floating-point context */
	mov loc17 = r1
	mov loc18 = r2
	mov loc19 = r3
	mov loc20 = r4
	mov loc21 = r5
	mov loc22 = r6
	mov loc23 = r7
	mov loc24 = r8
	mov loc25 = r9
	mov loc26 = r10
	mov loc27 = r11
	/* skip r12 (stack pointer) */
	mov loc28 = r13
	mov loc29 = r14
	mov loc30 = r15
	mov loc31 = r16
	mov loc32 = r17
	mov loc33 = r18
	mov loc34 = r19
	mov loc35 = r20
	mov loc36 = r21
	mov loc37 = r22
	mov loc38 = r23
	mov loc39 = r24
	mov loc40 = r25
	mov loc41 = r26
	mov loc42 = r27
	mov loc43 = r28
	mov loc44 = r29
	mov loc45 = r30
	mov loc46 = r31
    
    /* 9. skipped (will not enable interrupts) */
	/*
    	 * ssm PSR_I_MASK
	 * ;;
	 * srlz.d
	 */

    /* 10. call handler */
    	mov b1 = loc2
	br.call.sptk.many b0 = b1

    /* 11. return from handler */
0:
	
    /* 12. skipped (will not disable interrupts) */
	/*
    	 * rsm PSR_I_MASK
	 * ;;
	 * srlz.d
	 */

    /* 13. restore general and floating-point registers */
	/* TODO: restore floating-point context */
	mov r1 = loc17
	mov r2 = loc18
	mov r3 = loc19
	mov r4 = loc20
	mov r5 = loc21
	mov r6 = loc22
	mov r7 = loc23
	mov r8 = loc24
	mov r9 = loc25
	mov r10 = loc26
	mov r11 = loc27
	/* skip r12 (stack pointer) */
	mov r13 = loc28
	mov r14 = loc29
	mov r15 = loc30
	mov r16 = loc31
	mov r17 = loc32
	mov r18 = loc33
	mov r19 = loc34
	mov r20 = loc35
	mov r21 = loc36
	mov r22 = loc37
	mov r23 = loc38
	mov r24 = loc39
	mov r25 = loc40
	mov r26 = loc41
	mov r27 = loc42
	mov r28 = loc43
	mov r29 = loc44
	mov r30 = loc45
	mov r31 = loc46
	
    /* 14. restore branch and application registers */
    	mov ar.unat = loc3
	mov ar.lc = loc4
	mov ar.ec = loc5
	mov ar.ccv = loc6
	mov ar.csd = loc7
	mov ar.ssd = loc8
	
	mov b0 = loc9
	mov b1 = loc10
	mov b2 = loc11
	mov b3 = loc12
	mov b4 = loc13
	mov b5 = loc14
	mov b6 = loc15
	mov b7 = loc16
	
    /* 15. disable PSR.ic and switch to bank 0 */
	rsm PSR_IC_MASK
	bsw.0 ;;
	srlz.d

	mov r24 = loc1
	mov ar.pfs = loc0
	br.ret.sptk.many b0

.global ivt
.align 32768
ivt:
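	/*
	 * The IVT is 32K aligned; the first 20 vectors are spaced 0x400
	 * bytes apart, the remaining vectors 0x100 bytes apart, matching
	 * the offsets used below.
	 */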
	HEAVYWEIGHT_HANDLER 0x0000
	HEAVYWEIGHT_HANDLER 0x0400
	HEAVYWEIGHT_HANDLER 0x0800
	HEAVYWEIGHT_HANDLER 0x0c00
	HEAVYWEIGHT_HANDLER 0x1000
	HEAVYWEIGHT_HANDLER 0x1400
	HEAVYWEIGHT_HANDLER 0x1800
	HEAVYWEIGHT_HANDLER 0x1c00
	HEAVYWEIGHT_HANDLER 0x2000
	HEAVYWEIGHT_HANDLER 0x2400
	HEAVYWEIGHT_HANDLER 0x2800
	HEAVYWEIGHT_HANDLER 0x2c00 break_instruction
	HEAVYWEIGHT_HANDLER 0x3000 external_interrupt	/* For external interrupt, heavyweight handler is used. */
	HEAVYWEIGHT_HANDLER 0x3400
	HEAVYWEIGHT_HANDLER 0x3800
	HEAVYWEIGHT_HANDLER 0x3c00
	HEAVYWEIGHT_HANDLER 0x4000
	HEAVYWEIGHT_HANDLER 0x4400
	HEAVYWEIGHT_HANDLER 0x4800
	HEAVYWEIGHT_HANDLER 0x4c00

	HEAVYWEIGHT_HANDLER 0x5000
	HEAVYWEIGHT_HANDLER 0x5100
	HEAVYWEIGHT_HANDLER 0x5200
	HEAVYWEIGHT_HANDLER 0x5300
	HEAVYWEIGHT_HANDLER 0x5400 general_exception
	HEAVYWEIGHT_HANDLER 0x5500
	HEAVYWEIGHT_HANDLER 0x5600
	HEAVYWEIGHT_HANDLER 0x5700
	HEAVYWEIGHT_HANDLER 0x5800
	HEAVYWEIGHT_HANDLER 0x5900
	HEAVYWEIGHT_HANDLER 0x5a00
	HEAVYWEIGHT_HANDLER 0x5b00
	HEAVYWEIGHT_HANDLER 0x5c00
	HEAVYWEIGHT_HANDLER 0x5d00
	HEAVYWEIGHT_HANDLER 0x5e00
	HEAVYWEIGHT_HANDLER 0x5f00
	
	HEAVYWEIGHT_HANDLER 0x6000
	HEAVYWEIGHT_HANDLER 0x6100
	HEAVYWEIGHT_HANDLER 0x6200
	HEAVYWEIGHT_HANDLER 0x6300
	HEAVYWEIGHT_HANDLER 0x6400
	HEAVYWEIGHT_HANDLER 0x6500
	HEAVYWEIGHT_HANDLER 0x6600
	HEAVYWEIGHT_HANDLER 0x6700
	HEAVYWEIGHT_HANDLER 0x6800
	HEAVYWEIGHT_HANDLER 0x6900
	HEAVYWEIGHT_HANDLER 0x6a00
	HEAVYWEIGHT_HANDLER 0x6b00
	HEAVYWEIGHT_HANDLER 0x6c00
	HEAVYWEIGHT_HANDLER 0x6d00
	HEAVYWEIGHT_HANDLER 0x6e00
	HEAVYWEIGHT_HANDLER 0x6f00

	HEAVYWEIGHT_HANDLER 0x7000
	HEAVYWEIGHT_HANDLER 0x7100
	HEAVYWEIGHT_HANDLER 0x7200
	HEAVYWEIGHT_HANDLER 0x7300
	HEAVYWEIGHT_HANDLER 0x7400
	HEAVYWEIGHT_HANDLER 0x7500
	HEAVYWEIGHT_HANDLER 0x7600
	HEAVYWEIGHT_HANDLER 0x7700
	HEAVYWEIGHT_HANDLER 0x7800
	HEAVYWEIGHT_HANDLER 0x7900
	HEAVYWEIGHT_HANDLER 0x7a00
	HEAVYWEIGHT_HANDLER 0x7b00
	HEAVYWEIGHT_HANDLER 0x7c00
	HEAVYWEIGHT_HANDLER 0x7d00
	HEAVYWEIGHT_HANDLER 0x7e00
	HEAVYWEIGHT_HANDLER 0x7f00