#
# Copyright (c) 2005 Jakub Vana
# Copyright (c) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
29 | |||
443 | jermar | 30 | #include <arch/stack.h> |
478 | jermar | 31 | #include <arch/register.h> |
912 | jermar | 32 | #include <arch/mm/page.h> |
33 | #include <align.h> |
||
212 | vana | 34 | |
#define FRS_TO_SAVE		30
#define STACK_ITEMS		(21 + FRS_TO_SAVE * 2)
#define STACK_FRAME_SIZE	ALIGN_UP((STACK_ITEMS * STACK_ITEM_SIZE) + STACK_SCRATCH_AREA_SIZE, STACK_ALIGNMENT)

#if (STACK_ITEMS % 2 == 0)
#	define STACK_FRAME_BIAS	8
#else
#	define STACK_FRAME_BIAS	16
#endif
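
/*
 * A worked example of the frame layout above, assuming the values of
 * STACK_ITEM_SIZE, STACK_SCRATCH_AREA_SIZE and STACK_ALIGNMENT from
 * <arch/stack.h> are 8, 16 and 16 respectively:
 *
 *   STACK_ITEMS      = 21 + 30 * 2 = 81
 *   STACK_FRAME_SIZE = ALIGN_UP(81 * 8 + 16, 16) = ALIGN_UP(664, 16) = 672
 *
 * STACK_ITEMS is odd, so STACK_FRAME_BIAS is 16. The bias compensates for the
 * alignment padding so that r31, initialized below as the stack pointer minus
 * the bias, always points at the highest 8-byte item slot of the frame.
 */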
44 | |||
911 | jermar | 45 | /** Partitioning of bank 0 registers. */ |
46 | #define R_OFFS r16 |
||
47 | #define R_HANDLER r17 |
||
48 | #define R_RET r18 |
||
921 | jermar | 49 | #define R_TMP r19 |
916 | jermar | 50 | #define R_KSTACK_BSP r22 /* keep in sync with before_thread_runs_arch() */ |
911 | jermar | 51 | #define R_KSTACK r23 /* keep in sync with before_thread_runs_arch() */ |
52 | |||
/** Heavyweight interrupt handler
 *
 * This macro roughly follows steps 1 through 19 described in the
 * Intel Itanium Architecture Software Developer's Manual, Chapter 3.4.2.
 *
 * The HEAVYWEIGHT_HANDLER macro must fit into 16 bundles (48 instructions).
 * This goal is achieved by using procedure calls after the RSE becomes
 * operational.
 *
 * Some steps are skipped (enabling and disabling interrupts).
 *
 * @param offs    Offset from the beginning of IVT.
 * @param handler Interrupt handler address.
 */
.macro HEAVYWEIGHT_HANDLER offs, handler=universal_handler
    .org ivt + \offs
    mov R_OFFS = \offs
    movl R_HANDLER = \handler ;;
    br heavyweight_handler
.endm
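
/*
 * For illustration (a sketch derived from the macro body above, not generated
 * output), the instantiation HEAVYWEIGHT_HANDLER 0x2c00 break_instruction used
 * in the IVT at the end of this file expands roughly to:
 *
 *	.org ivt + 0x2c00
 *	mov R_OFFS = 0x2c00
 *	movl R_HANDLER = break_instruction ;;
 *	br heavyweight_handler
 *
 * Each vector thus only records its offset and handler address in bank 0
 * registers and branches to the common heavyweight_handler below.
 */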

.global heavyweight_handler
heavyweight_handler:
    /* 1. copy interrupt registers into bank 0 */

    /*
     * Note that r24-r31 from bank 0 can be used only as long as PSR.ic = 0.
     */

    /* Set up FPU as in interrupted context. */
    mov r24 = psr
    mov r25 = cr.ipsr
    mov r26 = PSR_DFH_MASK
    mov r27 = ~PSR_DFH_MASK ;;
    and r26 = r25, r26
    and r24 = r24, r27 ;;
    or r24 = r24, r26 ;;
    mov psr.l = r24 ;;
    srlz.i
    srlz.d ;;

    mov r24 = cr.iip
    mov r25 = cr.ipsr
    mov r26 = cr.iipa
    mov r27 = cr.isr
    mov r28 = cr.ifa

    /* 2. preserve predicate register into bank 0 */
    mov r29 = pr ;;

    /* 3. switch to kernel memory stack */
    mov r30 = cr.ipsr
    shr.u r31 = r12, VRN_SHIFT ;;

    shr.u r30 = r30, PSR_CPL_SHIFT ;;
    and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;

    /*
     * Set p3 to true if the interrupted context executed in kernel mode,
     * p4 to true if it did not.
     */
    cmp.eq p3, p4 = r30, r0 ;;
    cmp.eq p1, p2 = r30, r0 ;;	/* remember IPSR setting in p1 and p2 */

    /*
     * If the interrupted context ran in kernel mode (p3), set p3 to true
     * if its stack register references kernel address space and p4 to
     * true if it does not.
     */
    (p3) cmp.eq p3, p4 = VRN_KERNEL, r31 ;;

    /*
     * Now p4 is true iff the stack needs to be switched to the kernel
     * stack.
     */
    mov r30 = r12
    (p4) mov r12 = R_KSTACK ;;

    add r31 = -STACK_FRAME_BIAS, r12 ;;
    add r12 = -STACK_FRAME_SIZE, r12

    /* 4. save registers in bank 0 into memory stack */

    /*
     * If this is the break_instruction handler,
     * copy input parameters to the stack.
     */
    mov R_TMP = 0x2c00 ;;
    cmp.eq p6, p5 = R_OFFS, R_TMP ;;

    /*
     * From now on, if this is the break_instruction handler, p6 is true
     * and p5 is false. Otherwise p6 is false and p5 is true.
     * Note that p5 is a preserved predicate register and we make use of it.
     */

    (p6) st8 [r31] = r38, -8 ;;	/* save in6 */
    (p6) st8 [r31] = r37, -8 ;;	/* save in5 */
    (p6) st8 [r31] = r36, -8 ;;	/* save in4 */
    (p6) st8 [r31] = r35, -8 ;;	/* save in3 */
    (p6) st8 [r31] = r34, -8 ;;	/* save in2 */
    (p6) st8 [r31] = r33, -8 ;;	/* save in1 */
    (p6) st8 [r31] = r32, -8 ;;	/* save in0 */
    (p5) add r31 = -56, r31 ;;
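
    /*
     * In the non-break case (p5), r31 is simply advanced past the seven
     * 8-byte slots (7 * 8 = 56 bytes) that would otherwise hold in0-in6,
     * so the layout of the remaining saved items is identical in both
     * cases.
     */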

    st8 [r31] = r30, -8 ;;	/* save old stack pointer */

    st8 [r31] = r29, -8 ;;	/* save predicate registers */

    st8 [r31] = r24, -8 ;;	/* save cr.iip */
    st8 [r31] = r25, -8 ;;	/* save cr.ipsr */
    st8 [r31] = r26, -8 ;;	/* save cr.iipa */
    st8 [r31] = r27, -8 ;;	/* save cr.isr */
    st8 [r31] = r28, -8 ;;	/* save cr.ifa */

    /* 5. RSE switch from interrupted context */
    mov r24 = ar.rsc
    mov r25 = ar.pfs
    cover
    mov r26 = cr.ifs

    st8 [r31] = r24, -8 ;;	/* save ar.rsc */
    st8 [r31] = r25, -8 ;;	/* save ar.pfs */
    st8 [r31] = r26, -8		/* save cr.ifs */

    and r24 = ~(RSC_PL_MASK), r24 ;;
    and r30 = ~(RSC_MODE_MASK), r24 ;;
    mov ar.rsc = r30 ;;		/* update RSE state */
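
    /*
     * Clearing the RSC mode bits puts the RSE into enforced lazy mode and
     * clearing RSC.pl sets its privilege level to kernel (0); ar.bspstore
     * is switched below only with the RSE quiesced in this mode.
     */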

    mov r27 = ar.rnat
    mov r28 = ar.bspstore ;;

    /*
     * Inspect BSPSTORE to figure out whether it is necessary to switch to
     * kernel BSPSTORE.
     */
    (p1) shr.u r30 = r28, VRN_SHIFT ;;
    (p1) cmp.eq p1, p2 = VRN_KERNEL, r30 ;;

    /*
     * If BSPSTORE needs to be switched, p1 is false and p2 is true.
     */
    (p1) mov r30 = r28
    (p2) mov r30 = R_KSTACK_BSP ;;
    (p2) mov ar.bspstore = r30 ;;

    mov r29 = ar.bsp

    st8 [r31] = r27, -8 ;;	/* save ar.rnat */
    st8 [r31] = r30, -8 ;;	/* save new value written to ar.bspstore */
    st8 [r31] = r28, -8 ;;	/* save ar.bspstore */
    st8 [r31] = r29, -8		/* save ar.bsp */

    mov ar.rsc = r24		/* restore RSE's setting + kernel privileges */

    /* steps 6 - 15 are done by heavyweight_handler_inner() */
    mov R_RET = b0		/* save b0 belonging to interrupted context */
    br.call.sptk.many b0 = heavyweight_handler_inner
0:  mov b0 = R_RET		/* restore b0 belonging to the interrupted context */

    /* 16. RSE switch to interrupted context */
    cover			/* allocate zero size frame (step 1 from Intel docs) */

    add r31 = (STACK_SCRATCH_AREA_SIZE + (FRS_TO_SAVE * 2 * 8)), r12 ;;

    ld8 r30 = [r31], +8 ;;	/* load ar.bsp */
    ld8 r29 = [r31], +8 ;;	/* load ar.bspstore */
    ld8 r28 = [r31], +8 ;;	/* load ar.bspstore_new */
    sub r27 = r30, r28 ;;	/* calculate loadrs (step 2) */
    shl r27 = r27, 16

    mov r24 = ar.rsc ;;
    and r30 = ~3, r24 ;;
    or r24 = r30, r27 ;;
    mov ar.rsc = r24 ;;		/* place RSE in enforced lazy mode */

    loadrs			/* (step 3) */

    ld8 r27 = [r31], +8 ;;	/* load ar.rnat */
    ld8 r26 = [r31], +8 ;;	/* load cr.ifs */
    ld8 r25 = [r31], +8 ;;	/* load ar.pfs */
    ld8 r24 = [r31], +8 ;;	/* load ar.rsc */

    mov ar.bspstore = r29 ;;	/* (step 4) */
    mov ar.rnat = r27		/* (step 5) */

    mov ar.pfs = r25		/* (step 6) */
    mov cr.ifs = r26

    mov ar.rsc = r24		/* (step 7) */

    /* 17. restore interruption state from memory stack */
    ld8 r28 = [r31], +8 ;;	/* load cr.ifa */
    ld8 r27 = [r31], +8 ;;	/* load cr.isr */
    ld8 r26 = [r31], +8 ;;	/* load cr.iipa */
    ld8 r25 = [r31], +8 ;;	/* load cr.ipsr */
    ld8 r24 = [r31], +8 ;;	/* load cr.iip */

    mov cr.iip = r24 ;;
    mov cr.iipa = r26
    mov cr.isr = r27
    mov cr.ifa = r28

    /*
     * Set up the FPU as in the exception context, i.e. propagate the
     * current PSR.dfh setting into cr.ipsr.
     */
    mov r24 = psr
    mov r26 = PSR_DFH_MASK
    mov r27 = ~PSR_DFH_MASK ;;
    and r25 = r25, r27
    and r24 = r24, r26 ;;
    or r25 = r25, r24 ;;
    mov cr.ipsr = r25

    /* 18. restore predicate registers from memory stack */
    ld8 r29 = [r31], +8 ;;	/* load predicate registers */
    mov pr = r29

    /* 19. return from interruption */
    ld8 r12 = [r31]		/* load stack pointer */
    rfi ;;

.global heavyweight_handler_inner
heavyweight_handler_inner:
    /*
     * From this point, the rest of the interrupted context
     * will be preserved in stacked registers and backing store.
     */
    alloc loc0 = ar.pfs, 0, 48, 2, 0 ;;

    /* bank 0 is going to be shadowed, copy essential data from there */
    mov loc1 = R_RET		/* b0 belonging to interrupted context */
    mov loc2 = R_HANDLER
    mov out0 = R_OFFS

    add out1 = STACK_SCRATCH_AREA_SIZE, r12
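
    /*
     * The frame allocated above has 0 inputs, 48 locals and 2 outputs:
     * out0 carries the IVT offset and out1 points just above the scratch
     * area, i.e. at the base of the saved-state area on the memory stack;
     * both are handed as arguments to the handler called in step 10.
     */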

    /* 6. switch to bank 1 and reenable PSR.ic */
    ssm PSR_IC_MASK
    bsw.1 ;;
    srlz.d

    /* 7. preserve branch and application registers */
    mov loc3 = ar.unat
    mov loc4 = ar.lc
    mov loc5 = ar.ec
    mov loc6 = ar.ccv
    mov loc7 = ar.csd
    mov loc8 = ar.ssd

    mov loc9 = b0
    mov loc10 = b1
    mov loc11 = b2
    mov loc12 = b3
    mov loc13 = b4
    mov loc14 = b5
    mov loc15 = b6
    mov loc16 = b7

    /* 8. preserve general and floating-point registers */
    mov loc17 = r1
    mov loc18 = r2
    mov loc19 = r3
    mov loc20 = r4
    mov loc21 = r5
    mov loc22 = r6
    mov loc23 = r7
    (p5) mov loc24 = r8		/* only if not in break_instruction handler */
    mov loc25 = r9
    mov loc26 = r10
    mov loc27 = r11
    /* skip r12 (stack pointer) */
    mov loc28 = r13
    mov loc29 = r14
    mov loc30 = r15
    mov loc31 = r16
    mov loc32 = r17
    mov loc33 = r18
    mov loc34 = r19
    mov loc35 = r20
    mov loc36 = r21
    mov loc37 = r22
    mov loc38 = r23
    mov loc39 = r24
    mov loc40 = r25
    mov loc41 = r26
    mov loc42 = r27
    mov loc43 = r28
    mov loc44 = r29
    mov loc45 = r30
    mov loc46 = r31

    add r24 = 96 + STACK_SCRATCH_AREA_SIZE, r12
    add r25 = 112 + STACK_SCRATCH_AREA_SIZE, r12
    add r26 = 0 + STACK_SCRATCH_AREA_SIZE, r12
    add r27 = 16 + STACK_SCRATCH_AREA_SIZE, r12
    add r28 = 32 + STACK_SCRATCH_AREA_SIZE, r12
    add r29 = 48 + STACK_SCRATCH_AREA_SIZE, r12
    add r30 = 64 + STACK_SCRATCH_AREA_SIZE, r12
    add r31 = 80 + STACK_SCRATCH_AREA_SIZE, r12 ;;
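
    /*
     * The eight pointers r24-r31 are spaced 16 bytes apart and each
     * stf.spill below advances its pointer by 0x80 (8 * 16 bytes), so the
     * thirty 16-byte spills of f2-f31 end up contiguous in the
     * FRS_TO_SAVE * 2 * 8 byte area just above the scratch area.
     */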

    stf.spill [r26] = f2, 0x80
    stf.spill [r27] = f3, 0x80
    stf.spill [r28] = f4, 0x80
    stf.spill [r29] = f5, 0x80
    stf.spill [r30] = f6, 0x80
    stf.spill [r31] = f7, 0x80 ;;

    stf.spill [r24] = f8, 0x80
    stf.spill [r25] = f9, 0x80
    stf.spill [r26] = f10, 0x80
    stf.spill [r27] = f11, 0x80
    stf.spill [r28] = f12, 0x80
    stf.spill [r29] = f13, 0x80
    stf.spill [r30] = f14, 0x80
    stf.spill [r31] = f15, 0x80 ;;

    stf.spill [r24] = f16, 0x80
    stf.spill [r25] = f17, 0x80
    stf.spill [r26] = f18, 0x80
    stf.spill [r27] = f19, 0x80
    stf.spill [r28] = f20, 0x80
    stf.spill [r29] = f21, 0x80
    stf.spill [r30] = f22, 0x80
    stf.spill [r31] = f23, 0x80 ;;

    stf.spill [r24] = f24, 0x80
    stf.spill [r25] = f25, 0x80
    stf.spill [r26] = f26, 0x80
    stf.spill [r27] = f27, 0x80
    stf.spill [r28] = f28, 0x80
    stf.spill [r29] = f29, 0x80
    stf.spill [r30] = f30, 0x80
    stf.spill [r31] = f31, 0x80 ;;

    mov loc47 = ar.fpsr		/* preserve floating point status register */

    /* 9. skipped (will not enable interrupts) */
    /*
     * ssm PSR_I_MASK
     * ;;
     * srlz.d
     */

    /* 10. call handler */
    movl r1 = _hardcoded_load_address

    mov b1 = loc2
    br.call.sptk.many b0 = b1

    /* 11. return from handler */
0:

    /* 12. skipped (will not disable interrupts) */
    /*
     * rsm PSR_I_MASK
     * ;;
     * srlz.d
     */

    /* 13. restore general and floating-point registers */
    add r24 = 96 + STACK_SCRATCH_AREA_SIZE, r12
    add r25 = 112 + STACK_SCRATCH_AREA_SIZE, r12
    add r26 = 0 + STACK_SCRATCH_AREA_SIZE, r12
    add r27 = 16 + STACK_SCRATCH_AREA_SIZE, r12
    add r28 = 32 + STACK_SCRATCH_AREA_SIZE, r12
    add r29 = 48 + STACK_SCRATCH_AREA_SIZE, r12
    add r30 = 64 + STACK_SCRATCH_AREA_SIZE, r12
    add r31 = 80 + STACK_SCRATCH_AREA_SIZE, r12 ;;

    ldf.fill f2 = [r26], 0x80
    ldf.fill f3 = [r27], 0x80
    ldf.fill f4 = [r28], 0x80
    ldf.fill f5 = [r29], 0x80
    ldf.fill f6 = [r30], 0x80
    ldf.fill f7 = [r31], 0x80 ;;

    ldf.fill f8 = [r24], 0x80
    ldf.fill f9 = [r25], 0x80
    ldf.fill f10 = [r26], 0x80
    ldf.fill f11 = [r27], 0x80
    ldf.fill f12 = [r28], 0x80
    ldf.fill f13 = [r29], 0x80
    ldf.fill f14 = [r30], 0x80
    ldf.fill f15 = [r31], 0x80 ;;

    ldf.fill f16 = [r24], 0x80
    ldf.fill f17 = [r25], 0x80
    ldf.fill f18 = [r26], 0x80
    ldf.fill f19 = [r27], 0x80
    ldf.fill f20 = [r28], 0x80
    ldf.fill f21 = [r29], 0x80
    ldf.fill f22 = [r30], 0x80
    ldf.fill f23 = [r31], 0x80 ;;

    ldf.fill f24 = [r24], 0x80
    ldf.fill f25 = [r25], 0x80
    ldf.fill f26 = [r26], 0x80
    ldf.fill f27 = [r27], 0x80
    ldf.fill f28 = [r28], 0x80
    ldf.fill f29 = [r29], 0x80
    ldf.fill f30 = [r30], 0x80
    ldf.fill f31 = [r31], 0x80 ;;

    mov r1 = loc17
    mov r2 = loc18
    mov r3 = loc19
    mov r4 = loc20
    mov r5 = loc21
    mov r6 = loc22
    mov r7 = loc23
    (p5) mov r8 = loc24		/* only if not in break_instruction handler */
    mov r9 = loc25
    mov r10 = loc26
    mov r11 = loc27
    /* skip r12 (stack pointer) */
    mov r13 = loc28
    mov r14 = loc29
    mov r15 = loc30
    mov r16 = loc31
    mov r17 = loc32
    mov r18 = loc33
    mov r19 = loc34
    mov r20 = loc35
    mov r21 = loc36
    mov r22 = loc37
    mov r23 = loc38
    mov r24 = loc39
    mov r25 = loc40
    mov r26 = loc41
    mov r27 = loc42
    mov r28 = loc43
    mov r29 = loc44
    mov r30 = loc45
    mov r31 = loc46

    mov ar.fpsr = loc47		/* restore floating point status register */

    /* 14. restore branch and application registers */
    mov ar.unat = loc3
    mov ar.lc = loc4
    mov ar.ec = loc5
    mov ar.ccv = loc6
    mov ar.csd = loc7
    mov ar.ssd = loc8

    mov b0 = loc9
    mov b1 = loc10
    mov b2 = loc11
    mov b3 = loc12
    mov b4 = loc13
    mov b5 = loc14
    mov b6 = loc15
    mov b7 = loc16

    /* 15. disable PSR.ic and switch to bank 0 */
    rsm PSR_IC_MASK
    bsw.0 ;;
    srlz.d

    mov R_RET = loc1
    mov ar.pfs = loc0
    br.ret.sptk.many b0
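
/*
 * The interruption vector table. The .align 32768 reflects the architected
 * requirement that the IVT base (to which cr.iva is expected to point) be
 * aligned on a 32 KB boundary.
 */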

.global ivt
.align 32768
ivt:
    HEAVYWEIGHT_HANDLER 0x0000
    HEAVYWEIGHT_HANDLER 0x0400
    HEAVYWEIGHT_HANDLER 0x0800
    HEAVYWEIGHT_HANDLER 0x0c00 alternate_instruction_tlb_fault
    HEAVYWEIGHT_HANDLER 0x1000 alternate_data_tlb_fault
    HEAVYWEIGHT_HANDLER 0x1400 data_nested_tlb_fault
    HEAVYWEIGHT_HANDLER 0x1800
    HEAVYWEIGHT_HANDLER 0x1c00
    HEAVYWEIGHT_HANDLER 0x2000 data_dirty_bit_fault
    HEAVYWEIGHT_HANDLER 0x2400 instruction_access_bit_fault
    HEAVYWEIGHT_HANDLER 0x2800 data_access_bit_fault
    HEAVYWEIGHT_HANDLER 0x2c00 break_instruction
    HEAVYWEIGHT_HANDLER 0x3000 external_interrupt	/* For external interrupt, heavyweight handler is used. */
    HEAVYWEIGHT_HANDLER 0x3400
    HEAVYWEIGHT_HANDLER 0x3800
    HEAVYWEIGHT_HANDLER 0x3c00
    HEAVYWEIGHT_HANDLER 0x4000
    HEAVYWEIGHT_HANDLER 0x4400
    HEAVYWEIGHT_HANDLER 0x4800
    HEAVYWEIGHT_HANDLER 0x4c00

    HEAVYWEIGHT_HANDLER 0x5000 page_not_present
    HEAVYWEIGHT_HANDLER 0x5100
    HEAVYWEIGHT_HANDLER 0x5200
    HEAVYWEIGHT_HANDLER 0x5300 data_access_rights_fault
    HEAVYWEIGHT_HANDLER 0x5400 general_exception
    HEAVYWEIGHT_HANDLER 0x5500 disabled_fp_register
    HEAVYWEIGHT_HANDLER 0x5600
    HEAVYWEIGHT_HANDLER 0x5700
    HEAVYWEIGHT_HANDLER 0x5800
    HEAVYWEIGHT_HANDLER 0x5900
    HEAVYWEIGHT_HANDLER 0x5a00
    HEAVYWEIGHT_HANDLER 0x5b00
    HEAVYWEIGHT_HANDLER 0x5c00
    HEAVYWEIGHT_HANDLER 0x5d00
    HEAVYWEIGHT_HANDLER 0x5e00
    HEAVYWEIGHT_HANDLER 0x5f00

    HEAVYWEIGHT_HANDLER 0x6000
    HEAVYWEIGHT_HANDLER 0x6100
    HEAVYWEIGHT_HANDLER 0x6200
    HEAVYWEIGHT_HANDLER 0x6300
    HEAVYWEIGHT_HANDLER 0x6400
    HEAVYWEIGHT_HANDLER 0x6500
    HEAVYWEIGHT_HANDLER 0x6600
    HEAVYWEIGHT_HANDLER 0x6700
    HEAVYWEIGHT_HANDLER 0x6800
    HEAVYWEIGHT_HANDLER 0x6900
    HEAVYWEIGHT_HANDLER 0x6a00
    HEAVYWEIGHT_HANDLER 0x6b00
    HEAVYWEIGHT_HANDLER 0x6c00
    HEAVYWEIGHT_HANDLER 0x6d00
    HEAVYWEIGHT_HANDLER 0x6e00
    HEAVYWEIGHT_HANDLER 0x6f00

    HEAVYWEIGHT_HANDLER 0x7000
    HEAVYWEIGHT_HANDLER 0x7100
    HEAVYWEIGHT_HANDLER 0x7200
    HEAVYWEIGHT_HANDLER 0x7300
    HEAVYWEIGHT_HANDLER 0x7400
    HEAVYWEIGHT_HANDLER 0x7500
    HEAVYWEIGHT_HANDLER 0x7600
    HEAVYWEIGHT_HANDLER 0x7700
    HEAVYWEIGHT_HANDLER 0x7800
    HEAVYWEIGHT_HANDLER 0x7900
    HEAVYWEIGHT_HANDLER 0x7a00
    HEAVYWEIGHT_HANDLER 0x7b00
    HEAVYWEIGHT_HANDLER 0x7c00
    HEAVYWEIGHT_HANDLER 0x7d00
    HEAVYWEIGHT_HANDLER 0x7e00
    HEAVYWEIGHT_HANDLER 0x7f00