--- Rev 915
+++ Rev 916
@@ -43 +43 @@
 
 /** Partitioning of bank 0 registers. */
 #define R_OFFS r16
 #define R_HANDLER r17
 #define R_RET r18
+#define R_KSTACK_BSP r22 /* keep in sync with before_thread_runs_arch() */
 #define R_KSTACK r23 /* keep in sync with before_thread_runs_arch() */
 
 /** Heavyweight interrupt handler
  *
  * This macro roughly follows steps from 1 to 19 described in
@@ -54 +55 @@
  *
  * HEAVYWEIGHT_HANDLER macro must cram into 16 bundles (48 instructions).
  * This goal is achieved by using procedure calls after RSE becomes operational.
  *
  * Some steps are skipped (enabling and disabling interrupts).
- * Some steps are not fully supported yet (e.g. interruptions
- * from userspace and floating-point context).
+ * Some steps are not fully supported yet (e.g. dealing with floating-point
+ * context).
  *
  * @param offs Offset from the beginning of IVT.
  * @param handler Interrupt handler address.
  */
 .macro HEAVYWEIGHT_HANDLER offs, handler=universal_handler
@@ -85 +86 @@
 /* 2. preserve predicate register into bank 0 */
 mov r29 = pr ;;
 
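The single `mov r29 = pr` above suffices because ia64 materializes all 64
one-bit predicate registers as one 64-bit value in pr (p0 always reads as 1).
A minimal C sketch of that packing, as a reading aid; the helper name is
ours, not from the source:

    #include <stdbool.h>
    #include <stdint.h>

    /* pr packs predicates p0..p63 into a single word. */
    static inline bool pr_get(uint64_t pr, unsigned n)
    {
        return (pr >> n) & 1;
    }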
 /* 3. switch to kernel memory stack */
 mov r30 = cr.ipsr
 shr.u r31 = r12, VRN_SHIFT ;;
+
+shr.u r30 = r30, PSR_CPL_SHIFT ;;
+and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;
 
 /*
- * Set p6 to true if the stack register references kernel address space.
- * Set p7 to false if the stack register doesn't reference kernel address space.
+ * Set p3 to true if the interrupted context executed in kernel mode.
+ * Set p4 to false if the interrupted context didn't execute in kernel mode.
  */
-cmp.eq p6, p7 = VRN_KERNEL, r31 ;;
-
-(p6) shr.u r30 = r30, PSR_CPL_SHIFT ;;
-(p6) and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;
+cmp.eq p3, p4 = r30, r0 ;;
+cmp.eq p1, p2 = r30, r0 ;; /* remember IPSR setting in p1 and p2 */
 
 /*
- * Set p6 to true if the interrupted context executed in kernel mode.
- * Set p7 to false if the interrupted context didn't execute in kernel mode.
+ * Set p3 to true if the stack register references kernel address space.
+ * Set p4 to false if the stack register doesn't reference kernel address space.
  */
-(p6) cmp.eq p6, p7 = r30, r0 ;;
+(p3) cmp.eq p3, p4 = VRN_KERNEL, r31 ;;
 
 /*
- * Now, p7 is true iff the stack needs to be switched to kernel stack.
+ * Now, p4 is true iff the stack needs to be switched to kernel stack.
  */
 mov r30 = r12
-(p7) mov r12 = R_KSTACK ;;
+(p4) mov r12 = R_KSTACK ;;
 
 add r31 = -STACK_FRAME_BIAS, r12 ;;
 add r12 = -STACK_FRAME_SIZE, r12
 
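Taken together, the two predicate chains above compute a single boolean: the
memory stack is switched unless the interrupted context was already running
in kernel mode on a kernel-space stack. A compilable C sketch of the decision
that ends up in p4; the constants are illustrative placeholders, not the
kernel's definitions:

    #include <stdbool.h>
    #include <stdint.h>

    #define VRN_SHIFT            61  /* illustrative */
    #define VRN_KERNEL           7   /* illustrative */
    #define PSR_CPL_SHIFT        32  /* illustrative */
    #define PSR_CPL_MASK_SHIFTED 3   /* illustrative */

    /* True iff r12 must be replaced with R_KSTACK (predicate p4 above). */
    static bool stack_needs_switch(uint64_t ipsr, uint64_t sp)
    {
        bool kernel_mode =
            (((ipsr >> PSR_CPL_SHIFT) & PSR_CPL_MASK_SHIFTED) == 0);
        bool kernel_stack = ((sp >> VRN_SHIFT) == VRN_KERNEL);
        return !(kernel_mode && kernel_stack);
    }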
 /* 4. save registers in bank 0 into memory stack */
@@ -128 +130 @@
 mov r24 = ar.rsc
 mov r25 = ar.pfs
 cover /* cr.ifs will describe the interrupted context's register frame */
 mov r26 = cr.ifs
 
 st8 [r31] = r24, -8 ;; /* save ar.rsc */
 st8 [r31] = r25, -8 ;; /* save ar.pfs */
 st8 [r31] = r26, -8 /* save cr.ifs */
 
 and r30 = ~3, r24 ;;
 mov ar.rsc = r30 ;; /* place RSE in enforced lazy mode */
 
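Clearing the two low bits of ar.rsc (the mode field) places the RSE in
enforced lazy mode, in which it performs no spills or fills of its own; only
then may ar.rnat be read and ar.bspstore be rewritten safely. The same bit
manipulation in C; the function name is ours:

    #include <stdint.h>

    /* ar.rsc bits 1:0 select the RSE mode; 0 means enforced lazy. */
    static inline uint64_t rsc_enforced_lazy(uint64_t rsc)
    {
        return rsc & ~UINT64_C(3);
    }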
 mov r27 = ar.rnat
 mov r28 = ar.bspstore ;;
 
+/*
+ * Inspect BSPSTORE to figure out whether it is necessary to switch to kernel BSPSTORE.
+ */
-/* assume kernel backing store */
+(p1) shr.u r30 = r28, VRN_SHIFT ;;
+(p1) cmp.eq p1, p2 = VRN_KERNEL, r30 ;;
+
+/*
+ * If BSPSTORE needs to be switched, p1 is false and p2 is true.
+ */
+(p1) mov r30 = r28
+(p2) mov r30 = R_KSTACK_BSP ;;
-mov ar.bspstore = r28 ;;
+(p2) mov ar.bspstore = r30 ;;
 
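The p1/p2 logic added above reduces to one rule: keep the interrupted
BSPSTORE only if the CPU was in kernel mode and the backing store already
references kernel address space; otherwise take the per-thread kernel value
from R_KSTACK_BSP. A C sketch of the value that ends up in r30 (and, when a
switch happens, in ar.bspstore); constants are illustrative and the function
name is ours:

    #include <stdbool.h>
    #include <stdint.h>

    #define VRN_SHIFT  61  /* illustrative */
    #define VRN_KERNEL 7   /* illustrative */

    static uint64_t choose_bspstore(bool kernel_mode, uint64_t bspstore,
        uint64_t kstack_bsp)
    {
        if (kernel_mode && ((bspstore >> VRN_SHIFT) == VRN_KERNEL))
            return bspstore;   /* p1 path: keep the interrupted backing store */
        return kstack_bsp;     /* p2 path: switch to the kernel backing store */
    }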
 mov r29 = ar.bsp
 
 st8 [r31] = r27, -8 ;; /* save ar.rnat */
-st8 [r31] = r28, -8 ;; /* save new value written to ar.bspstore */
+st8 [r31] = r30, -8 ;; /* save new value written to ar.bspstore */
 st8 [r31] = r28, -8 ;; /* save ar.bspstore */
 st8 [r31] = r29, -8 /* save ar.bsp */
 
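All of the saves above use the post-decrement form st8 [r31] = reg, -8, so
the save area fills top-down from just below STACK_FRAME_BIAS; the restore
path later walks it bottom-up from STACK_SCRATCH_AREA_SIZE with the mirrored
ld8 reg = [r31], +8 form. The addressing pattern in C; helper names are ours:

    #include <stdint.h>

    /* st8 [p] = v, -8 : store, then step the pointer down one 8-byte slot. */
    static inline void st8_down(uint64_t **p, uint64_t v)
    {
        **p = v;
        (*p)--;
    }

    /* ld8 v = [p], +8 : load, then step the pointer up one 8-byte slot. */
    static inline uint64_t ld8_up(uint64_t **p)
    {
        uint64_t v = **p;
        (*p)++;
        return v;
    }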
 mov ar.rsc = r24 /* restore RSE's setting */
 
 /* steps 6 - 15 are done by heavyweight_handler_inner() */
 mov R_RET = b0 /* save b0 belonging to interrupted context */
 br.call.sptk.many b0 = heavyweight_handler_inner
 0: mov b0 = R_RET /* restore b0 belonging to the interrupted context */
 
 /* 16. RSE switch to interrupted context */
 cover /* allocate zero-size frame (step 1 from Intel docs) */
 
 add r31 = STACK_SCRATCH_AREA_SIZE, r12 ;;
 
 ld8 r30 = [r31], +8 ;; /* load ar.bsp */
 ld8 r29 = [r31], +8 ;; /* load ar.bspstore */
@@ -187 +199 @@
 mov cr.ifs = r26
 
 mov ar.rsc = r24 /* (step 7) */
 
 /* 17. restore interruption state from memory stack */
 ld8 r28 = [r31], +8 ;; /* load cr.ifa */
 ld8 r27 = [r31], +8 ;; /* load cr.isr */
 ld8 r26 = [r31], +8 ;; /* load cr.iipa */
 ld8 r25 = [r31], +8 ;; /* load cr.ipsr */
 ld8 r24 = [r31], +8 ;; /* load cr.iip */
 
 mov cr.iip = r24
 mov cr.ipsr = r25
 mov cr.iipa = r26
 mov cr.isr = r27
 mov cr.ifa = r28
 
 /* 18. restore predicate registers from memory stack */
 ld8 r29 = [r31], +8 ;; /* load predicate registers */
 mov pr = r29
 
 /* 19. return from interruption */
 ld8 r12 = [r31] /* load stack pointer */
 rfi ;;
 
 .global heavyweight_handler_inner
 heavyweight_handler_inner:
 /*