Rev 915 | Rev 921 | Go to most recent revision | Show entire file | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 915 | Rev 916 | ||
---|---|---|---|
Line 43... | Line 43... | ||
43 | 43 | ||
44 | /** Partitioning of bank 0 registers. */ |
44 | /** Partitioning of bank 0 registers. */ |
45 | #define R_OFFS r16 |
45 | #define R_OFFS r16 |
46 | #define R_HANDLER r17 |
46 | #define R_HANDLER r17 |
47 | #define R_RET r18 |
47 | #define R_RET r18 |
- | 48 | #define R_KSTACK_BSP r22 /* keep in sync with before_thread_runs_arch() */ |
|
48 | #define R_KSTACK r23 /* keep in sync with before_thread_runs_arch() */ |
49 | #define R_KSTACK r23 /* keep in sync with before_thread_runs_arch() */ |
49 | 50 | ||
50 | /** Heavyweight interrupt handler |
51 | /** Heavyweight interrupt handler |
51 | * |
52 | * |
52 | * This macro roughly follows steps from 1 to 19 described in |
53 | * This macro roughly follows steps from 1 to 19 described in |
Line 54... | Line 55... | ||
54 | * |
55 | * |
55 | * HEAVYWEIGHT_HANDLER macro must cram into 16 bundles (48 instructions). |
56 | * HEAVYWEIGHT_HANDLER macro must cram into 16 bundles (48 instructions). |
56 | * This goal is achieved by using procedure calls after RSE becomes operational. |
57 | * This goal is achieved by using procedure calls after RSE becomes operational. |
57 | * |
58 | * |
58 | * Some steps are skipped (enabling and disabling interrupts). |
59 | * Some steps are skipped (enabling and disabling interrupts). |
59 | * Some steps are not fully supported yet (e.g. interruptions |
60 | * Some steps are not fully supported yet (e.g. dealing with floating-point |
60 | * from userspace and floating-point context). |
61 | * context). |
61 | * |
62 | * |
62 | * @param offs Offset from the beginning of IVT. |
63 | * @param offs Offset from the beginning of IVT. |
63 | * @param handler Interrupt handler address. |
64 | * @param handler Interrupt handler address. |
64 | */ |
65 | */ |
65 | .macro HEAVYWEIGHT_HANDLER offs, handler=universal_handler |
66 | .macro HEAVYWEIGHT_HANDLER offs, handler=universal_handler |
Line 87... | Line 88... | ||
87 | 88 | ||
88 | /* 3. switch to kernel memory stack */ |
89 | /* 3. switch to kernel memory stack */ |
89 | mov r30 = cr.ipsr |
90 | mov r30 = cr.ipsr |
90 | shr.u r31 = r12, VRN_SHIFT ;; |
91 | shr.u r31 = r12, VRN_SHIFT ;; |
91 | 92 | ||
- | 93 | shr.u r30 = r30, PSR_CPL_SHIFT ;; |
|
- | 94 | and r30 = PSR_CPL_MASK_SHIFTED, r30 ;; |
|
- | 95 | ||
92 | /* |
96 | /* |
93 | * Set p6 to true if the stack register references kernel address space. |
97 | * Set p3 to true if the interrupted context executed in kernel mode. |
94 | * Set p7 to true if the stack register doesn't reference kernel address space. |
98 | * Set p4 to true if the interrupted context didn't execute in kernel mode. |
95 | */ |
99 | */ |
96 | cmp.eq p6, p7 = VRN_KERNEL, r31 ;; |
100 | cmp.eq p3, p4 = r30, r0 ;; |
97 | - | ||
98 | (p6) shr.u r30 = r30, PSR_CPL_SHIFT ;; |
- | |
99 | (p6) and r30 = PSR_CPL_MASK_SHIFTED, r30 ;; |
101 | cmp.eq p1, p2 = r30, r0 ;; /* remember IPSR setting in p1 and p2 */ |
100 | 102 | ||
101 | /* |
103 | /* |
102 | * Set p6 to true if the interrupted context executed in kernel mode. |
104 | * Set p3 to true if the stack register references kernel address space. |
103 | * Set p7 to true if the interrupted context didn't execute in kernel mode. |
105 | * Set p4 to true if the stack register doesn't reference kernel address space. |
104 | */ |
106 | */ |
105 | (p6) cmp.eq p6, p7 = r30, r0 ;; |
107 | (p3) cmp.eq p3, p4 = VRN_KERNEL, r31 ;; |
106 | 108 | ||
107 | /* |
109 | /* |
108 | * Now, p7 is true iff the stack needs to be switched to kernel stack. |
110 | * Now, p4 is true iff the stack needs to be switched to kernel stack. |
109 | */ |
111 | */ |
110 | mov r30 = r12 |
112 | mov r30 = r12 |
111 | (p7) mov r12 = R_KSTACK ;; |
113 | (p4) mov r12 = R_KSTACK ;; |
112 | 114 | ||
113 | add r31 = -STACK_FRAME_BIAS, r12 ;; |
115 | add r31 = -STACK_FRAME_BIAS, r12 ;; |
114 | add r12 = -STACK_FRAME_SIZE, r12 |
116 | add r12 = -STACK_FRAME_SIZE, r12 |
115 | 117 | ||
116 | /* 4. save registers in bank 0 into memory stack */ |
118 | /* 4. save registers in bank 0 into memory stack */ |
Line 138... | Line 140... | ||
138 | mov ar.rsc = r30 ;; /* place RSE in enforced lazy mode */ |
140 | mov ar.rsc = r30 ;; /* place RSE in enforced lazy mode */ |
139 | 141 | ||
140 | mov r27 = ar.rnat |
142 | mov r27 = ar.rnat |
141 | mov r28 = ar.bspstore ;; |
143 | mov r28 = ar.bspstore ;; |
142 | 144 | ||
- | 145 | /* |
|
- | 146 | * Inspect BSPSTORE to figure out whether it is necessary to switch to kernel BSPSTORE. |
|
- | 147 | */ |
|
143 | /* assume kernel backing store */ |
148 | (p1) shr.u r30 = r28, VRN_SHIFT ;; |
- | 149 | (p1) cmp.eq p1, p2 = VRN_KERNEL, r30 ;; |
|
- | 150 | ||
- | 151 | /* |
|
- | 152 | * If BSPSTORE needs to be switched, p1 is false and p2 is true. |
|
- | 153 | */ |
|
- | 154 | (p1) mov r30 = r28 |
|
- | 155 | (p2) mov r30 = R_KSTACK_BSP ;; |
|
144 | mov ar.bspstore = r28 ;; |
156 | (p2) mov ar.bspstore = r30 ;; |
145 | 157 | ||
146 | mov r29 = ar.bsp |
158 | mov r29 = ar.bsp |
147 | 159 | ||
148 | st8 [r31] = r27, -8 ;; /* save ar.rnat */ |
160 | st8 [r31] = r27, -8 ;; /* save ar.rnat */ |
149 | st8 [r31] = r28, -8 ;; /* save new value written to ar.bspstore */ |
161 | st8 [r31] = r30, -8 ;; /* save new value written to ar.bspstore */ |
150 | st8 [r31] = r28, -8 ;; /* save ar.bspstore */ |
162 | st8 [r31] = r28, -8 ;; /* save ar.bspstore */ |
151 | st8 [r31] = r29, -8 /* save ar.bsp */ |
163 | st8 [r31] = r29, -8 /* save ar.bsp */ |
152 | 164 | ||
153 | mov ar.rsc = r24 /* restore RSE's setting */ |
165 | mov ar.rsc = r24 /* restore RSE's setting */ |
154 | 166 |