--- Rev 3781
+++ Rev 3782
@@ -47,20 +47,22 @@
 
 stack0:
 kernel_image_start:
 	.auto
 
+#ifdef CONFIG_SMP
 	# Identify self(CPU) in OS structures by ID / EID
 
 	mov r9 = cr64
 	mov r10 = 1
 	movl r12 = 0xffffffff
 	movl r8 = cpu_by_id_eid_list
 	and r8 = r8, r12
 	shr r9 = r9, 16
 	add r8 = r8, r9
 	st1 [r8] = r10
+#endif
 
 	mov psr.l = r0
 	srlz.i
 	srlz.d
 
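The ID/EID registration above stores the byte 1 into cpu_by_id_eid_list, indexed by the CPU's local interrupt ID. Purely as a reading aid, here is a minimal C sketch of what the shr/add/st1 sequence computes; register_self and its lid parameter are illustrative names, not symbols from the source. cr64 is the LID register, whose ID and EID fields occupy bits 24-31 and 16-23, so shifting the whole value right by 16 leaves a 16-bit (ID << 8) | EID index; the and with 0xffffffff presumably strips the kernel's virtual region bits, since this code still runs with physical addressing.

#include <stdint.h>

/* One byte per possible (ID << 8) | EID value; cf. ".space 65536" further down. */
extern volatile uint8_t cpu_by_id_eid_list[65536];

/* Illustrative C equivalent of the shr/add/st1 sequence above. */
static void register_self(uint64_t lid)          /* lid = value read from cr64 */
{
	uint16_t index = (uint16_t) (lid >> 16);     /* (ID << 8) | EID */
	cpu_by_id_eid_list[index] = 1;               /* mark this CPU as present */
}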
@@ -77,14 +79,14 @@
 	mov rr[r8] = r9
 
 	movl r8 = (VRN_KERNEL << VRN_SHIFT)
 	mov cr.ifa = r8
 
-	mov r11 = cr.itir ;;
-	movl r10 = (KERNEL_PAGE_WIDTH << PS_SHIFT);;
-	or r10 = r10, r11 ;;
-	mov cr.itir = r10;;
+	mov r11 = cr.itir
+	movl r10 = (KERNEL_PAGE_WIDTH << PS_SHIFT)
+	or r10 = r10, r11
+	mov cr.itir = r10
 
 	movl r10 = (KERNEL_TRANSLATION_I)
 	itr.i itr[r0] = r10
 	movl r10 = (KERNEL_TRANSLATION_D)
 	itr.d dtr[r0] = r10
@@ -93,31 +95,31 @@
 	movl r8 = (VRN_KERNEL << VRN_SHIFT) | VIO_OFFSET
 	mov cr.ifa = r8
 	movl r10 = (KERNEL_TRANSLATION_VIO)
 	itr.d dtr[r7] = r10
 
-	mov r11 = cr.itir ;;
-	movl r10 = ~0xfc;;
-	and r10 = r10, r11 ;;
-	movl r11 = (IO_PAGE_WIDTH << PS_SHIFT);;
-	or r10 = r10, r11 ;;
-	mov cr.itir = r10;;
+	mov r11 = cr.itir
+	movl r10 = ~0xfc
+	and r10 = r10, r11
+	movl r11 = (IO_PAGE_WIDTH << PS_SHIFT)
+	or r10 = r10, r11
+	mov cr.itir = r10
 
 	movl r7 = 2
 	movl r8 = (VRN_KERNEL << VRN_SHIFT) | IO_OFFSET
 	mov cr.ifa = r8
 	movl r10 = (KERNEL_TRANSLATION_IO)
 	itr.d dtr[r7] = r10
 
 	# Setup mapping for firmware area (also SAPIC)
 
-	mov r11 = cr.itir ;;
-	movl r10 = ~0xfc;;
-	and r10 = r10, r11 ;;
-	movl r11 = (FW_PAGE_WIDTH << PS_SHIFT);;
-	or r10 = r10, r11 ;;
-	mov cr.itir = r10;;
+	mov r11 = cr.itir
+	movl r10 = ~0xfc
+	and r10 = r10, r11
+	movl r11 = (FW_PAGE_WIDTH << PS_SHIFT)
+	or r10 = r10, r11
+	mov cr.itir = r10
 
 	movl r7 = 3
 	movl r8 = (VRN_KERNEL << VRN_SHIFT) | FW_OFFSET
 	mov cr.ifa = r8
 	movl r10 = (KERNEL_TRANSLATION_FW)
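Both cr.itir updates in this hunk follow the same read-modify-write pattern: clear the current page-size field, then or in the desired width. As a sketch only, assuming PS_SHIFT is 2 (which is what the ~0xfc mask implies, since the ps field occupies bits 2-7 of ITIR), the C shape of the sequence is roughly as follows; the helper name is illustrative.

#include <stdint.h>

/* Illustrative helper, not from the source: rewrite the ps field of an ITIR
 * image the way the and/or sequence above does. */
static uint64_t itir_set_page_width(uint64_t itir, uint64_t page_width)
{
	itir &= ~UINT64_C(0xfc);     /* clear ps (bits 2..7) */
	itir |= page_width << 2;     /* PS_SHIFT, assumed to be 2 */
	return itir;
}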
@@ -140,11 +142,11 @@
 
 	/*
 	 * Return From Interrupt is the only way to
 	 * fill the upper half word of PSR.
 	 */
-	rfi;;
+	rfi ;;
 
 
 .global paging_start
 paging_start:
 
@@ -153,14 +155,18 @@
 	 */
 
 	# Switch to register bank 1
 	bsw.1
 
+#ifdef CONFIG_SMP
 	# Am I BSP or AP?
-	movl r20 = bsp_started;;
-	ld8 r20 = [r20];;
-	cmp.eq p3, p2 = r20, r0;;
+	movl r20 = bsp_started ;;
+	ld8 r20 = [r20] ;;
+	cmp.eq p3, p2 = r20, r0 ;;
+#else
+	cmp.eq p3, p2 = r0, r0 ;; /* you are BSP */
+#endif /* CONFIG_SMP */
 
 	# Initialize register stack
 	mov ar.rsc = r0
 	movl r8 = (VRN_KERNEL << VRN_SHIFT) ;;
 	mov ar.bspstore = r8
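The new #else branch is the functional change here: without CONFIG_SMP there is no bsp_started flag to test, so the compare against r0 simply forces the "I am the BSP" predicate. A hedged C sketch of the decision (is_bsp is an illustrative name, not a kernel symbol):

#include <stdint.h>

extern uint64_t bsp_started;   /* the 8-byte flag defined at the end of the file */

/* Illustrative sketch: nonzero means p3 (BSP), zero means p2 (AP). */
static int is_bsp(void)
{
#ifdef CONFIG_SMP
	return bsp_started == 0;   /* first processor through sees the flag still clear */
#else
	return 1;                  /* cmp.eq p3, p2 = r0, r0: always the BSP */
#endif
}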
@@ -193,29 +199,33 @@
 
 	ssm (1 << 19) ;; /* Disable f32 - f127 */
 	srlz.i
 	srlz.d ;;
 
+#ifdef CONFIG_SMP
 (p2)	movl r18 = main_ap ;;
 (p2)	mov b1 = r18 ;;
 (p2)	br.call.sptk.many b0 = b1
 
 	# Mark that BSP is on
-	mov r20=1;;
-	movl r21=bsp_started;;
-	st8 [r21]=r20;;
+	mov r20 = 1 ;;
+	movl r21 = bsp_started ;;
+	st8 [r21] = r20 ;;
+#endif
 
 	br.call.sptk.many b0 = arch_pre_main
 
 	movl r18 = main_bsp ;;
 	mov b1 = r18 ;;
 	br.call.sptk.many b0 = b1
 
 0:
 	br 0b
-	.align 4096
 
+#ifdef CONFIG_SMP
+
+	.align 4096
 kernel_image_ap_start:
 	.auto
 
 	# Identify self(CPU) in OS structures by ID / EID
 
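For orientation, the predicated branches above boil down to the control flow sketched below; main_ap, main_bsp and arch_pre_main are the symbols the assembly branches to, but the C prototypes and the is_bsp() helper (from the sketch earlier) are illustrative, not the kernel's actual signatures.

#include <stdint.h>

extern uint64_t bsp_started;
extern void main_ap(void);        /* AP continuation, the (p2) branch above */
extern void arch_pre_main(void);
extern void main_bsp(void);

/* Illustrative sketch of the hand-off performed by the predicated code. */
static void boot_continue(void)
{
	if (!is_bsp())
		main_ap();        /* application processor, never returns */

#ifdef CONFIG_SMP
	bsp_started = 1;          /* "Mark that BSP is on" */
#endif
	arch_pre_main();
	main_bsp();               /* never returns */
}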
@@ -227,31 +237,32 @@
 	shr r9 = r9, 16
 	add r8 = r8, r9
 	st1 [r8] = r10
 
 	# Wait for wakeup synchro signal (#3 in cpu_by_id_eid_list)
+
 kernel_image_ap_start_loop:
 	movl r11 = kernel_image_ap_start_loop
 	and r11 = r11, r12
 	mov b1 = r11
 
-	ld1 r20 = [r8];;
-	movl r21 = 3;;
-	cmp.eq p2, p3 = r20, r21;;
+	ld1 r20 = [r8] ;;
+	movl r21 = 3 ;;
+	cmp.eq p2, p3 = r20, r21 ;;
 (p3)	br.call.sptk.many b0 = b1
 
 	movl r11 = kernel_image_start
 	and r11 = r11, r12
 	mov b1 = r11
 	br.call.sptk.many b0 = b1
 
-
 	.align 16
 .global bsp_started
 bsp_started:
 	.space 8
 
 	.align 4096
 .global cpu_by_id_eid_list
 cpu_by_id_eid_list:
 	.space 65536
 
+#endif /* CONFIG_SMP */
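Finally, the AP start path above spins until some other part of the kernel (not shown in this diff) writes the wakeup value 3 into this CPU's cpu_by_id_eid_list slot, and only then jumps back to kernel_image_start. A C rendering of the loop, for reading purposes only; index is the same (ID << 8) | EID value computed earlier and the function name is illustrative.

#include <stdint.h>

extern volatile uint8_t cpu_by_id_eid_list[65536];

/* Illustrative sketch of kernel_image_ap_start_loop. */
static void ap_wait_for_wakeup(uint16_t index)
{
	while (cpu_by_id_eid_list[index] != 3) {
		/* spin: the ld1 / movl 3 / cmp.eq / (p3) br sequence above */
	}
	/* the assembly then branches back to kernel_image_start */
}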