Rev 3588 → Rev 4338 (the Rev 4338 text is shown below)

Line 30... | Line 30...
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <mm/asid.h>

#define RR_MASK     (0xFFFFFFFF00000002)
#define RID_SHIFT   8
#define PS_SHIFT    2

#define KERNEL_TRANSLATION_I    0x0010000000000661
#define KERNEL_TRANSLATION_D    0x0010000000000661
#define KERNEL_TRANSLATION_VIO  0x0010000000000671
#define KERNEL_TRANSLATION_IO   0x00100FFFFC000671
#define KERNEL_TRANSLATION_FW   0x00100000F0000671

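/*
 * The KERNEL_TRANSLATION_* values are pre-built TLB entries in the Itanium
 * translation insertion format (present, accessed and dirty bits, kernel-only
 * access rights, memory attribute, physical page number). The 0x...661 entries
 * map ordinary cacheable kernel memory, while the 0x...671 variants use an
 * uncacheable memory attribute for the I/O and firmware areas.
 */
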
.section K_TEXT_START, "ax"

.global kernel_image_start

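/*
 * stack0 labels the same address as kernel_image_start, so the initial memory
 * stack that is set up further below grows downwards from the start of the
 * kernel image.
 */
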
stack0:
kernel_image_start:
        .auto

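        /*
         * cr64 is the processor's LID register; bits 16 - 31 hold its id/eid
         * pair. The SMP-only block below shifts that pair down by 16 and uses
         * it as an index into the byte array cpu_by_id_eid_list, storing 1 to
         * mark this CPU as present. Addresses are masked with 0xffffffff (r12)
         * because address translation is still off and the code runs from its
         * physical location.
         */
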
#ifdef CONFIG_SMP
        # Identify self(CPU) in OS structures by ID / EID

        mov r9 = cr64
        mov r10 = 1
        movl r12 = 0xffffffff
        movl r8 = cpu_by_id_eid_list
        and r8 = r8, r12
        shr r9 = r9, 16
        add r8 = r8, r9
        st1 [r8] = r10
#endif

        mov psr.l = r0
        srlz.i
        srlz.d

        # Fill TR.i and TR.d using Region Register #VRN_KERNEL

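        /*
         * A region register holds the region ID (RID), the preferred page size
         * and the VHPT-enable bit. RR_MASK clears those fields so that
         * RID_KERNEL and KERNEL_PAGE_WIDTH can be OR-ed in for the region
         * selected by VRN_KERNEL.
         */
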
        movl r8 = (VRN_KERNEL << VRN_SHIFT)
        mov r9 = rr[r8]

        movl r10 = (RR_MASK)
        and r9 = r10, r9
        movl r10 = ((RID_KERNEL << RID_SHIFT) | (KERNEL_PAGE_WIDTH << PS_SHIFT))
        or r9 = r10, r9

        mov rr[r8] = r9

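        /*
         * Pin the kernel mapping: cr.ifa supplies the virtual address and
         * cr.itir the page size for the next insertion, and itr.i/itr.d then
         * install the entry into instruction/data translation register 0,
         * where it is not subject to normal TLB replacement.
         */
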
        movl r8 = (VRN_KERNEL << VRN_SHIFT)
        mov cr.ifa = r8

        mov r11 = cr.itir
        movl r10 = (KERNEL_PAGE_WIDTH << PS_SHIFT)
        or r10 = r10, r11
        mov cr.itir = r10

        movl r10 = (KERNEL_TRANSLATION_I)
        itr.i itr[r0] = r10
        movl r10 = (KERNEL_TRANSLATION_D)
        itr.d dtr[r0] = r10

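        /*
         * The next three insertions pin data translations dtr[1] - dtr[3] for
         * the virtual I/O window (VIO_OFFSET), the I/O port space (IO_OFFSET)
         * and the firmware/SAPIC area (FW_OFFSET). Before each one, cr.itir is
         * reprogrammed with the page width of that area; the ~0xfc mask clears
         * the page-size field of cr.itir first.
         */
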
        movl r7 = 1
        movl r8 = (VRN_KERNEL << VRN_SHIFT) | VIO_OFFSET
        mov cr.ifa = r8
        movl r10 = (KERNEL_TRANSLATION_VIO)
        itr.d dtr[r7] = r10

        mov r11 = cr.itir
        movl r10 = ~0xfc
        and r10 = r10, r11
        movl r11 = (IO_PAGE_WIDTH << PS_SHIFT)
        or r10 = r10, r11
        mov cr.itir = r10

        movl r7 = 2
        movl r8 = (VRN_KERNEL << VRN_SHIFT) | IO_OFFSET
        mov cr.ifa = r8
        movl r10 = (KERNEL_TRANSLATION_IO)
        itr.d dtr[r7] = r10

        # Setup mapping for firmware area (also SAPIC)

        mov r11 = cr.itir
        movl r10 = ~0xfc
        and r10 = r10, r11
        movl r11 = (FW_PAGE_WIDTH << PS_SHIFT)
        or r10 = r10, r11
        mov cr.itir = r10

        movl r7 = 3
        movl r8 = (VRN_KERNEL << VRN_SHIFT) | FW_OFFSET
        mov cr.ifa = r8
        movl r10 = (KERNEL_TRANSLATION_FW)
        itr.d dtr[r7] = r10

        # Initialize PSR

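        /*
         * PSR.dt, PSR.rt and PSR.it turn on data, register-stack and
         * instruction address translation, and PSR.ic enables interruption
         * collection. PSR.it lives in the upper half of PSR, which mov to
         * psr.l cannot write, so the new value is staged in cr.ipsr and loaded
         * by the rfi below.
         */
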
        movl r10 = (PSR_DT_MASK | PSR_RT_MASK | PSR_IT_MASK | PSR_IC_MASK)  /* Enable paging */
        mov r9 = psr

        or r10 = r10, r9
        mov cr.ipsr = r10
        mov cr.ifs = r0
        movl r8 = paging_start
        mov cr.iip = r8
        srlz.d
        srlz.i

        .explicit

        /*
         * Return From Interrupt is the only way to
         * fill the upper half word of PSR.
         */
        rfi ;;

.global paging_start
paging_start:

        /*
         * Now we are paging.
         */

        # Switch to register bank 1
        bsw.1

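        /*
         * Decide whether this CPU is the bootstrap processor: bsp_started is
         * still zero until the BSP sets it further below, so the comparison
         * leaves p3 set on the BSP and p2 set on an application processor.
         * Without SMP the CPU is always treated as the BSP.
         */
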
#ifdef CONFIG_SMP
        # Am I BSP or AP?
        movl r20 = bsp_started ;;
        ld8 r20 = [r20] ;;
        cmp.eq p3, p2 = r20, r0 ;;
#else
        cmp.eq p3, p2 = r0, r0 ;;  /* you are BSP */
#endif /* CONFIG_SMP */

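        /*
         * ar.rsc = 0 puts the register stack engine into enforced lazy mode;
         * ar.bspstore is then pointed at the base of the kernel region and
         * loadrs (with a zero load distance) invalidates the stacked
         * registers, so the new backing store starts out clean.
         */
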
        # Initialize register stack
        mov ar.rsc = r0
        movl r8 = (VRN_KERNEL << VRN_SHIFT) ;;
        mov ar.bspstore = r8
        loadrs

        # Initialize memory stack to some sane value
        movl r12 = stack0 ;;
        add r12 = -16, r12  /* allocate a scratch area on the stack */

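        /*
         * r1 is the ia64 global pointer. Its kernel-region virtual alias is
         * preserved in r20 before r1 is reloaded with the hardcoded load
         * address.
         */
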
        # Initialize gp (Global Pointer) register
        movl r20 = (VRN_KERNEL << VRN_SHIFT) ;;
        or r20 = r20, r1 ;;
        movl r1 = _hardcoded_load_address

        /*

Line 210... | Line 199... (unchanged lines omitted by the diff)

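        /*
         * Bit 19 of the PSR is PSR.dfh; setting it makes any access to the
         * high floating-point registers f32 - f127 trap, presumably so they
         * can be enabled lazily later.
         */
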
        ssm (1 << 19) ;;  /* Disable f32 - f127 */
        srlz.i
        srlz.d ;;

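        /*
         * An application processor (p2) branches off to main_ap and never
         * returns; the BSP falls through, publishes bsp_started so that the
         * APs can pass the check above, and continues into arch_pre_main and
         * main_bsp.
         */
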
#ifdef CONFIG_SMP
        (p2) movl r18 = main_ap ;;
        (p2) mov b1 = r18 ;;
        (p2) br.call.sptk.many b0 = b1

        # Mark that BSP is on
        mov r20 = 1 ;;
        movl r21 = bsp_started ;;
        st8 [r21] = r20 ;;
#endif

        br.call.sptk.many b0 = arch_pre_main

        movl r18 = main_bsp ;;
        mov b1 = r18 ;;
        br.call.sptk.many b0 = b1

0:
        br 0b

#ifdef CONFIG_SMP

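        /*
         * Entry point for application processors, which start here in physical
         * mode; the 4 KiB alignment presumably makes the address easy to hand
         * to the platform's AP wakeup mechanism.
         */
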
.align 4096
kernel_image_ap_start:
        .auto

        # Identify self(CPU) in OS structures by ID / EID

        mov r9 = cr64
        mov r10 = 1
        movl r12 = 0xffffffff
        movl r8 = cpu_by_id_eid_list
        and r8 = r8, r12
        shr r9 = r9, 16
        add r8 = r8, r9
        st1 [r8] = r10

        # Wait for wakeup synchro signal (#3 in cpu_by_id_eid_list)

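        /*
         * The AP spins in the loop below until some other processor
         * (presumably the BSP during SMP startup) writes the value 3 into this
         * CPU's slot of cpu_by_id_eid_list; it then jumps to
         * kernel_image_start. Branch targets are masked with 0xffffffff (r12)
         * because the AP is still running untranslated at the physical load
         * address.
         */
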
kernel_image_ap_start_loop:
        movl r11 = kernel_image_ap_start_loop
        and r11 = r11, r12
        mov b1 = r11

        ld1 r20 = [r8] ;;
        movl r21 = 3 ;;
        cmp.eq p2, p3 = r20, r21 ;;
        (p3) br.call.sptk.many b0 = b1

        movl r11 = kernel_image_start
        and r11 = r11, r12
        mov b1 = r11
        br.call.sptk.many b0 = b1

.align 16
.global bsp_started
bsp_started:
        .space 8

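/*
 * One byte per possible ID/EID pair (256 x 256). A nonzero entry means the
 * corresponding CPU has checked in, and the value 3 is used as the wakeup
 * signal for application processors.
 */
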
.align 4096
.global cpu_by_id_eid_list
cpu_by_id_eid_list:
        .space 65536

#endif /* CONFIG_SMP */