Diff of Rev 3022 against Rev 4055 (hunk starts at line 30 in both revisions). Unchanged lines are shown plain, removed lines are prefixed with "-", added lines with "+".
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <mm/asid.h>

#define RR_MASK (0xFFFFFFFF00000002)
#define RID_SHIFT 8
#define PS_SHIFT 2
-
- #define KERNEL_TRANSLATION_I 0x0010000000000661
- #define KERNEL_TRANSLATION_D 0x0010000000000661
- #define KERNEL_TRANSLATION_VIO 0x0010000000000671
- #define KERNEL_TRANSLATION_IO 0x00100FFFFC000671
- #define VIO_OFFSET 0x0002000000000000
-
- #define IO_OFFSET 0x0001000000000000
-

+ #define KERNEL_TRANSLATION_I 0x0010000000000661
+ #define KERNEL_TRANSLATION_D 0x0010000000000661
+ #define KERNEL_TRANSLATION_VIO 0x0010000000000671
+ #define KERNEL_TRANSLATION_IO 0x00100FFFFC000671
+ #define KERNEL_TRANSLATION_FW 0x00100000F0000671

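These constants follow the IA-64 region register and translation formats used later in the file: the RID sits at bit 8 (RID_SHIFT), the page size at bit 2 (PS_SHIFT), and the KERNEL_TRANSLATION_* values are the general-register operands later handed to itr.i/itr.d. A minimal C sketch of how such values decompose, assuming the standard IA-64 field layout from the architecture manual; the helper name rr_compose and the example RID/page-width values are invented for illustration and are not kernel API:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* Compose a region register value the same way the code below does:
     * keep the bits preserved by RR_MASK, then insert RID and page size. */
    static uint64_t rr_compose(uint64_t old_rr, uint64_t rid, uint64_t page_width)
    {
        return (old_rr & 0xFFFFFFFF00000002ULL) | (rid << 8) | (page_width << 2);
    }

    int main(void)
    {
        uint64_t tr = 0x0010000000000661ULL;    /* KERNEL_TRANSLATION_I/D */

        /* Decode the low-order attribute bits of the translation constant
         * (field positions assumed from the IA-64 manual). */
        printf("p=%" PRIu64 " ma=%" PRIu64 " a=%" PRIu64 " d=%" PRIu64
               " ar=%" PRIu64 " ed=%" PRIu64 "\n",
               tr & 1,             /* present */
               (tr >> 2) & 0x7,    /* memory attribute (0 = write-back) */
               (tr >> 5) & 1,      /* accessed */
               (tr >> 6) & 1,      /* dirty */
               (tr >> 9) & 0x7,    /* access rights */
               (tr >> 52) & 1);    /* exception deferral */

        /* Arbitrary example: RID 1, 16 KiB pages (page width 14). */
        printf("rr=%#" PRIx64 "\n", rr_compose(0, 1, 14));
        return 0;
    }

On that reading, 0x661 would describe a present, accessed/dirty, write-back page, while the 0x671 used for the VIO/IO/FW mappings flips the memory attribute to uncacheable, which is what one would expect for device windows.
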
.section K_TEXT_START, "ax"

.global kernel_image_start

stack0:
kernel_image_start:
.auto

+ #ifdef CONFIG_SMP
+ # Identify this CPU in OS structures by ID / EID
+
+ mov r9 = cr64
+ mov r10 = 1
+ movl r12 = 0xffffffff
+ movl r8 = cpu_by_id_eid_list
+ and r8 = r8, r12
+ shr r9 = r9, 16
+ add r8 = r8, r9
+ st1 [r8] = r10
+ #endif
+
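The new SMP prologue records the booting CPU in cpu_by_id_eid_list: cr64 is the LID register, whose bits 16-31 carry the processor's (id, eid) pair, and that 16-bit value indexes the 64 KiB table defined at the end of this file. The movl/and pair apparently strips the kernel virtual-region bits from the symbol's address, since translation is still off at this point. A C restatement of the indexing, for illustration only (the helper name is made up):

    #include <stdint.h>

    extern volatile uint8_t cpu_by_id_eid_list[65536];  /* defined at the end of this file */

    /* Equivalent of the st1 above: mark this CPU's slot, indexed by the
     * 16-bit (id, eid) value taken from bits 16-31 of the LID register. */
    static void mark_cpu_present(uint64_t lid)
    {
        cpu_by_id_eid_list[(uint16_t)(lid >> 16)] = 1;
    }
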
mov psr.l = r0
srlz.i
srlz.d

# Fill TR.i and TR.d using Region Register #VRN_KERNEL

-
movl r8 = (VRN_KERNEL << VRN_SHIFT)
mov r9 = rr[r8]

-
movl r10 = (RR_MASK)
and r9 = r10, r9
movl r10 = ((RID_KERNEL << RID_SHIFT) | (KERNEL_PAGE_WIDTH << PS_SHIFT))
or r9 = r10, r9

-
mov rr[r8] = r9

-
-
movl r8 = (VRN_KERNEL << VRN_SHIFT)
mov cr.ifa = r8

-
- mov r11 = cr.itir ;;
- movl r10 = (KERNEL_PAGE_WIDTH << PS_SHIFT);;
- or r10 =r10 , r11 ;;
- mov cr.itir = r10;;
+ mov r11 = cr.itir
+ movl r10 = (KERNEL_PAGE_WIDTH << PS_SHIFT)
+ or r10 = r10, r11
+ mov cr.itir = r10

-
movl r10 = (KERNEL_TRANSLATION_I)
itr.i itr[r0] = r10
-
-
movl r10 = (KERNEL_TRANSLATION_D)
itr.d dtr[r0] = r10

-
movl r7 = 1
movl r8 = (VRN_KERNEL << VRN_SHIFT) | VIO_OFFSET
mov cr.ifa = r8
movl r10 = (KERNEL_TRANSLATION_VIO)
itr.d dtr[r7] = r10

-
- mov r11 = cr.itir ;;
- movl r10 = ~0xfc;;
- and r10 =r10 , r11 ;;
- movl r11 = (IO_PAGE_WIDTH << PS_SHIFT);;
- or r10 =r10 , r11 ;;
- mov cr.itir = r10;;
-
+ mov r11 = cr.itir
+ movl r10 = ~0xfc
+ and r10 = r10, r11
+ movl r11 = (IO_PAGE_WIDTH << PS_SHIFT)
+ or r10 = r10, r11
+ mov cr.itir = r10

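Each mapping repeats the same cr.itir dance: read it, clear the page-size field (bits 2-7, hence the ~0xfc mask), then OR in the width of the page about to be inserted. A small C equivalent of that bit manipulation, with an illustrative helper name:

    #include <stdint.h>

    #define PS_SHIFT 2

    /* Return an itir value with its page-size field replaced, mirroring
     * the "and r10 = ~0xfc" / "or r10 = ..." sequence above. */
    static uint64_t itir_with_page_width(uint64_t itir, uint64_t page_width)
    {
        return (itir & ~UINT64_C(0xfc)) | (page_width << PS_SHIFT);
    }
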
movl r7 = 2
movl r8 = (VRN_KERNEL << VRN_SHIFT) | IO_OFFSET
mov cr.ifa = r8
movl r10 = (KERNEL_TRANSLATION_IO)
itr.d dtr[r7] = r10

+ # Set up mapping for the firmware area (also SAPIC)

+ mov r11 = cr.itir
+ movl r10 = ~0xfc
+ and r10 = r10, r11
+ movl r11 = (FW_PAGE_WIDTH << PS_SHIFT)
+ or r10 = r10, r11
+ mov cr.itir = r10

+ movl r7 = 3
+ movl r8 = (VRN_KERNEL << VRN_SHIFT) | FW_OFFSET
+ mov cr.ifa = r8
+ movl r10 = (KERNEL_TRANSLATION_FW)
+ itr.d dtr[r7] = r10
+
+ # Initialize PSR

- # initialize PSR
movl r10 = (PSR_DT_MASK | PSR_RT_MASK | PSR_IT_MASK | PSR_IC_MASK) /* Enable paging */
mov r9 = psr
+
or r10 = r10, r9
mov cr.ipsr = r10
mov cr.ifs = r0
movl r8 = paging_start
mov cr.iip = r8
srlz.d
srlz.i

.explicit
+
/*
- * Return From Interupt is the only the way to fill upper half word of PSR.
+ * Return From Interrupt is the only way to
+ * fill the upper half word of PSR.
 */
- rfi;;
+ rfi ;;
+

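The cr.ipsr/cr.iip/rfi sequence is what actually turns translation on: PSR.dt, PSR.rt and PSR.ic sit in the lower half of PSR, but PSR.it lives in the upper half, which mov psr.l cannot reach, so the code stages the new PSR in cr.ipsr and lets rfi install it while branching to paging_start. A quick C check of those bit positions, assuming the layout from the IA-64 architecture manual (not taken from the kernel headers):

    #include <stdio.h>

    int main(void)
    {
        /* PSR bit positions per the IA-64 architecture manual (assumption). */
        struct { const char *name; unsigned bit; } bits[] = {
            { "ic (interruption collection)",    13 },
            { "dt (data translation)",           17 },
            { "rt (register stack translation)", 27 },
            { "it (instruction translation)",    36 },
        };

        for (unsigned i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
            printf("psr.%-32s bit %2u -> %s half\n", bits[i].name, bits[i].bit,
                   bits[i].bit >= 32 ? "upper (rfi only)" : "lower");
        return 0;
    }
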
.global paging_start
paging_start:

/*
 * Now we are paging.
 */

- # switch to register bank 1
+ # Switch to register bank 1
bsw.1
+
+ #ifdef CONFIG_SMP
+ # Am I BSP or AP?
+ movl r20 = bsp_started ;;
+ ld8 r20 = [r20] ;;
+ cmp.eq p3, p2 = r20, r0 ;;
+ #else
+ cmp.eq p3, p2 = r0, r0 ;; /* you are BSP */
+ #endif /* CONFIG_SMP */

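The new predicate setup splits the flow between the bootstrap processor and the application processors: p3 is set when bsp_started is still zero (or unconditionally on non-SMP builds), and p2 is its complement. A C restatement of the test, purely for illustration (the helper name is invented):

    #include <stdbool.h>
    #include <stdint.h>

    extern volatile uint64_t bsp_started;   /* defined at the end of this file */

    /* cmp.eq p3, p2 = r20, r0: p3 = "I am the BSP", p2 = "I am an AP". */
    static bool i_am_bsp(void)
    {
        return bsp_started == 0;
    }
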
- # initialize register stack
+ # Initialize register stack
mov ar.rsc = r0
movl r8 = (VRN_KERNEL << VRN_SHIFT) ;;
mov ar.bspstore = r8
loadrs

- # initialize memory stack to some sane value
+ # Initialize memory stack to some sane value
movl r12 = stack0 ;;
-
add r12 = -16, r12 /* allocate a scratch area on the stack */

- # initialize gp (Global Pointer) register
+ # Initialize gp (Global Pointer) register
movl r20 = (VRN_KERNEL << VRN_SHIFT);;
or r20 = r20,r1;;
movl r1 = _hardcoded_load_address

/*
- * Initialize hardcoded_* variables.
+ * Initialize hardcoded_* variables. Only the BSP does this.
 */
- movl r14 = _hardcoded_ktext_size
- movl r15 = _hardcoded_kdata_size
- movl r16 = _hardcoded_load_address ;;
- addl r17 = @gprel(hardcoded_ktext_size), gp
- addl r18 = @gprel(hardcoded_kdata_size), gp
- addl r19 = @gprel(hardcoded_load_address), gp
- addl r21 = @gprel(bootinfo), gp
+ (p3) movl r14 = _hardcoded_ktext_size
+ (p3) movl r15 = _hardcoded_kdata_size
+ (p3) movl r16 = _hardcoded_load_address ;;
+ (p3) addl r17 = @gprel(hardcoded_ktext_size), gp
+ (p3) addl r18 = @gprel(hardcoded_kdata_size), gp
+ (p3) addl r19 = @gprel(hardcoded_load_address), gp
+ (p3) addl r21 = @gprel(bootinfo), gp
;;
- st8 [r17] = r14
- st8 [r18] = r15
- st8 [r19] = r16
- st8 [r21] = r20
+ (p3) st8 [r17] = r14
+ (p3) st8 [r18] = r15
+ (p3) st8 [r19] = r16
+ (p3) st8 [r21] = r20

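The (p3)-predicated block now runs only on the BSP: it loads the linker-script values through the absolute _hardcoded_* symbols and stores them via gp-relative addresses into the kernel's hardcoded_* variables and bootinfo. A sketch of the @gprel addressing in C terms, with purely illustrative names:

    #include <stdint.h>

    /* addl rX = @gprel(sym), gp computes the variable's address as
     * gp plus a gp-relative offset; the predicated st8 then writes through it. */
    static inline uint64_t *gprel_addr(uint64_t gp, int64_t gprel_offset)
    {
        return (uint64_t *)(gp + (uint64_t)gprel_offset);
    }

    static void store_hardcoded(uint64_t gp, int64_t off, uint64_t value)
    {
        *gprel_addr(gp, off) = value;   /* (p3) st8 [rX] = rY */
    }
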
ssm (1 << 19) ;; /* Disable f32 - f127 */
srlz.i
srlz.d ;;

+ #ifdef CONFIG_SMP
+ (p2) movl r18 = main_ap ;;
+ (p2) mov b1 = r18 ;;
+ (p2) br.call.sptk.many b0 = b1
+
+ # Mark that BSP is on
+ mov r20 = 1 ;;
+ movl r21 = bsp_started ;;
+ st8 [r21] = r20 ;;
+ #endif
+
br.call.sptk.many b0 = arch_pre_main

movl r18 = main_bsp ;;
mov b1 = r18 ;;
br.call.sptk.many b0 = b1

-
0:
br 0b
+
+ #ifdef CONFIG_SMP
+
+ .align 4096
+ kernel_image_ap_start:
+ .auto
+
+ # Identify this CPU in OS structures by ID / EID
+
+ mov r9 = cr64
+ mov r10 = 1
+ movl r12 = 0xffffffff
+ movl r8 = cpu_by_id_eid_list
+ and r8 = r8, r12
+ shr r9 = r9, 16
+ add r8 = r8, r9
+ st1 [r8] = r10
+
+ # Wait for the wakeup synchronization signal (#3 in cpu_by_id_eid_list)
+
+ kernel_image_ap_start_loop:
+ movl r11 = kernel_image_ap_start_loop
+ and r11 = r11, r12
+ mov b1 = r11
+
+ ld1 r20 = [r8] ;;
+ movl r21 = 3 ;;
+ cmp.eq p2, p3 = r20, r21 ;;
+ (p3) br.call.sptk.many b0 = b1
+
+ movl r11 = kernel_image_start
+ and r11 = r11, r12
+ mov b1 = r11
+ br.call.sptk.many b0 = b1
+
+ .align 16
+ .global bsp_started
+ bsp_started:
+ .space 8
+
+ .align 4096
+ .global cpu_by_id_eid_list
+ cpu_by_id_eid_list:
+ .space 65536
+
+ #endif /* CONFIG_SMP */
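The new AP entry point mirrors the BSP prologue: it marks its own slot in cpu_by_id_eid_list and then spins until a wakeup value of 3 appears there (written elsewhere, not in this file), at which point it branches back to kernel_image_start and goes through the same MMU setup. A C sketch of the handshake as read from this file; the constant names are invented for illustration and the real wakeup writer is not shown here:

    #include <stdint.h>

    #define CPU_HERE    1   /* written by each CPU when it identifies itself */
    #define CPU_WAKEUP  3   /* the "#3" the spin loop above waits for */

    extern volatile uint8_t cpu_by_id_eid_list[65536];

    /* AP side of the handshake: announce presence, spin until woken,
     * then re-enter the common startup path (kernel_image_start). */
    static void ap_wait_for_wakeup(uint16_t id_eid)
    {
        cpu_by_id_eid_list[id_eid] = CPU_HERE;           /* st1 [r8] = r10    */
        while (cpu_by_id_eid_list[id_eid] != CPU_WAKEUP)
            ;                                            /* ..._ap_start_loop */
        /* the assembly then branches back to kernel_image_start */
    }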