...

	/*
	 * Set up the basic runtime environment.
	 */

	wrpr %g0, NWINDOWS - 2, %cansave	! set maximum saveable windows
	wrpr %g0, 0, %canrestore		! get rid of windows we will
						! never need again
	wrpr %g0, 0, %otherwin			! make sure the window state is
						! consistent
	wrpr %g0, NWINDOWS - 1, %cleanwin	! prevent needless clean_window
						! traps for kernel
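
	/*
	 * A side note: the %cansave, %canrestore and %otherwin writes above
	 * establish the SPARC V9 register window invariant
	 * CANSAVE + CANRESTORE + OTHERWIN = NWINDOWS - 2
	 * (e.g. with NWINDOWS = 8: 6 + 0 + 0 = 8 - 2).
	 */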

	wrpr %g0, 0, %tl			! TL = 0, primary context
						! register is used

	wrpr %g0, PSTATE_PRIV_BIT, %pstate	! disable interrupts and disable
						! 32-bit address masking

	wrpr %g0, 0, %pil			! initialize %pil

	/*
	 * Switch to kernel trap table.
	 */
	sethi %hi(trap_table), %g1
	wrpr %g1, %lo(trap_table), %tba
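
	/*
	 * A side note: wrpr XORs its two operands, but since the bits set by
	 * sethi/%hi and by %lo do not overlap, the XOR acts as an OR here and
	 * %tba receives the complete address of trap_table.
	 */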

	/*
	 * Take over the DMMU by installing a global locked TTE entry that
	 * identically maps the first 4M of memory.
	 *
	 * In the case of the DMMU, no FLUSH instructions need to be issued.
	 * Because of that, the old DTLB contents can be demapped pretty
	 * straightforwardly and without causing any traps.
	 */

	wr %g0, ASI_DMMU, %asi

#define SET_TLB_DEMAP_CMD(r1, context_id) \
	set (TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) | (context_id << \
	    TLB_DEMAP_CONTEXT_SHIFT), %r1
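
/*
 * For illustration: the SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS) invocation
 * below expands to a single 'set' pseudo-instruction that builds the demap
 * operation address in %g1:
 *
 *	set (TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) | \
 *	    (TLB_DEMAP_NUCLEUS << TLB_DEMAP_CONTEXT_SHIFT), %g1
 *
 * Storing any value to that address via ASI_DMMU_DEMAP then triggers the
 * demap operation itself.
 */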

	! demap context 0
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
	stxa %g0, [%g1] ASI_DMMU_DEMAP
	membar #Sync

#define SET_TLB_TAG(r1, context) \
	set VMA | (context << TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1

	! write DTLB tag
	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi
	membar #Sync
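
	/*
	 * A side note: installing a TLB entry is a two-step protocol. The
	 * virtual address and context are first written to the TLB tag access
	 * register (above); the TTE data subsequently stored to the data-in
	 * register (below) is paired with that tag and inserted into the TLB.
	 */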

...

	SET_TLB_DATA(g1, g2, TTE_L | TTE_W)	! use non-global mapping
	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG
	membar #Sync
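
	/*
	 * A side note: TTE_L locks the entry, so the automatic replacement
	 * policy used by stores to ASI_DTLB_DATA_IN_REG can never evict it.
	 * The context 1 copy installed below omits TTE_L, as that mapping is
	 * only needed temporarily.
	 */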

	/*
	 * Because we cannot use global mappings (because we want to have
	 * separate 64-bit address spaces for both the kernel and userspace),
	 * we prepare the identity mapping also in context 1. This step is
	 * required by the code installing the ITLB mapping.
	 */
	! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi
	membar #Sync

...

	SET_TLB_DATA(g1, g2, TTE_W)		! use non-global mapping
	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG
	membar #Sync

	/*
	 * Now it is time to take over the IMMU. Unfortunately, this cannot be
	 * done as easily as with the DMMU, because the IMMU maps the very
	 * code it executes.
	 *
	 * [ Note that brave experiments with disabling the IMMU and using the
	 * DMMU approach failed after a dozen desperate days with little
	 * success. ]
	 *
	 * The approach used here is inspired by OpenBSD. First, the kernel
	 * creates an IMMU mapping for itself in context 1 (MEM_CONTEXT_TEMP)
	 * and switches to it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
	 * afterwards and replaced with the permanent kernel mapping. Finally,
	 * the kernel switches back to context 0 and demaps context 1.
	 *
	 * Moreover, the IMMU requires the use of FLUSH instructions. That is
	 * OK, because we always use operands whose addresses are already
	 * mapped by the taken-over DTLB.
	 */
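
	/*
	 * An illustrative sketch (not part of this file; the register offset
	 * name VA_PRIMARY_CONTEXT_REG is an assumption): the switch to
	 * context 1 described above boils down to a sequence of this shape:
	 *
	 *	set MEM_CONTEXT_TEMP, %g1
	 *	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi	! %asi is still ASI_DMMU
	 *	flush %g5	! FLUSH a DTLB-mapped kernel address
	 */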

	set kernel_image_start, %g5

	! write ITLB tag of context 1

...

	srlx %g1, UPA_CONFIG_MID_SHIFT, %g1
	and %g1, UPA_CONFIG_MID_MASK, %g1

#ifdef CONFIG_SMP
	/*
	 * Active loop for APs until the BSP picks them up. A processor cannot
	 * leave the loop until the global variable 'waking_up_mid' equals its
	 * MID.
	 */
	set waking_up_mid, %g2
2:
	ldx [%g2], %g3
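
	/*
	 * The rest of the wait loop is elided from this excerpt. A minimal
	 * sketch of how such a loop typically continues (hypothetical, not
	 * the actual code) compares the loaded value with this processor's
	 * MID, computed into %g1 above, and spins until they match:
	 *
	 *	cmp %g3, %g1
	 *	bne %xcc, 2b
	 *	nop			! branch delay slot
	 */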

...

.section K_DATA_START, "aw", @progbits

/*
 * Create a small stack to be used by the bootstrap processor. It is going to
 * be used only for a very limited period of time, but we switch to it anyway,
 * just to be sure we are properly initialized.
 *
 * What is important is that this piece of memory is covered by the 4M DTLB
 * locked entry and therefore there will be no surprises like deadly
 * combinations of a spill trap and a TLB miss on the stack address.
 */

#define INITIAL_STACK_SIZE 1024

.align STACK_ALIGNMENT
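
/*
 * The stack area itself is elided from this excerpt; a minimal sketch
 * (hypothetical) of such a reservation:
 *
 *	.space INITIAL_STACK_SIZE
 */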

...

.global physmem_base		! copy of the physical memory base address
physmem_base:
	.quad 0

/*
 * This variable is used by the fast_data_MMU_miss trap handler. At runtime,
 * it is further modified to reflect the starting address of physical memory.
 */
.global kernel_8k_tlb_data_template
kernel_8k_tlb_data_template:
#ifdef CONFIG_VIRT_IDX_DCACHE
	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
	    TTE_CV | TTE_P | TTE_W)
#else /* CONFIG_VIRT_IDX_DCACHE */
	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
	    TTE_P | TTE_W)
#endif /* CONFIG_VIRT_IDX_DCACHE */
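
/*
 * A side note on the TTE data fields used above: V is the valid bit, SIZE
 * selects the 8K page size, CP/CV mark the page cacheable in the physically
 * and virtually indexed caches, P restricts access to privileged code, and W
 * makes the page writable. TTE_CV is included only when the data cache is
 * virtually indexed (CONFIG_VIRT_IDX_DCACHE).
 */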