--- Rev 1822
+++ Rev 1823
@@ Line 25 (Rev 1822) | Line 25 (Rev 1823) @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #
 
 #include <arch/regdef.h>
+#include <arch/boot/boot.h>
+
+#include <arch/mm/mmu.h>
+#include <arch/mm/tlb.h>
+#include <arch/mm/tte.h>
 
 .register %g2, #scratch
 .register %g3, #scratch
 .register %g6, #scratch
 .register %g7, #scratch
@@ Line 50 (Rev 1822) | Line 55 (Rev 1823) @@
  * - identity mapping for memory stack
  */
 
 .global kernel_image_start
 kernel_image_start:
-        flushw                          ! flush all but the active register window
 
         /*
-         * Disable interrupts and disable 32-bit address masking.
+         * Setup basic runtime environment.
         */
+
+        flushw                          ! flush all but the active register window
+        wrpr %g0, 0, %tl                ! TL = 0, primary context register is used
+
+        ! Disable interrupts and disable 32-bit address masking.
-        rdpr %pstate, %l0
-        and %l0, ~(PSTATE_AM_BIT|PSTATE_IE_BIT), %l0
-        wrpr %l0, 0, %pstate
+        rdpr %pstate, %g1
+        and %g1, ~(PSTATE_AM_BIT|PSTATE_IE_BIT), %g1
+        wrpr %g1, 0, %pstate
+
+        wrpr %r0, 0, %pil               ! initialize %pil
 
         /*
         * Copy the bootinfo structure passed from the boot loader
         * to the kernel bootinfo structure.
         */
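For readers following the new %pstate sequence in the hunk above, here is a minimal C sketch of what it computes: read the current processor state, clear the 32-bit address masking (AM) and interrupt enable (IE) bits, and write the value back. PSTATE_AM_BIT and PSTATE_IE_BIT are assumed to be the masks provided by <arch/regdef.h>; pstate_read() and pstate_write() are hypothetical stand-ins for the privileged rdpr/wrpr instructions, not kernel functions.

        #include <stdint.h>
        #include <arch/regdef.h>   /* assumed to provide PSTATE_AM_BIT and PSTATE_IE_BIT */

        /* Hypothetical accessors standing in for "rdpr %pstate" and "wrpr ..., %pstate". */
        extern uint64_t pstate_read(void);
        extern void pstate_write(uint64_t value);

        static void disable_am_and_interrupts(void)
        {
                uint64_t pstate = pstate_read();              /* rdpr %pstate, %g1 */
                pstate &= ~(PSTATE_AM_BIT | PSTATE_IE_BIT);   /* clear AM and IE */
                pstate_write(pstate);                         /* wrpr %g1, 0, %pstate */
        }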
@@ Line 69 (Rev 1822) | Line 80 (Rev 1823) @@
         mov %o0, %o1
         set bootinfo, %o0
         call memcpy
         nop
 
-        set kernel_image_start, %o0
         /*
-         * Take over control of MMU.
+         * Switch to kernel trap table.
+         */
+        set trap_table, %g1
+        wrpr %g1, 0, %tba
+
+        /*
+         * Take over the DMMU by installing a global locked
+         * TTE entry identically mapping the first 4M
+         * of memory.
         *
-         * First, take over DMMU for which we don't need to issue
-         * any FLUSH instructions. Because of that, we can
-         * demap the old DTLB pretty straightforwardly.
+         * In case of the DMMU, no FLUSH instructions need to be
+         * issued. Because of that, the old DTLB contents can
+         * be demapped pretty straightforwardly and without
+         * causing any traps.
         */
-        call take_over_tlb_and_tt
-        nop
 
-        wrpr %r0, 0, %pil
+        wr %g0, ASI_DMMU, %asi
 
+#define SET_TLB_DEMAP_CMD(r1, context_id) \
+        set (TLB_DEMAP_CONTEXT<<TLB_DEMAP_TYPE_SHIFT) | (context_id<<TLB_DEMAP_CONTEXT_SHIFT), %r1
+
+        ! demap context 0
+        SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
+        stxa %g0, [%g1] ASI_DMMU_DEMAP
+        membar #Sync
+
+#define SET_TLB_TAG(r1, context) \
+        set VMA | (context<<TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
+
+        ! write DTLB tag
+        SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
+        stxa %g1, [VA_DMMU_TAG_ACCESS] %asi
+        membar #Sync
+
+#define SET_TLB_DATA(r1, r2, imm) \
+        set TTE_L | TTE_CP | TTE_P | TTE_W | LMA | imm, %r1; \
+        set PAGESIZE_4M, %r2; \
+        sllx %r2, TTE_SIZE_SHIFT, %r2; \
+        or %r1, %r2, %r1; \
+        set 1, %r2; \
+        sllx %r2, TTE_V_SHIFT, %r2; \
+        or %r1, %r2, %r1;
+
+        ! write DTLB data and install the kernel mapping
+        SET_TLB_DATA(g1, g2, TTE_G)
+        stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG
+        membar #Sync
+
+        /*
+         * Now it is time to take over the IMMU.
+         * Unfortunately, it cannot be done as easily as the DMMU,
+         * because the IMMU is mapping the code it executes.
+         *
+         * [ Note that brave experiments with disabling the IMMU
+         *   and using the DMMU approach failed after a dozen
+         *   desperate days with only little success. ]
+         *
+         * The approach used here is inspired by OpenBSD.
+         * First, the kernel creates an IMMU mapping for itself
+         * in context 1 (MEM_CONTEXT_TEMP) and switches to
+         * it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
+         * afterwards and replaced with the permanent kernel
+         * mapping. Finally, the kernel switches back to
+         * context 0 and demaps context 1.
+         *
+         * Moreover, the IMMU requires use of the FLUSH instructions.
+         * But that is OK because we always use operands with
+         * addresses already mapped by the taken over DTLB.
+         */
+
+        set kernel_image_start, %g7
+
+        ! write ITLB tag of context 1
+        SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
+        set VA_DMMU_TAG_ACCESS, %g2
+        stxa %g1, [%g2] ASI_IMMU
+        flush %g7
+
+        ! write ITLB data and install the temporary mapping in context 1
+        SET_TLB_DATA(g1, g2, 0)         ! use non-global mapping
+        stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
+        flush %g7
+
+        ! switch to context 1
+        set MEM_CONTEXT_TEMP, %g1
+        stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi         ! ASI_DMMU is correct here !!!
+        flush %g7
+
+        ! demap context 0
+        SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
+        stxa %g0, [%g1] ASI_IMMU_DEMAP
+        flush %g7
+
+        ! write ITLB tag of context 0
+        SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
+        set VA_DMMU_TAG_ACCESS, %g2
+        stxa %g1, [%g2] ASI_IMMU
+        flush %g7
+
+        ! write ITLB data and install the permanent kernel mapping in context 0
+        SET_TLB_DATA(g1, g2, 0)         ! use non-global mapping
+        stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
+        flush %g7
+
+        ! switch to context 0
+        stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi         ! ASI_DMMU is correct here !!!
+        flush %g7
+
+        ! ensure nucleus mapping
+        wrpr %g0, 1, %tl
+
+        ! set context 1 in the primary context register
+        set MEM_CONTEXT_TEMP, %g1
+        stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi         ! ASI_DMMU is correct here !!!
+        flush %g7
+
+        ! demap context 1
+        SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_PRIMARY)
+        stxa %g0, [%g1] ASI_IMMU_DEMAP
+        flush %g7
+
+        ! set context 0 in the primary context register
+        stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi         ! ASI_DMMU is correct here !!!
+        flush %g7
+
+        ! set TL back to 0
+        wrpr %g0, 0, %tl
+
         call main_bsp
         nop
 
         /* Not reached. */
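The three SET_TLB_* macros added in this revision only pack bit fields into 64-bit values that the stxa instructions then hand to the MMU. As a rough guide to those compositions, here is a C sketch that mirrors the macros one-to-one; the TLB_*, TTE_*, PAGESIZE_4M and TLB_TAG_ACCESS_* constants are assumed to come from the newly included <arch/mm/mmu.h>, <arch/mm/tlb.h> and <arch/mm/tte.h>, and VMA/LMA (the kernel's virtual and load addresses) are assumed to be supplied by the build system.

        #include <stdint.h>
        #include <arch/mm/mmu.h>   /* assumed: MEM_CONTEXT_* and VA_* definitions */
        #include <arch/mm/tlb.h>   /* assumed: TLB_DEMAP_* and TLB_TAG_ACCESS_* definitions */
        #include <arch/mm/tte.h>   /* assumed: TTE_* definitions */

        /* Mirrors SET_TLB_DEMAP_CMD: a demap-by-context command for the given context id. */
        static uint64_t tlb_demap_cmd(uint64_t context_id)
        {
                return ((uint64_t) TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) |
                    (context_id << TLB_DEMAP_CONTEXT_SHIFT);
        }

        /* Mirrors SET_TLB_TAG: tag access value naming the kernel VMA in a given context. */
        static uint64_t tlb_tag(uint64_t context)
        {
                return VMA | (context << TLB_TAG_ACCESS_CONTEXT_SHIFT);
        }

        /* Mirrors SET_TLB_DATA: a valid, locked, cacheable, privileged, writable 4M TTE
         * pointing at the kernel load address (LMA); extra_flags adds e.g. TTE_G. */
        static uint64_t tlb_data(uint64_t extra_flags)
        {
                uint64_t data = TTE_L | TTE_CP | TTE_P | TTE_W | LMA | extra_flags;
                data |= (uint64_t) PAGESIZE_4M << TTE_SIZE_SHIFT;   /* page size field */
                data |= UINT64_C(1) << TTE_V_SHIFT;                 /* valid bit */
                return data;
        }

With these in hand, the DMMU takeover above reduces to three stores: the demap command for the nucleus context goes through ASI_DMMU_DEMAP, the tag for MEM_CONTEXT_KERNEL goes to the tag access register, and tlb_data(TTE_G) goes through ASI_DTLB_DATA_IN_REG.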
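The comment block explaining the IMMU takeover describes a fixed ordering of steps. Purely as an illustrative outline of that ordering (not kernel code), the sequence can be restated as follows; every helper below is a hypothetical wrapper around the corresponding stxa/flush or wrpr sequence shown in the diff, and tlb_tag/tlb_data/tlb_demap_cmd refer to the sketch above.

        /* Hypothetical wrappers around the stxa/flush and wrpr sequences in the diff. */
        extern void itlb_tag_access_write(uint64_t tag);
        extern void itlb_data_in_write(uint64_t data);
        extern void itlb_demap(uint64_t cmd);
        extern void primary_context_write(uint64_t context);
        extern void trap_level_write(uint64_t tl);

        /* Illustrative outline only: the order of the ITLB takeover steps. */
        static void take_over_itlb(void)
        {
                /* 1. Install a temporary, non-global 4M mapping in context 1. */
                itlb_tag_access_write(tlb_tag(MEM_CONTEXT_TEMP));
                itlb_data_in_write(tlb_data(0));

                /* 2. Switch the primary context to context 1 and execute from it. */
                primary_context_write(MEM_CONTEXT_TEMP);

                /* 3. Demap the boot-time entries of context 0 (nucleus demap). */
                itlb_demap(tlb_demap_cmd(TLB_DEMAP_NUCLEUS));

                /* 4. Install the permanent kernel mapping in context 0 and switch back. */
                itlb_tag_access_write(tlb_tag(MEM_CONTEXT_KERNEL));
                itlb_data_in_write(tlb_data(0));
                primary_context_write(MEM_CONTEXT_KERNEL);

                /* 5. Raise TL to 1 so the nucleus mapping is used, point the primary
                 * context register at context 1, demap it (primary demap), then
                 * restore context 0 and drop TL back to 0. */
                trap_level_write(1);
                primary_context_write(MEM_CONTEXT_TEMP);
                itlb_demap(tlb_demap_cmd(TLB_DEMAP_PRIMARY));
                primary_context_write(MEM_CONTEXT_KERNEL);
                trap_level_write(0);
        }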