Rev 3817 | Rev 4129 | Go to most recent revision | Show entire file | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
| Rev 3817 | Rev 3862 | ||
|---|---|---|---|
| Line 31... | Line 31... | ||
| 31 | * @{ |
31 | * @{ |
| 32 | */ |
32 | */ |
| 33 | /** @file |
33 | /** @file |
| 34 | */ |
34 | */ |
| 35 | 35 | ||
| - | 36 | /* SUN4V-OK */ |
|
| - | 37 | ||
| 36 | #include <arch/mm/as.h> |
38 | #include <arch/mm/as.h> |
| 37 | #include <arch/mm/pagesize.h> |
39 | #include <arch/mm/pagesize.h> |
| 38 | #include <arch/mm/sun4u/tlb.h> |
- | |
| 39 | #include <arch/mm/sun4u/tlb.h> |
40 | #include <arch/mm/tlb.h> |
| 40 | #include <genarch/mm/page_ht.h> |
41 | #include <genarch/mm/page_ht.h> |
| 41 | #include <genarch/mm/asid_fifo.h> |
42 | #include <genarch/mm/asid_fifo.h> |
| 42 | #include <debug.h> |
43 | #include <debug.h> |
| 43 | #include <config.h> |
44 | #include <config.h> |
| - | 45 | #include <arch/sun4v/hypercall.h> |
|
| 44 | 46 | ||
| 45 | #ifdef CONFIG_TSB |
47 | #ifdef CONFIG_TSB |
| 46 | #include <arch/mm/tsb.h> |
48 | #include <arch/mm/tsb.h> |
| 47 | #include <arch/memstr.h> |
49 | #include <arch/memstr.h> |
| 48 | #include <arch/asm.h> |
50 | #include <arch/asm.h> |
| Line 61... | Line 63... | ||
| 61 | } |
63 | } |
| 62 | 64 | ||
| 63 | int as_constructor_arch(as_t *as, int flags) |
65 | int as_constructor_arch(as_t *as, int flags) |
| 64 | { |
66 | { |
| 65 | #ifdef CONFIG_TSB |
67 | #ifdef CONFIG_TSB |
| 66 | /* |
- | |
| 67 | * The order must be calculated with respect to the emulated |
- | |
| 68 | * 16K page size. |
68 | int order = fnzb32( |
| 69 | */ |
- | |
| 70 | int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * |
- | |
| 71 | sizeof(tsb_entry_t)) >> FRAME_WIDTH); |
69 | (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH); |
| 72 | 70 | ||
| 73 | uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA); |
71 | uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA); |
| 74 | 72 | ||
| 75 | if (!tsb) |
73 | if (!tsb) |
| 76 | return -1; |
74 | return -1; |
| 77 | 75 | ||
| - | 76 | as->arch.tsb_description.page_size = PAGESIZE_8K; |
|
| 78 | as->arch.itsb = (tsb_entry_t *) tsb; |
77 | as->arch.tsb_description.associativity = 1; |
| 79 | as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * |
78 | as->arch.tsb_description.num_ttes = TSB_ENTRY_COUNT; |
| - | 79 | as->arch.tsb_description.pgsize_mask = 1 << PAGESIZE_8K; |
|
| 80 | sizeof(tsb_entry_t)); |
80 | as->arch.tsb_description.tsb_base = tsb; |
| - | 81 | as->arch.tsb_description.reserved = 0; |
|
| 81 | 82 | ||
| 82 | memsetb(as->arch.itsb, |
83 | memsetb((void *) as->arch.tsb_description.tsb_base, |
| 83 | (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0); |
84 | TSB_ENTRY_COUNT * sizeof(tsb_entry_t), 0); |
| 84 | #endif |
85 | #endif |
| 85 | return 0; |
86 | return 0; |
| 86 | } |
87 | } |
| 87 | 88 | ||
| 88 | int as_destructor_arch(as_t *as) |
89 | int as_destructor_arch(as_t *as) |
| 89 | { |
90 | { |
| 90 | #ifdef CONFIG_TSB |
91 | #ifdef CONFIG_TSB |
| 91 | /* |
- | |
| 92 | * The count must be calculated with respect to the emulated 16K page |
- | |
| 93 | * size. |
- | |
| 94 | */ |
- | |
| 95 | count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * |
- | |
| 96 | sizeof(tsb_entry_t)) >> FRAME_WIDTH; |
92 | count_t cnt = (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH; |
| 97 | frame_free(KA2PA((uintptr_t) as->arch.itsb)); |
93 | frame_free(KA2PA((uintptr_t) as->arch.tsb_description.tsb_base)); |
| 98 | return cnt; |
94 | return cnt; |
| 99 | #else |
95 | #else |
| 100 | return 0; |
96 | return 0; |
| 101 | #endif |
97 | #endif |
| 102 | } |
98 | } |
| 103 | 99 | ||
| 104 | int as_create_arch(as_t *as, int flags) |
100 | int as_create_arch(as_t *as, int flags) |
| 105 | { |
101 | { |
| 106 | #ifdef CONFIG_TSB |
102 | #ifdef CONFIG_TSB |
| 107 | tsb_invalidate(as, 0, (count_t) -1); |
103 | tsb_invalidate(as, 0, (count_t) -1); |
| - | 104 | as->arch.tsb_description.context = as->asid; |
|
| 108 | #endif |
105 | #endif |
| 109 | return 0; |
106 | return 0; |
| 110 | } |
107 | } |
| 111 | 108 | ||
| 112 | /** Perform sparc64-specific tasks when an address space becomes active on the |
109 | /** Perform sparc64-specific tasks when an address space becomes active on the |
| Line 116... | Line 113... | ||
| 116 | * |
113 | * |
| 117 | * @param as Address space. |
114 | * @param as Address space. |
| 118 | */ |
115 | */ |
| 119 | void as_install_arch(as_t *as) |
116 | void as_install_arch(as_t *as) |
| 120 | { |
117 | { |
| 121 | #if 0 |
- | |
| 122 | tlb_context_reg_t ctx; |
- | |
| 123 | - | ||
| 124 | /* |
- | |
| 125 | * Note that we don't and may not lock the address space. That's ok |
- | |
| 126 | * since we only read members that are currently read-only. |
- | |
| 127 | * |
- | |
| 128 | * Moreover, the as->asid is protected by asidlock, which is being held. |
- | |
| 129 | */ |
- | |
| 130 | - | ||
| 131 | /* |
- | |
| 132 | * Write ASID to secondary context register. The primary context |
- | |
| 133 | * register has to be set from TL>0 so it will be filled from the |
- | |
| 134 | * secondary context register from the TL=1 code just before switch to |
- | |
| 135 | * userspace. |
- | |
| 136 | */ |
- | |
| 137 | ctx.v = 0; |
- | |
| 138 | ctx.context = as->asid; |
- | |
| 139 | mmu_secondary_context_write(ctx.v); |
118 | mmu_secondary_context_write(as->asid); |
| 140 | 119 | ||
| 141 | #ifdef CONFIG_TSB |
120 | #ifdef CONFIG_TSB |
| 142 | uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); |
121 | uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); |
| 143 | 122 | ||
| 144 | ASSERT(as->arch.itsb && as->arch.dtsb); |
123 | ASSERT(as->arch.tsb_description.tsb_base); |
| 145 | - | ||
| 146 | uintptr_t tsb = (uintptr_t) as->arch.itsb; |
124 | uintptr_t tsb = as->arch.tsb_description.tsb_base; |
| 147 | 125 | ||
| 148 | if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { |
126 | if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { |
| 149 | /* |
127 | /* |
| 150 | * TSBs were allocated from memory not covered |
128 | * TSBs were allocated from memory not covered |
| 151 | * by the locked 4M kernel DTLB entry. We need |
129 | * by the locked 4M kernel DTLB entry. We need |
| 152 | * to map both TSBs explicitly. |
130 | * to map both TSBs explicitly. |
| 153 | */ |
131 | */ |
| 154 | dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb); |
132 | mmu_demap_page(tsb, 0, MMU_FLAG_DTLB); |
| - | 133 | mmu_map_perm_addr( |
|
| 155 | dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true); |
134 | tsb, KA2PA(tsb), true, true, false, true, |
| - | 135 | PAGESIZE_64K, MMU_FLAG_DTLB); |
|
| 156 | } |
136 | } |
| 157 | - | ||
| 158 | /* |
- | |
| 159 | * Setup TSB Base registers. |
- | |
| 160 | */ |
- | |
| 161 | tsb_base_reg_t tsb_base; |
- | |
| 162 | - | ||
| 163 | tsb_base.value = 0; |
- | |
| 164 | tsb_base.size = TSB_SIZE; |
- | |
| 165 | tsb_base.split = 0; |
- | |
| 166 | 137 | ||
| 167 | tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH; |
- | |
| 168 | itsb_base_write(tsb_base.value); |
- | |
| 169 | tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH; |
138 | __hypercall_fast2(MMU_TSB_CTX0, 1, as->arch.tsb_description.tsb_base); |
| 170 | dtsb_base_write(tsb_base.value); |
- | |
| 171 | 139 | ||
| 172 | #if defined (US3) |
- | |
| 173 | /* |
- | |
| 174 | * Clear the extension registers. |
- | |
| 175 | * In HelenOS, primary and secondary context registers contain |
- | |
| 176 | * equal values and kernel misses (context 0, ie. the nucleus context) |
- | |
| 177 | * are excluded from the TSB miss handler, so it makes no sense |
- | |
| 178 | * to have separate TSBs for primary, secondary and nucleus contexts. |
- | |
| 179 | * Clearing the extension registers will ensure that the value of the |
- | |
| 180 | * TSB Base register will be used as an address of TSB, making the code |
- | |
| 181 | * compatible with the US port. |
- | |
| 182 | */ |
- | |
| 183 | itsb_primary_extension_write(0); |
- | |
| 184 | itsb_nucleus_extension_write(0); |
- | |
| 185 | dtsb_primary_extension_write(0); |
- | |
| 186 | dtsb_secondary_extension_write(0); |
- | |
| 187 | dtsb_nucleus_extension_write(0); |
- | |
| 188 | #endif |
- | |
| 189 | #endif |
- | |
| 190 | #endif |
140 | #endif |
| 191 | } |
141 | } |
| 192 | 142 | ||
| 193 | /** Perform sparc64-specific tasks when an address space is removed from the |
143 | /** Perform sparc64-specific tasks when an address space is removed from the |
| 194 | * processor. |
144 | * processor. |
| Line 197... | Line 147... | ||
| 197 | * |
147 | * |
| 198 | * @param as Address space. |
148 | * @param as Address space. |
| 199 | */ |
149 | */ |
| 200 | void as_deinstall_arch(as_t *as) |
150 | void as_deinstall_arch(as_t *as) |
| 201 | { |
151 | { |
| 202 | - | ||
| 203 | /* |
152 | /* |
| 204 | * Note that we don't and may not lock the address space. That's ok |
153 | * Note that we don't and may not lock the address space. That's ok |
| 205 | * since we only read members that are currently read-only. |
154 | * since we only read members that are currently read-only. |
| 206 | * |
155 | * |
| 207 | * Moreover, the as->asid is protected by asidlock, which is being held. |
156 | * Moreover, the as->asid is protected by asidlock, which is being held. |
| 208 | */ |
157 | */ |
| 209 | - | ||
| 210 | #ifdef CONFIG_TSB |
158 | #ifdef CONFIG_TSB |
| 211 | uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); |
159 | uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); |
| 212 | 160 | ||
| 213 | ASSERT(as->arch.itsb && as->arch.dtsb); |
161 | ASSERT(as->arch.tsb_description.tsb_base); |
| 214 | 162 | ||
| 215 | uintptr_t tsb = (uintptr_t) as->arch.itsb; |
163 | uintptr_t tsb = as->arch.tsb_description.tsb_base; |
| 216 | 164 | ||
| 217 | if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { |
165 | if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { |
| 218 | /* |
166 | /* |
| 219 | * TSBs were allocated from memory not covered |
167 | * TSBs were allocated from memory not covered |
| 220 | * by the locked 4M kernel DTLB entry. We need |
168 | * by the locked 4M kernel DTLB entry. We need |
| 221 | * to demap the entry installed by as_install_arch(). |
169 | * to demap the entry installed by as_install_arch(). |
| 222 | */ |
170 | */ |
| 223 | dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb); |
171 | mmu_demap_page(tsb, 0, MMU_FLAG_DTLB); |
| 224 | } |
172 | } |
| 225 | #endif |
173 | #endif |
| 226 | } |
174 | } |
| 227 | 175 | ||
| 228 | /** @} |
176 | /** @} |