--- Rev 3862
+++ Rev 3993
@@ -50 +50 @@
 #include <arch/trap/trap.h>
 #include <arch/trap/exception.h>
 #include <panic.h>
 #include <arch/asm.h>
 #include <arch/cpu.h>
+#include <arch/mm/pagesize.h>
 
 #ifdef CONFIG_TSB
 #include <arch/mm/tsb.h>
 #endif
 
-#if 0
-static void dtlb_pte_copy(pte_t *, index_t, bool);
-static void itlb_pte_copy(pte_t *, index_t);
+static void itlb_pte_copy(pte_t *);
+static void dtlb_pte_copy(pte_t *, bool);
 static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
-static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
+static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
     const char *);
 static void do_fast_data_access_protection_fault(istate_t *,
-    tlb_tag_access_reg_t, const char *);
+    uint64_t, const char *);
 
+#if 0
 char *context_encoding[] = {
     "Primary",
     "Secondary",
     "Nucleus",
     "Reserved"
 };
 #endif
 
 /*
+ * The assembly language routine passes a 64-bit parameter to the Data Access
+ * MMU Miss and Data Access Protection handlers. The parameter encapsulates
+ * the virtual address of the faulting page and the faulting context. The most
+ * significant 51 bits represent the VA of the faulting page and the least
+ * significant 13 bits represent the faulting context. The following macros
+ * extract the page and context out of the 64-bit parameter:
+ */
+
+/* extracts the VA of the faulting page */
+#define DMISS_ADDRESS(page_and_ctx)  (((page_and_ctx) >> 13) << 13)
+
+/* extracts the faulting context */
+#define DMISS_CONTEXT(page_and_ctx)  ((page_and_ctx) & 0x1fff)
+
+/*
  * Invalidate all non-locked DTLB and ITLB entries.
  */
 void tlb_arch_init(void)
 {
     tlb_invalidate_all();
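The two DMISS_* macros introduced in this hunk are plain shift and mask operations, so their behaviour can be checked in isolation. The following standalone snippet is illustrative only: the macro definitions are repeated so that it compiles on its own, and the packed value is a made-up example rather than one taken from a real fault.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the macros added in this revision. */
#define DMISS_ADDRESS(page_and_ctx)  (((page_and_ctx) >> 13) << 13)
#define DMISS_CONTEXT(page_and_ctx)  ((page_and_ctx) & 0x1fff)

int main(void)
{
    /* Hypothetical parameter: 8K-aligned VA 0x4000a2a000 packed with context 7. */
    uint64_t page_and_ctx = UINT64_C(0x4000a2a000) | 7;

    /* Prints "va  = 4000a2a000" and "ctx = 7". */
    printf("va  = %llx\n", (unsigned long long) DMISS_ADDRESS(page_and_ctx));
    printf("ctx = %llu\n", (unsigned long long) DMISS_CONTEXT(page_and_ctx));
    return 0;
}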
@@ -125 +141 @@
 }
 
 /** Copy PTE to TLB.
  *
  * @param t      Page Table Entry to be copied.
- * @param index  Zero if lower 8K-subpage, one if higher 8K-subpage.
  * @param ro     If true, the entry will be created read-only, regardless
  *               of its w field.
  */
-#if 0
-void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
+void dtlb_pte_copy(pte_t *t, bool ro)
 {
-    tlb_tag_access_reg_t tag;
-    tlb_data_t data;
-    page_address_t pg;
-    frame_address_t fr;
-
-    pg.address = t->page + (index << MMU_PAGE_WIDTH);
-    fr.address = t->frame + (index << MMU_PAGE_WIDTH);
-
-    tag.value = 0;
-    tag.context = t->as->asid;
-    tag.vpn = pg.vpn;
-
-    dtlb_tag_access_write(tag.value);
+    tte_data_t data;
 
     data.value = 0;
     data.v = true;
-    data.size = PAGESIZE_8K;
-    data.pfn = fr.pfn;
-    data.l = false;
+    data.nfo = false;
+    data.ra = (t->frame) >> FRAME_WIDTH;
+    data.ie = false;
+    data.e = false;
     data.cp = t->c;
 #ifdef CONFIG_VIRT_IDX_DCACHE
     data.cv = t->c;
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-    data.p = t->k;    /* p like privileged */
+#endif
+    data.p = t->k;
+    data.x = false;
     data.w = ro ? false : t->w;
-    data.g = t->g;
+    data.size = PAGESIZE_8K;
 
-    dtlb_data_in_write(data.value);
+    __hypercall_hyperfast(
+        t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR);
 }
-#endif
 
 /** Copy PTE to ITLB.
  *
  * @param t      Page Table Entry to be copied.
- * @param index  Zero if lower 8K-subpage, one if higher 8K-subpage.
  */
-#if 0
-void itlb_pte_copy(pte_t *t, index_t index)
+void itlb_pte_copy(pte_t *t)
 {
-    tlb_tag_access_reg_t tag;
-    tlb_data_t data;
-    page_address_t pg;
-    frame_address_t fr;
-
-    pg.address = t->page + (index << MMU_PAGE_WIDTH);
-    fr.address = t->frame + (index << MMU_PAGE_WIDTH);
-
-    tag.value = 0;
-    tag.context = t->as->asid;
-    tag.vpn = pg.vpn;
-
-    itlb_tag_access_write(tag.value);
+    tte_data_t data;
 
     data.value = 0;
     data.v = true;
-    data.size = PAGESIZE_8K;
-    data.pfn = fr.pfn;
-    data.l = false;
+    data.nfo = false;
+    data.ra = (t->frame) >> FRAME_WIDTH;
+    data.ie = false;
+    data.e = false;
     data.cp = t->c;
-    data.p = t->k;    /* p like privileged */
+    data.cv = false;
+    data.p = t->k;
+    data.x = true;
     data.w = false;
-    data.g = t->g;
+    data.size = PAGESIZE_8K;
 
-    itlb_data_in_write(data.value);
+    __hypercall_hyperfast(
+        t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
 }
-#endif
 
 /** ITLB miss handler. */
 void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
 {
-#if 0
     uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
-    index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
     pte_t *t;
 
     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
+
     if (t && PTE_EXECUTABLE(t)) {
         /*
          * The mapping was found in the software page hash table.
          * Insert it into ITLB.
          */
         t->a = true;
-        itlb_pte_copy(t, index);
+        itlb_pte_copy(t);
 #ifdef CONFIG_TSB
-        itsb_pte_copy(t, index);
+        //itsb_pte_copy(t, index);
 #endif
         page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to the address space page fault
@@ -231 +223 @@
         if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
             do_fast_instruction_access_mmu_miss_fault(istate,
                 __func__);
         }
     }
-#endif
 }
 
 /** DTLB miss handler.
  *
  * Note that some faults (e.g. kernel faults) were already resolved by the
  * low-level, assembly language part of the fast_data_access_mmu_miss handler.
  *
- * @param tag           Content of the TLB Tag Access register as it existed
- *                      when the trap happened. This is to prevent confusion
- *                      created by clobbered Tag Access register during a nested
- *                      DTLB miss.
+ * @param page_and_ctx  A 64-bit value describing the fault. The most
+ *                      significant 51 bits of the value contain the virtual
+ *                      address which caused the fault truncated to the page
+ *                      boundary. The least significant 13 bits of the value
+ *                      contain the number of the context in which the fault
+ *                      occurred.
  * @param istate        Interrupted state saved on the stack.
  */
-//void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
-//{
-#if 0
-    uintptr_t va;
-    index_t index;
+void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate)
+{
     pte_t *t;
+    uintptr_t va = DMISS_ADDRESS(page_and_ctx);
+    uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
 
-    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
-    index = tag.vpn % MMU_PAGES_PER_PAGE;
-
-    if (tag.context == ASID_KERNEL) {
-        if (!tag.vpn) {
+    if (ctx == ASID_KERNEL) {
+        if (va == 0) {
             /* NULL access in kernel */
-            do_fast_data_access_mmu_miss_fault(istate, tag,
+            do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
                 __func__);
         }
-        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
+        do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, "Unexpected "
             "kernel page fault.");
     }
 
     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
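The handlers changed in this hunk now take the packed uint64_t parameter instead of the Tag Access register value. The real packing is done by the low-level assembly part of the trap handler, which this diff does not show; the snippet below is only a sketch of the documented layout (top 51 bits hold the page-aligned VA, low 13 bits the context), with made-up sample values, showing that the DMISS_* extraction is the exact inverse of such packing.

#include <stdint.h>
#include <stdio.h>

/* Illustrative packing: bits 13..63 carry the page-aligned VA, bits 0..12 the context. */
static uint64_t pack_page_and_ctx(uint64_t va, uint16_t ctx)
{
    return (va & ~UINT64_C(0x1fff)) | (ctx & 0x1fff);
}

int main(void)
{
    /* Hypothetical fault at VA 0x7fe123 (page 0x7fe000) in context 5. */
    uint64_t page_and_ctx = pack_page_and_ctx(UINT64_C(0x7fe123), 5);

    /* Recover the fields the same way DMISS_ADDRESS/DMISS_CONTEXT do. */
    printf("page = %llx\n", (unsigned long long) ((page_and_ctx >> 13) << 13)); /* 7fe000 */
    printf("ctx  = %llu\n", (unsigned long long) (page_and_ctx & 0x1fff));      /* 5 */
    return 0;
}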
@@ -273 +262 @@
         /*
          * The mapping was found in the software page hash table.
          * Insert it into DTLB.
          */
         t->a = true;
-        dtlb_pte_copy(t, index, true);
+        dtlb_pte_copy(t, true);
 #ifdef CONFIG_TSB
-        dtsb_pte_copy(t, index, true);
+        //dtsb_pte_copy(t, true);
 #endif
         page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to the address space page fault
          * handler.
          */
         page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
-            do_fast_data_access_mmu_miss_fault(istate, tag,
+            do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
                 __func__);
         }
     }
-#endif
-//}
+}
 
 /** DTLB protection fault handler.
  *
- * @param tag           Content of the TLB Tag Access register as it existed
- *                      when the trap happened. This is to prevent confusion
- *                      created by clobbered Tag Access register during a nested
- *                      DTLB miss.
+ * @param page_and_ctx  A 64-bit value describing the fault. The most
+ *                      significant 51 bits of the value contain the virtual
+ *                      address which caused the fault truncated to the page
+ *                      boundary. The least significant 13 bits of the value
+ *                      contain the number of the context in which the fault
+ *                      occurred.
  * @param istate        Interrupted state saved on the stack.
  */
-//void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
-//{
-#if 0
-    uintptr_t va;
-    index_t index;
+void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate)
+{
     pte_t *t;
-
-    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
-    index = tag.vpn % MMU_PAGES_PER_PAGE;    /* 16K-page emulation */
+    uintptr_t va = DMISS_ADDRESS(page_and_ctx);
+    uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
 
     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
     if (t && PTE_WRITABLE(t)) {
         /*
@@ -320 +306 @@
          * writable. Demap the old mapping and insert an updated mapping
          * into DTLB.
          */
         t->a = true;
         t->d = true;
-        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
-            va + index * MMU_PAGE_SIZE);
-        dtlb_pte_copy(t, index, false);
+        mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
+        dtlb_pte_copy(t, false);
 #ifdef CONFIG_TSB
-        dtsb_pte_copy(t, index, false);
+        //dtsb_pte_copy(t, false);
 #endif
         page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to the address space page fault
          * handler.
          */
         page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
-            do_fast_data_access_protection_fault(istate, tag,
+            do_fast_data_access_protection_fault(istate, page_and_ctx,
                 __func__);
         }
     }
-#endif
-//}
+}
 
 /** Print TLB entry (for debugging purposes).
  *
  * The diag field has been left out in order to make this function more generic
  * (there is no diag field in US3 architecture).
@@ -361 +345 @@
         t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
         d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
 }
 #endif
 
-#if defined (US)
-
-/** Print contents of both TLBs. */
 void tlb_print(void)
-#if 0
 {
+#if 0
     int i;
     tlb_data_t d;
     tlb_tag_read_reg_t t;
 
     printf("I-TLB contents:\n");
@@ -387 +368 @@
         print_tlb_entry(i, t, d);
     }
 #endif
 }
 
-#elif defined (US3)
-
-/** Print contents of all TLBs. */
-void tlb_print(void)
-{
-#if 0
-    int i;
-    tlb_data_t d;
-    tlb_tag_read_reg_t t;
-
-    printf("TLB_ISMALL contents:\n");
-    for (i = 0; i < tlb_ismall_size(); i++) {
-        d.value = dtlb_data_access_read(TLB_ISMALL, i);
-        t.value = dtlb_tag_read_read(TLB_ISMALL, i);
-        print_tlb_entry(i, t, d);
-    }
-
-    printf("TLB_IBIG contents:\n");
-    for (i = 0; i < tlb_ibig_size(); i++) {
-        d.value = dtlb_data_access_read(TLB_IBIG, i);
-        t.value = dtlb_tag_read_read(TLB_IBIG, i);
-        print_tlb_entry(i, t, d);
-    }
-
-    printf("TLB_DSMALL contents:\n");
-    for (i = 0; i < tlb_dsmall_size(); i++) {
-        d.value = dtlb_data_access_read(TLB_DSMALL, i);
-        t.value = dtlb_tag_read_read(TLB_DSMALL, i);
-        print_tlb_entry(i, t, d);
-    }
-
-    printf("TLB_DBIG_1 contents:\n");
-    for (i = 0; i < tlb_dbig_size(); i++) {
-        d.value = dtlb_data_access_read(TLB_DBIG_0, i);
-        t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
-        print_tlb_entry(i, t, d);
-    }
-
-    printf("TLB_DBIG_2 contents:\n");
-    for (i = 0; i < tlb_dbig_size(); i++) {
-        d.value = dtlb_data_access_read(TLB_DBIG_1, i);
-        t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
-        print_tlb_entry(i, t, d);
-    }
-#endif
-}
-
-#endif
-
-#if 0
 void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
     const char *str)
 {
     fault_if_from_uspace(istate, "%s\n", str);
     dump_istate(istate);
     panic("%s\n", str);
 }
-#endif
 
-#if 0
 void do_fast_data_access_mmu_miss_fault(istate_t *istate,
-    tlb_tag_access_reg_t tag, const char *str)
+    uint64_t page_and_ctx, const char *str)
 {
-    uintptr_t va;
-
-    va = tag.vpn << MMU_PAGE_WIDTH;
-    if (tag.context) {
-        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
-            tag.context);
+    if (DMISS_CONTEXT(page_and_ctx)) {
+        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx),
+            DMISS_CONTEXT(page_and_ctx));
     }
     dump_istate(istate);
-    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
+    printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
     panic("%s\n", str);
 }
-#endif
 
-#if 0
 void do_fast_data_access_protection_fault(istate_t *istate,
-    tlb_tag_access_reg_t tag, const char *str)
+    uint64_t page_and_ctx, const char *str)
 {
-    uintptr_t va;
-
-    va = tag.vpn << MMU_PAGE_WIDTH;
-
-    if (tag.context) {
-        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
-            tag.context);
+    if (DMISS_CONTEXT(page_and_ctx)) {
+        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx),
+            DMISS_CONTEXT(page_and_ctx));
     }
-    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
+    printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
     dump_istate(istate);
     panic("%s\n", str);
 }
-#endif
 
 void describe_mmu_fault(void)
 {
 }
 
-#if defined (US3)
-/** Invalidates given TLB entry if and only if it is non-locked or global.
- *
- * @param tlb    TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1,
- *               TLB_ISMALL, TLB_IBIG).
- * @param entry  Entry index within the given TLB.
- */
-#if 0
-static void tlb_invalidate_entry(int tlb, index_t entry)
-{
-    tlb_data_t d;
-    tlb_tag_read_reg_t t;
-
-    if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) {
-        d.value = dtlb_data_access_read(tlb, entry);
-        if (!d.l || d.g) {
-            t.value = dtlb_tag_read_read(tlb, entry);
-            d.v = false;
-            dtlb_tag_access_write(t.value);
-            dtlb_data_access_write(tlb, entry, d.value);
-        }
-    } else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) {
-        d.value = itlb_data_access_read(tlb, entry);
-        if (!d.l || d.g) {
-            t.value = itlb_tag_read_read(tlb, entry);
-            d.v = false;
-            itlb_tag_access_write(t.value);
-            itlb_data_access_write(tlb, entry, d.value);
-        }
-    }
-}
-#endif
-#endif
-
 /** Invalidate all unlocked ITLB and DTLB entries. */
 void tlb_invalidate_all(void)
 {
     uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
         MMU_FLAG_DTLB | MMU_FLAG_ITLB);
413 | MMU_FLAG_DTLB | MMU_FLAG_ITLB); |