Rev 3993 | Rev 4129

Line 64 (rev 3993) | Line 64 (rev 4129):
```diff
 static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
     const char *);
 static void do_fast_data_access_protection_fault(istate_t *,
     uint64_t, const char *);
 
-#if 0
-char *context_encoding[] = {
-    "Primary",
-    "Secondary",
-    "Nucleus",
-    "Reserved"
-};
-#endif
-
 /*
  * The assembly language routine passes a 64-bit parameter to the Data Access
  * MMU Miss and Data Access protection handlers, the parameter encapsulates
  * a virtual address of the faulting page and the faulting context. The most
  * significant 51 bits represent the VA of the faulting page and the least
```

Line 88 (rev 3993) | Line 79 (rev 4129):
```diff
 #define DMISS_ADDRESS(page_and_ctx) (((page_and_ctx) >> 13) << 13)
 
 /* extracts the faulting context */
 #define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff)
 
+/**
+ * Descriptions of fault types from the MMU Fault status area.
+ *
+ * fault_type[i] contains description of error for which the IFT or DFT
+ * field of the MMU fault status area is i.
+ */
+char *fault_types[] = {
+    "unknown",
+    "fast miss",
+    "fast protection",
+    "MMU miss",
+    "invalid RA",
+    "privileged violation",
+    "protection violation",
+    "NFO access",
+    "so page/NFO side effect",
+    "invalid VA",
+    "invalid ASI",
+    "nc atomic",
+    "privileged action",
+    "unknown",
+    "unaligned access",
+    "invalid page size"
+};
+
+
+/** Array of MMU fault status areas. */
+extern mmu_fault_status_area_t mmu_fsas[MAX_NUM_STRANDS];
+
 /*
  * Invalidate all non-locked DTLB and ITLB entries.
  */
 void tlb_arch_init(void)
 {
```
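
The comment above describes how the assembly stub packs the faulting page VA into the upper 51 bits and the faulting context into the lower 13 bits of a single 64-bit value, which the DMISS_ADDRESS and DMISS_CONTEXT macros then split apart. A minimal standalone sketch of that arithmetic; the sample virtual address and context number are made-up values:

```c
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the DMISS_ADDRESS/DMISS_CONTEXT macros above. */
#define DMISS_ADDRESS(page_and_ctx)  (((page_and_ctx) >> 13) << 13)
#define DMISS_CONTEXT(page_and_ctx)  ((page_and_ctx) & 0x1fff)

int main(void)
{
    /* Made-up faulting VA (8K-aligned page) and context number. */
    uint64_t va = 0x40001234e000ULL;
    uint64_t ctx = 5;

    /* Pack the two fields the way the comment describes: the page VA in
     * the upper 51 bits, the context in the lower 13 bits. */
    uint64_t page_and_ctx = (va & ~UINT64_C(0x1fff)) | (ctx & 0x1fff);

    printf("page = %#llx, context = %llu\n",
        (unsigned long long) DMISS_ADDRESS(page_and_ctx),
        (unsigned long long) DMISS_CONTEXT(page_and_ctx));
    return 0;
}
```

Compiled as ordinary userspace C this prints page = 0x40001234e000, context = 5, i.e. the two fields packed above come back out unchanged.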

Line 107 (rev 3993) | Line 127 (rev 4129):
```diff
  * @param cacheable True if the mapping is cacheable, false otherwise.
  */
 void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
     bool locked, bool cacheable)
 {
-#if 0
-    tlb_tag_access_reg_t tag;
-    tlb_data_t data;
-    page_address_t pg;
-    frame_address_t fr;
-
-    pg.address = page;
-    fr.address = frame;
-
-    tag.context = ASID_KERNEL;
-    tag.vpn = pg.vpn;
-
-    dtlb_tag_access_write(tag.value);
+    tte_data_t data;
 
     data.value = 0;
     data.v = true;
-    data.size = pagesize;
-    data.pfn = fr.pfn;
-    data.l = locked;
+    data.nfo = false;
+    data.ra = frame >> FRAME_WIDTH;
+    data.ie = false;
+    data.e = false;
     data.cp = cacheable;
 #ifdef CONFIG_VIRT_IDX_DCACHE
     data.cv = cacheable;
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-    data.p = true;
-    data.w = true;
-    data.g = false;
-
-    dtlb_data_in_write(data.value);
 #endif
+    data.p = true;
+    data.x = false;
+    data.w = false;
+    data.size = pagesize;
+
+    if (locked) {
+        __hypercall_fast4(
+            MMU_MAP_PERM_ADDR, page, 0, data.value, MMU_FLAG_DTLB);
+    } else {
+        __hypercall_hyperfast(
+            page, ASID_KERNEL, data.value, MMU_FLAG_DTLB, 0,
+            MMU_MAP_ADDR);
+    }
 }
 
 /** Copy PTE to TLB.
  *
  * @param t Page Table Entry to be copied.
```
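
dtlb_insert_mapping() now builds a tte_data_t and hands it to the hypervisor instead of writing the DTLB hardware registers directly: locked mappings go through the MMU_MAP_PERM_ADDR hypercall, all others through the MMU_MAP_ADDR hyperfast trap. A hypothetical call site; the addresses and the PAGESIZE_8K constant are illustrative assumptions, not taken from this revision:

```c
/* Illustrative only: addresses and PAGESIZE_8K are assumed values. */

/* Locked kernel mapping: selects the MMU_MAP_PERM_ADDR hypercall path. */
dtlb_insert_mapping(0x400000, 0x400000, PAGESIZE_8K, true, true);

/* Ordinary mapping: selects the MMU_MAP_ADDR hyperfast trap path. */
dtlb_insert_mapping(0x800000, 0x800000, PAGESIZE_8K, false, true);
```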

Line 209 (rev 3993) | Line 225 (rev 4129):
```diff
          * Insert it into ITLB.
          */
         t->a = true;
         itlb_pte_copy(t);
 #ifdef CONFIG_TSB
-        //itsb_pte_copy(t, index);
+        itsb_pte_copy(t);
 #endif
         page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to the address space page fault
```

Line 264 (rev 3993) | Line 280 (rev 4129):
```diff
          * Insert it into DTLB.
          */
         t->a = true;
         dtlb_pte_copy(t, true);
 #ifdef CONFIG_TSB
-        //dtsb_pte_copy(t, true);
+        dtsb_pte_copy(t, true);
 #endif
         page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to the address space page fault
```

Line 309 (rev 3993) | Line 325 (rev 4129):
```diff
         t->a = true;
         t->d = true;
         mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
         dtlb_pte_copy(t, false);
 #ifdef CONFIG_TSB
-        //dtsb_pte_copy(t, false);
+        dtsb_pte_copy(t, false);
 #endif
         page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to the address space page fault
```

Line 325 (rev 3993) | Line 341 (rev 4129):
```diff
                 __func__);
         }
     }
 }
 
-/** Print TLB entry (for debugging purposes.
- *
- * The diag field has been left out in order to make this function more generic
- * (there is no diag field in US3 architeture).
- *
- * @param i TLB entry number
- * @param t TLB entry tag
- * @param d TLB entry data
- */
-#if 0
-static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
-{
-    printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
-        "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
-        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
-        t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
-        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
-}
-#endif
-
+/*
+ * On Niagara this function does not work, as supervisor software is isolated
+ * from the TLB by the hypervisor and has no chance to investigate the TLB
+ * entries.
+ */
 void tlb_print(void)
 {
-#if 0
-    int i;
-    tlb_data_t d;
-    tlb_tag_read_reg_t t;
-
-    printf("I-TLB contents:\n");
-    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
-        d.value = itlb_data_access_read(i);
-        t.value = itlb_tag_read_read(i);
-        print_tlb_entry(i, t, d);
-    }
-
-    printf("D-TLB contents:\n");
-    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
-        d.value = dtlb_data_access_read(i);
-        t.value = dtlb_tag_read_read(i);
-        print_tlb_entry(i, t, d);
-    }
-#endif
+    printf("Operation not possible on Niagara.\n");
 }
 
 void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
     const char *str)
 {
```

Line 400 (rev 3993) | Line 383 (rev 4129):
```diff
     printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
     dump_istate(istate);
     panic("%s\n", str);
 }
 
-void describe_mmu_fault(void)
+/**
+ * Describes the exact condition which caused the last DMMU fault.
+ */
+void describe_dmmu_fault(void)
 {
+    uint64_t myid;
+    __hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);
+
+    ASSERT(mmu_fsas[myid].dft < 16);
+
+    printf("condition which caused the fault: %s\n",
+        fault_types[mmu_fsas[myid].dft]);
 }
 
 /** Invalidate all unlocked ITLB and DTLB entries. */
 void tlb_invalidate_all(void)
 {
```
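
describe_dmmu_fault() reads the current strand's fault status area and prints the fault_types[] entry matching its DFT field. A standalone sketch of that bounds-checked lookup, with a made-up dft value standing in for the real fault status area:

```c
#include <stdio.h>

/* Same descriptions as the fault_types[] table introduced above. */
static const char *fault_types[] = {
    "unknown", "fast miss", "fast protection", "MMU miss",
    "invalid RA", "privileged violation", "protection violation",
    "NFO access", "so page/NFO side effect", "invalid VA",
    "invalid ASI", "nc atomic", "privileged action", "unknown",
    "unaligned access", "invalid page size"
};

int main(void)
{
    /* Made-up DFT value; the kernel reads it from the fault status area. */
    unsigned int dft = 6;

    if (dft < sizeof(fault_types) / sizeof(fault_types[0]))
        printf("condition which caused the fault: %s\n",
            fault_types[dft]);
    return 0;
}
```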

Line 421 (rev 3993) | Line 414 (rev 4129):
```diff
  *
  * @param asid Address Space ID.
  */
 void tlb_invalidate_asid(asid_t asid)
 {
-#if 0
-    tlb_context_reg_t pc_save, ctx;
-
     /* switch to nucleus because we are mapped by the primary context */
     nucleus_enter();
-
-    ctx.v = pc_save.v = mmu_primary_context_read();
-    ctx.context = asid;
-    mmu_primary_context_write(ctx.v);
 
-    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
-    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
-
-    mmu_primary_context_write(pc_save.v);
+    __hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
+        MMU_FLAG_ITLB | MMU_FLAG_DTLB);
 
     nucleus_leave();
-#endif
 }
 
 /** Invalidate all ITLB and DTLB entries for specified page range in specified
  * address space.
  *
```

Line 449 (rev 3993) | Line 432 (rev 4129):
```diff
  * @param page First page which to sweep out from ITLB and DTLB.
  * @param cnt Number of ITLB and DTLB entries to invalidate.
  */
 void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
 {
-#if 0
     unsigned int i;
-    tlb_context_reg_t pc_save, ctx;
 
     /* switch to nucleus because we are mapped by the primary context */
     nucleus_enter();
-
-    ctx.v = pc_save.v = mmu_primary_context_read();
-    ctx.context = asid;
-    mmu_primary_context_write(ctx.v);
 
-    for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
-        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
-            page + i * MMU_PAGE_SIZE);
-        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
-            page + i * MMU_PAGE_SIZE);
+    for (i = 0; i < cnt; i++) {
+        __hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page, asid,
+            MMU_FLAG_DTLB | MMU_FLAG_ITLB);
     }
 
-    mmu_primary_context_write(pc_save.v);
-
     nucleus_leave();
-#endif
 }
 
 /** @}
  */
```
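
tlb_invalidate_pages() is documented above as sweeping cnt consecutive pages starting at page, which is the iteration the superseded US3-style body spelled out explicitly. A tiny standalone model of that address iteration, assuming an 8 KiB base page size; demap_one() and the MMU_PAGE_SIZE value are placeholders for illustration:

```c
#include <stdint.h>
#include <stdio.h>

#define MMU_PAGE_SIZE  8192  /* illustrative 8K base page size */

/* Placeholder for whatever actually drops one translation. */
static void demap_one(uint64_t va)
{
    printf("demap %#llx\n", (unsigned long long) va);
}

/* Visit cnt consecutive pages starting at page, as the doc-comment describes. */
static void demap_range(uint64_t page, unsigned int cnt)
{
    unsigned int i;

    for (i = 0; i < cnt; i++)
        demap_one(page + (uint64_t) i * MMU_PAGE_SIZE);
}

int main(void)
{
    demap_range(0x400000, 4);
    return 0;
}
```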