Rev 4327 | Rev 4581

Line 52... | Line 52...

 
 #ifdef CONFIG_TSB
 #include <arch/mm/tsb.h>
 #endif
 
-static void dtlb_pte_copy(pte_t *, index_t, bool);
-static void itlb_pte_copy(pte_t *, index_t);
+static void dtlb_pte_copy(pte_t *, size_t, bool);
+static void itlb_pte_copy(pte_t *, size_t);
 static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
 static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
     const char *);
 static void do_fast_data_access_protection_fault(istate_t *,
     tlb_tag_access_reg_t, const char *);

Line 128... | Line 128...

  * @param t Page Table Entry to be copied.
  * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
  * @param ro If true, the entry will be created read-only, regardless
  *           of its w field.
  */
-void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
+void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
 {
     tlb_tag_access_reg_t tag;
     tlb_data_t data;
     page_address_t pg;
     frame_address_t fr;
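
The index parameter exists because the kernel emulates its native 16K pages on top of the MMU's 8K pages, so each pte_t covers two hardware subpages and the caller has to say which half it wants. Below is a minimal standalone sketch of that arithmetic; the concrete widths (8K MMU pages, 16K kernel pages), the macro names and the sample address are assumptions for illustration, not values taken from this file.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MMU_PAGE_WIDTH      13                          /* assumed: 8K MMU page */
#define PAGE_WIDTH          14                          /* assumed: 16K kernel page */
#define PAGE_SIZE           ((uintptr_t) 1 << PAGE_WIDTH)
#define MMU_PAGES_PER_PAGE  2
#define ALIGN_DOWN(a, b)    ((a) & ~((uintptr_t) (b) - 1))

int main(void)
{
    uintptr_t va = 0x40006128;  /* hypothetical faulting address */

    /* Base of the emulated 16K page and the 8K subpage within it. */
    uintptr_t page_16k = ALIGN_DOWN(va, PAGE_SIZE);
    size_t index = (size_t) ((va >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE);

    /* index == 0 selects the lower 8K subpage, index == 1 the higher one. */
    printf("page_16k=%#lx index=%zu\n", (unsigned long) page_16k, index);
    return 0;
}
```

With these assumed widths, 0x40006128 falls into the upper half of the 16K page at 0x40004000, so index comes out as 1.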

Line 165... | Line 165...

 /** Copy PTE to ITLB.
  *
  * @param t Page Table Entry to be copied.
  * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
  */
-void itlb_pte_copy(pte_t *t, index_t index)
+void itlb_pte_copy(pte_t *t, size_t index)
 {
     tlb_tag_access_reg_t tag;
     tlb_data_t data;
     page_address_t pg;
     frame_address_t fr;

Line 198... | Line 198...

 
 /** ITLB miss handler. */
 void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
 {
     uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
-    index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
+    size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
     pte_t *t;
 
     page_table_lock(AS, true);
     t = page_mapping_find(AS, page_16k);
     if (t && PTE_EXECUTABLE(t)) {
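
The hunk is cut off before the rest of the handler, but the visible lines already show its core decision: look the faulting page up in the software page tables under the page-table lock, then either load the translation or hand the fault on to the declared fault helper. The standalone sketch below imitates only that decision; fake_pte_t, fake_mapping_find() and the printed actions are stand-ins invented for illustration, not the kernel's real data structures or the remainder of this function.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for a page table entry. */
typedef struct {
    uintptr_t page;   /* virtual page the entry maps */
    bool executable;  /* may instructions be fetched from it? */
    bool accessed;    /* software "accessed" bit */
} fake_pte_t;

static fake_pte_t table[] = {
    { 0x40004000, true, false },
};

static fake_pte_t *fake_mapping_find(uintptr_t page)
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].page == page)
            return &table[i];
    return NULL;
}

static void handle_itlb_miss(uintptr_t tpc)
{
    uintptr_t page = tpc & ~((uintptr_t) 0x4000 - 1);  /* align to 16K */
    fake_pte_t *t = fake_mapping_find(page);

    if (t && t->executable) {
        /* Usable mapping: mark it accessed and (in the real handler)
         * copy it into the ITLB. */
        t->accessed = true;
        printf("load translation for %#lx\n", (unsigned long) page);
    } else {
        /* No usable mapping: escalate to the higher-level page fault
         * handling instead of loading anything into the TLB. */
        printf("escalate fault at %#lx\n", (unsigned long) tpc);
    }
}

int main(void)
{
    handle_itlb_miss(0x40004123);  /* translation found */
    handle_itlb_miss(0x50000000);  /* nothing mapped there */
    return 0;
}
```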

Line 243... | Line 243...

  */
 void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
 {
     uintptr_t page_8k;
     uintptr_t page_16k;
-    index_t index;
+    size_t index;
     pte_t *t;
 
     page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
     page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
     index = tag.vpn % MMU_PAGES_PER_PAGE;
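
Unlike the ITLB case, the DTLB miss handler does not get the faulting address directly; it reconstructs it from the VPN field of the TLB Tag Access register and only then derives the emulated 16K page and the subpage index. The snippet below replays those three assignments with a made-up VPN; the 8K/16K page widths are the same assumptions as in the earlier sketch.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MMU_PAGE_WIDTH      13                          /* assumed: 8K MMU page */
#define PAGE_WIDTH          14                          /* assumed: 16K kernel page */
#define PAGE_SIZE           ((uintptr_t) 1 << PAGE_WIDTH)
#define MMU_PAGES_PER_PAGE  2
#define ALIGN_DOWN(a, b)    ((a) & ~((uintptr_t) (b) - 1))

int main(void)
{
    uint64_t vpn = 0x20003;  /* hypothetical tag.vpn read from the MMU */

    /* The same three steps as in fast_data_access_mmu_miss() above. */
    uintptr_t page_8k = (uintptr_t) (vpn << MMU_PAGE_WIDTH);
    uintptr_t page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
    size_t index = (size_t) (vpn % MMU_PAGES_PER_PAGE);

    printf("page_8k=%#lx page_16k=%#lx index=%zu\n",
        (unsigned long) page_8k, (unsigned long) page_16k, index);
    return 0;
}
```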

Line 307... | Line 307...

  * @param istate Interrupted state saved on the stack.
  */
 void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
 {
     uintptr_t page_16k;
-    index_t index;
+    size_t index;
     pte_t *t;
 
     page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
     index = tag.vpn % MMU_PAGES_PER_PAGE;  /* 16K-page emulation */
 

Line 495... | Line 495...

     printf("DTLB SFAR: address=%p\n", sfar);
 
     dtlb_sfsr_write(0);
 }
 
-#if defined (US3)
+#if defined (US)
-/** Invalidates given TLB entry if and only if it is non-locked or global.
- *
- * @param tlb TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1,
- *            TLB_ISMALL, TLB_IBIG).
- * @param entry Entry index within the given TLB.
- */
-static void tlb_invalidate_entry(int tlb, index_t entry)
-{
-    tlb_data_t d;
-    tlb_tag_read_reg_t t;
-
-    if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) {
-        d.value = dtlb_data_access_read(tlb, entry);
-        if (!d.l || d.g) {
-            t.value = dtlb_tag_read_read(tlb, entry);
-            d.v = false;
-            dtlb_tag_access_write(t.value);
-            dtlb_data_access_write(tlb, entry, d.value);
-        }
-    } else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) {
-        d.value = itlb_data_access_read(tlb, entry);
-        if (!d.l || d.g) {
-            t.value = itlb_tag_read_read(tlb, entry);
-            d.v = false;
-            itlb_tag_access_write(t.value);
-            itlb_data_access_write(tlb, entry, d.value);
-        }
-    }
-}
-#endif
-
 /** Invalidate all unlocked ITLB and DTLB entries. */
 void tlb_invalidate_all(void)
 {
     int i;
 
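
The helper removed in this hunk boiled down to one predicate: an entry may be dropped when it is not locked, or when it is global even though locked (!d.l || d.g). The tiny standalone program below just spells that predicate out for all four flag combinations; should_invalidate() is an invented name, only the condition itself comes from the removed code.

```c
#include <stdbool.h>
#include <stdio.h>

/* Invented wrapper around the condition used by the removed helper. */
static bool should_invalidate(bool locked, bool global)
{
    return !locked || global;
}

int main(void)
{
    for (int l = 0; l <= 1; l++)
        for (int g = 0; g <= 1; g++)
            printf("locked=%d global=%d -> invalidate=%d\n",
                l, g, should_invalidate(l, g));
    return 0;
}
```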

Line 541... | Line 510...

      * found must have been created by someone else. Their only purpose now
      * is to collide with proper mappings. Invalidate immediately. It should
      * be safe to invalidate them as late as now.
      */
 
-#if defined (US)
     tlb_data_t d;
     tlb_tag_read_reg_t t;
 
     for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
         d.value = itlb_data_access_read(i);

Line 565... | Line 533...

             dtlb_tag_access_write(t.value);
             dtlb_data_access_write(i, d.value);
         }
     }
 
-#elif defined (US3)
+}
 
-    for (i = 0; i < tlb_ismall_size(); i++)
-        tlb_invalidate_entry(TLB_ISMALL, i);
-    for (i = 0; i < tlb_ibig_size(); i++)
-        tlb_invalidate_entry(TLB_IBIG, i);
-    for (i = 0; i < tlb_dsmall_size(); i++)
-        tlb_invalidate_entry(TLB_DSMALL, i);
-    for (i = 0; i < tlb_dbig_size(); i++)
-        tlb_invalidate_entry(TLB_DBIG_0, i);
-    for (i = 0; i < tlb_dbig_size(); i++)
-        tlb_invalidate_entry(TLB_DBIG_1, i);
-#endif
+#elif defined (US3)
 
+/** Invalidate all unlocked ITLB and DTLB entries. */
+void tlb_invalidate_all(void)
+{
+    itlb_demap(TLB_DEMAP_ALL, 0, 0);
+    dtlb_demap(TLB_DEMAP_ALL, 0, 0);
 }
 
+#endif
+
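
Taken together, these hunks give each CPU family its own definition of tlb_invalidate_all(): the US path keeps walking every ITLB and DTLB entry in software, while the US3 path now issues a single demap-all operation per TLB instead of looping over all the small and big TLBs with the removed helper. The standalone model below only contrasts the shape of the two strategies; the fake TLB, both functions and the assumption that a bulk demap leaves locked entries alone are inventions for illustration, not a statement about the actual US3 hardware semantics.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    bool valid;   /* entry currently holds a translation */
    bool locked;  /* l bit: pinned entry */
    bool global;  /* g bit: shared across contexts */
} fake_tlb_entry_t;

#define FAKE_ENTRY_COUNT 3

/* US-style: software visits every entry and applies the per-entry test. */
static void invalidate_by_walk(fake_tlb_entry_t *tlb, size_t n)
{
    for (size_t i = 0; i < n; i++)
        if (!tlb[i].locked || tlb[i].global)
            tlb[i].valid = false;
}

/* US3-style: one bulk "demap all" per TLB, modelled here as dropping
 * every entry that is not locked. */
static void invalidate_by_demap_all(fake_tlb_entry_t *tlb, size_t n)
{
    for (size_t i = 0; i < n; i++)
        if (!tlb[i].locked)
            tlb[i].valid = false;
}

int main(void)
{
    fake_tlb_entry_t a[FAKE_ENTRY_COUNT] = {
        { true, false, false },  /* plain entry: dropped by both */
        { true, true,  false },  /* locked kernel mapping: kept by both */
        { true, false, true  },  /* global entry: dropped by both */
    };
    fake_tlb_entry_t b[FAKE_ENTRY_COUNT];

    for (size_t i = 0; i < FAKE_ENTRY_COUNT; i++)
        b[i] = a[i];

    invalidate_by_walk(a, FAKE_ENTRY_COUNT);
    invalidate_by_demap_all(b, FAKE_ENTRY_COUNT);

    for (size_t i = 0; i < FAKE_ENTRY_COUNT; i++)
        printf("entry %zu: walk=%d demap_all=%d\n", i, a[i].valid, b[i].valid);
    return 0;
}
```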

 /** Invalidate all ITLB and DTLB entries that belong to specified ASID
  * (Context).
  *
  * @param asid Address Space ID.
  */

Line 612... | Line 577...

  *
  * @param asid Address Space ID.
  * @param page First page which to sweep out from ITLB and DTLB.
  * @param cnt Number of ITLB and DTLB entries to invalidate.
  */
-void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
+void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
 {
     unsigned int i;
     tlb_context_reg_t pc_save, ctx;
 
     /* switch to nucleus because we are mapped by the primary context */
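
The only change in this final hunk is the cnt parameter becoming a size_t. As a small usage illustration, the snippet below derives a (page, cnt) pair for such a call from a byte range; the 16K page size, the sample range and the arithmetic itself are assumptions for illustration, not taken from any caller in this file.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_WIDTH  14                        /* assumed: 16K pages */
#define PAGE_SIZE   ((uintptr_t) 1 << PAGE_WIDTH)

int main(void)
{
    uintptr_t start = 0x40005000;  /* hypothetical byte range to flush */
    size_t bytes = 40000;

    /* First page covering the range and the number of pages it spans. */
    uintptr_t page = start & ~(PAGE_SIZE - 1);
    size_t cnt = (size_t) ((start + bytes - page + PAGE_SIZE - 1) / PAGE_SIZE);

    /* A real caller would now invoke: tlb_invalidate_pages(asid, page, cnt); */
    printf("page=%#lx cnt=%zu\n", (unsigned long) page, cnt);
    return 0;
}
```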