54,13 → 54,14 |
#include <arch/mm/tsb.h> |
#endif |
|
static void dtlb_pte_copy(pte_t *, size_t, bool); |
static void itlb_pte_copy(pte_t *, size_t); |
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *); |
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t, |
const char *); |
static void do_fast_data_access_protection_fault(istate_t *, |
tlb_tag_access_reg_t, const char *); |
static void dtlb_pte_copy(pte_t *t, index_t index, bool ro); |
static void itlb_pte_copy(pte_t *t, index_t index); |
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, |
const char *str); |
static void do_fast_data_access_mmu_miss_fault(istate_t *istate, |
tlb_tag_access_reg_t tag, const char *str); |
static void do_fast_data_access_protection_fault(istate_t *istate, |
tlb_tag_access_reg_t tag, const char *str); |
|
char *context_encoding[] = { |
"Primary", |
85,11 → 86,11 |
|
/** Insert privileged mapping into DMMU TLB. |
* |
* @param page Virtual page address. |
* @param frame Physical frame address. |
* @param pagesize Page size. |
* @param locked True for permanent mappings, false otherwise. |
* @param cacheable True if the mapping is cacheable, false otherwise. |
* @param page Virtual page address. |
* @param frame Physical frame address. |
* @param pagesize Page size. |
* @param locked True for permanent mappings, false otherwise. |
* @param cacheable True if the mapping is cacheable, false otherwise. |
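 * |
 * A purely illustrative call (dev_va is a hypothetical kernel virtual address |
 * in the identity-mapped range) creating a permanent, uncached mapping of an |
 * 8K device page might look like: |
 * |
 *	dtlb_insert_mapping(dev_va, KA2PA(dev_va), PAGESIZE_8K, true, false); |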
*/ |
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, |
bool locked, bool cacheable) |
102,7 → 103,7 |
pg.address = page; |
fr.address = frame; |
|
tag.context = ASID_KERNEL; |
tag.value = ASID_KERNEL; |
tag.vpn = pg.vpn; |
|
dtlb_tag_access_write(tag.value); |
125,12 → 126,12 |
|
/** Copy PTE to TLB. |
* |
* @param t Page Table Entry to be copied. |
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage. |
* @param ro If true, the entry will be created read-only, regardless |
* of its w field. |
* @param t Page Table Entry to be copied. |
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage. |
* @param ro If true, the entry will be created read-only, regardless of its |
* w field. |
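 * |
 * Assuming the usual configuration of one 16K software page emulated by two |
 * 8K MMU pages (MMU_PAGES_PER_PAGE == 2), the copied entry covers the subpage |
 * at t->page + (index << MMU_PAGE_WIDTH), backed by the frame at |
 * t->frame + (index << MMU_PAGE_WIDTH). |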
*/ |
void dtlb_pte_copy(pte_t *t, size_t index, bool ro) |
void dtlb_pte_copy(pte_t *t, index_t index, bool ro) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
164,10 → 165,10 |
|
/** Copy PTE to ITLB. |
* |
* @param t Page Table Entry to be copied. |
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage. |
* @param t Page Table Entry to be copied. |
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage. |
*/ |
void itlb_pte_copy(pte_t *t, size_t index) |
void itlb_pte_copy(pte_t *t, index_t index) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
199,12 → 200,12 |
/** ITLB miss handler. */ |
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate) |
{ |
uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE); |
size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE; |
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE); |
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE; |
pte_t *t; |
|
page_table_lock(AS, true); |
t = page_mapping_find(AS, page_16k); |
t = page_mapping_find(AS, va); |
if (t && PTE_EXECUTABLE(t)) { |
/* |
* The mapping was found in the software page hash table. |
222,8 → 223,7 |
* handler. |
*/ |
page_table_unlock(AS, true); |
if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) == |
AS_PF_FAULT) { |
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { |
do_fast_instruction_access_mmu_miss_fault(istate, |
__func__); |
} |
235,21 → 235,18 |
* Note that some faults (e.g. kernel faults) were already resolved by the |
* low-level, assembly language part of the fast_data_access_mmu_miss handler. |
* |
* @param tag Content of the TLB Tag Access register as it existed |
* when the trap happened. This is to prevent confusion |
* created by clobbered Tag Access register during a nested |
* DTLB miss. |
* @param istate Interrupted state saved on the stack. |
* @param tag Content of the TLB Tag Access register as it existed when the |
* trap happened. This is to prevent confusion created by clobbered |
* Tag Access register during a nested DTLB miss. |
* @param istate Interrupted state saved on the stack. |
*/ |
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate) |
{ |
uintptr_t page_8k; |
uintptr_t page_16k; |
size_t index; |
uintptr_t va; |
index_t index; |
pte_t *t; |
|
page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH; |
page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE); |
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE); |
index = tag.vpn % MMU_PAGES_PER_PAGE; |
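	/* |
	 * Worked example with made-up numbers (MMU_PAGE_WIDTH == 13, 16K |
	 * PAGE_SIZE, MMU_PAGES_PER_PAGE == 2): tag.vpn == 0x2469 denotes the |
	 * 8K page at 0x2469 << 13 == 0x48d2000, which aligns down to |
	 * va == 0x48d0000, with index == 1 selecting its upper 8K half. |
	 */ |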
|
if (tag.context == ASID_KERNEL) { |
257,15 → 254,6 |
/* NULL access in kernel */ |
do_fast_data_access_mmu_miss_fault(istate, tag, |
__func__); |
} else if (page_8k >= end_of_identity) { |
/* |
* The kernel is accessing the I/O space. |
* We still do identity mapping for I/O, |
* but without caching. |
*/ |
dtlb_insert_mapping(page_8k, KA2PA(page_8k), |
PAGESIZE_8K, false, false); |
return; |
} |
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected " |
"kernel page fault."); |
272,7 → 260,7 |
} |
|
page_table_lock(AS, true); |
t = page_mapping_find(AS, page_16k); |
t = page_mapping_find(AS, va); |
if (t) { |
/* |
* The mapping was found in the software page hash table. |
290,8 → 278,7 |
* handler. |
*/ |
page_table_unlock(AS, true); |
if (as_page_fault(page_16k, PF_ACCESS_READ, istate) == |
AS_PF_FAULT) { |
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
do_fast_data_access_mmu_miss_fault(istate, tag, |
__func__); |
} |
300,23 → 287,22 |
|
/** DTLB protection fault handler. |
* |
* @param tag Content of the TLB Tag Access register as it existed |
* when the trap happened. This is to prevent confusion |
* created by clobbered Tag Access register during a nested |
* DTLB miss. |
* @param istate Interrupted state saved on the stack. |
* @param tag Content of the TLB Tag Access register as it existed when the |
* trap happened. This is to prevent confusion created by clobbered |
* Tag Access register during a nested DTLB miss. |
* @param istate Interrupted state saved on the stack. |
*/ |
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate) |
{ |
uintptr_t page_16k; |
size_t index; |
uintptr_t va; |
index_t index; |
pte_t *t; |
|
page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE); |
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE); |
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */ |
|
page_table_lock(AS, true); |
t = page_mapping_find(AS, page_16k); |
t = page_mapping_find(AS, va); |
if (t && PTE_WRITABLE(t)) { |
/* |
* The mapping was found in the software page hash table and is |
326,7 → 312,7 |
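		/* |
		 * Both the accessed and the dirty bit of the software PTE are |
		 * set, as the faulting write both references and modifies the |
		 * page; the stale read-only TLB entry for this subpage is |
		 * then demapped before the writable copy is installed. |
		 */ |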
t->a = true; |
t->d = true; |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, |
page_16k + index * MMU_PAGE_SIZE); |
va + index * MMU_PAGE_SIZE); |
dtlb_pte_copy(t, index, false); |
#ifdef CONFIG_TSB |
dtsb_pte_copy(t, index, false); |
338,8 → 324,7 |
* handler. |
*/ |
page_table_unlock(AS, true); |
if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) == |
AS_PF_FAULT) { |
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { |
do_fast_data_access_protection_fault(istate, tag, |
__func__); |
} |
346,26 → 331,6 |
} |
} |
|
/** Print TLB entry (for debugging purposes). |
* |
* The diag field has been left out in order to make this function more generic |
 * (there is no diag field in the US3 architecture). |
* |
* @param i TLB entry number |
* @param t TLB entry tag |
* @param d TLB entry data |
*/ |
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d) |
{ |
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, " |
"ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, " |
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn, |
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, |
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g); |
} |
|
#if defined (US) |
|
/** Print contents of both TLBs. */ |
void tlb_print(void) |
{ |
377,7 → 342,12 |
for (i = 0; i < ITLB_ENTRY_COUNT; i++) { |
d.value = itlb_data_access_read(i); |
t.value = itlb_tag_read_read(i); |
print_tlb_entry(i, t, d); |
|
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, " |
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, " |
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn, |
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, |
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g); |
} |
|
printf("D-TLB contents:\n"); |
384,63 → 354,22 |
for (i = 0; i < DTLB_ENTRY_COUNT; i++) { |
d.value = dtlb_data_access_read(i); |
t.value = dtlb_tag_read_read(i); |
print_tlb_entry(i, t, d); |
|
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, " |
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, " |
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn, |
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, |
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g); |
} |
} |
|
#elif defined (US3) |
|
/** Print contents of all TLBs. */ |
void tlb_print(void) |
{ |
int i; |
tlb_data_t d; |
tlb_tag_read_reg_t t; |
|
printf("TLB_ISMALL contents:\n"); |
for (i = 0; i < tlb_ismall_size(); i++) { |
d.value = dtlb_data_access_read(TLB_ISMALL, i); |
t.value = dtlb_tag_read_read(TLB_ISMALL, i); |
print_tlb_entry(i, t, d); |
} |
|
printf("TLB_IBIG contents:\n"); |
for (i = 0; i < tlb_ibig_size(); i++) { |
d.value = dtlb_data_access_read(TLB_IBIG, i); |
t.value = dtlb_tag_read_read(TLB_IBIG, i); |
print_tlb_entry(i, t, d); |
} |
|
printf("TLB_DSMALL contents:\n"); |
for (i = 0; i < tlb_dsmall_size(); i++) { |
d.value = dtlb_data_access_read(TLB_DSMALL, i); |
t.value = dtlb_tag_read_read(TLB_DSMALL, i); |
print_tlb_entry(i, t, d); |
} |
|
printf("TLB_DBIG_1 contents:\n"); |
for (i = 0; i < tlb_dbig_size(); i++) { |
d.value = dtlb_data_access_read(TLB_DBIG_0, i); |
t.value = dtlb_tag_read_read(TLB_DBIG_0, i); |
print_tlb_entry(i, t, d); |
} |
|
printf("TLB_DBIG_2 contents:\n"); |
for (i = 0; i < tlb_dbig_size(); i++) { |
d.value = dtlb_data_access_read(TLB_DBIG_1, i); |
t.value = dtlb_tag_read_read(TLB_DBIG_1, i); |
print_tlb_entry(i, t, d); |
} |
} |
|
#endif |
|
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, |
const char *str) |
{ |
fault_if_from_uspace(istate, "%s.", str); |
fault_if_from_uspace(istate, "%s\n", str); |
dump_istate(istate); |
panic("%s.", str); |
panic("%s\n", str); |
} |
|
void do_fast_data_access_mmu_miss_fault(istate_t *istate, |
450,12 → 379,12 |
|
va = tag.vpn << MMU_PAGE_WIDTH; |
if (tag.context) { |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va, |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, |
tag.context); |
} |
dump_istate(istate); |
printf("Faulting page: %p, ASID=%d.\n", va, tag.context); |
panic("%s.", str); |
printf("Faulting page: %p, ASID=%d\n", va, tag.context); |
panic("%s\n", str); |
} |
|
void do_fast_data_access_protection_fault(istate_t *istate, |
466,12 → 395,12 |
va = tag.vpn << MMU_PAGE_WIDTH; |
|
if (tag.context) { |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va, |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, |
tag.context); |
} |
printf("Faulting page: %p, ASID=%d\n", va, tag.context); |
dump_istate(istate); |
panic("%s.", str); |
panic("%s\n", str); |
} |
|
void dump_sfsr_and_sfar(void) |
482,39 → 411,30 |
sfsr.value = dtlb_sfsr_read(); |
sfar = dtlb_sfar_read(); |
|
#if defined (US) |
printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, " |
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, |
sfsr.ow, sfsr.fv); |
#elif defined (US3) |
printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, " |
"w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft, |
sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv); |
#endif |
|
printf("DTLB SFAR: address=%p\n", sfar); |
|
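	/* Clear the SFSR so that it can record the next fault cleanly. */ |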
dtlb_sfsr_write(0); |
} |
|
#if defined (US) |
/** Invalidate all unlocked ITLB and DTLB entries. */ |
void tlb_invalidate_all(void) |
{ |
int i; |
|
tlb_data_t d; |
tlb_tag_read_reg_t t; |
|
/* |
* Walk all ITLB and DTLB entries and remove all unlocked mappings. |
* |
* The kernel doesn't use global mappings so any locked global mappings |
* found must have been created by someone else. Their only purpose now |
* found must have been created by someone else. Their only purpose now |
* is to collide with proper mappings. Invalidate immediately. It should |
* be safe to invalidate them as late as now. |
*/ |
|
tlb_data_t d; |
tlb_tag_read_reg_t t; |
|
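	/* |
	 * Locked, non-global entries (the kernel's own permanent mappings) |
	 * are kept; everything else is invalidated by clearing its valid bit. |
	 */ |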
for (i = 0; i < ITLB_ENTRY_COUNT; i++) { |
d.value = itlb_data_access_read(i); |
if (!d.l || d.g) { |
524,7 → 444,7 |
itlb_data_access_write(i, d.value); |
} |
} |
|
|
for (i = 0; i < DTLB_ENTRY_COUNT; i++) { |
d.value = dtlb_data_access_read(i); |
if (!d.l || d.g) { |
534,20 → 454,9 |
dtlb_data_access_write(i, d.value); |
} |
} |
|
|
} |
|
#elif defined (US3) |
|
/** Invalidate all unlocked ITLB and DTLB entries. */ |
void tlb_invalidate_all(void) |
{ |
itlb_demap(TLB_DEMAP_ALL, 0, 0); |
dtlb_demap(TLB_DEMAP_ALL, 0, 0); |
} |
|
#endif |
|
/** Invalidate all ITLB and DTLB entries that belong to the specified ASID |
* (Context). |
* |
575,11 → 484,11 |
/** Invalidate all ITLB and DTLB entries for the specified page range in the |
 * specified address space. |
* |
* @param asid Address Space ID. |
 * @param page First page to sweep out of the ITLB and DTLB. |
* @param cnt Number of ITLB and DTLB entries to invalidate. |
* @param asid Address Space ID. |
 * @param page First page to sweep out of the ITLB and DTLB. |
* @param cnt Number of ITLB and DTLB entries to invalidate. |
*/ |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt) |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
{ |
unsigned int i; |
tlb_context_reg_t pc_save, ctx; |
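 |
	/* |
	 * pc_save is presumably used to preserve the primary context register |
	 * while ctx temporarily carries the target ASID for the demap |
	 * operations that follow. |
	 */ |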