52,20 → 52,21 |
#include <panic.h> |
#include <arch/asm.h> |
#include <arch/cpu.h> |
#include <arch/mm/pagesize.h> |
|
#ifdef CONFIG_TSB |
#include <arch/mm/tsb.h> |
#endif |
|
#if 0 |
static void dtlb_pte_copy(pte_t *, index_t, bool); |
static void itlb_pte_copy(pte_t *, index_t); |
static void itlb_pte_copy(pte_t *); |
static void dtlb_pte_copy(pte_t *, bool); |
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *); |
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t, |
static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t, |
const char *); |
static void do_fast_data_access_protection_fault(istate_t *, |
tlb_tag_access_reg_t, const char *); |
uint64_t, const char *); |
|
#if 0 |
char *context_encoding[] = { |
"Primary", |
"Secondary", |
75,6 → 76,21 |
#endif |
|
/* |
* The assembly language routine passes a 64-bit parameter to the Data Access |
* MMU Miss and Data Access protection handlers, the parameter encapsulates |
* a virtual address of the faulting page and the faulting context. The most |
* significant 51 bits represent the VA of the faulting page and the least |
 * significant 13 bits represent the faulting context. The following macros
* extract the page and context out of the 64-bit parameter: |
*/ |
|
/* Extract the VA of the faulting page: clear the low 13 context bits. */
#define DMISS_ADDRESS(page_and_ctx) ((page_and_ctx) & ~((uint64_t) 0x1fff))

/* Extract the faulting context: keep only the low 13 bits. */
#define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) % 0x2000)
|
/* |
* Invalidate all non-locked DTLB and ITLB entries. |
*/ |
void tlb_arch_init(void) |
127,90 → 143,66 |
/** Copy PTE to TLB.
 *
 * NOTE(review): the text below looks like an unmarked merge of two variants
 * of this function: an older one that programs the DTLB tag-access/data-in
 * registers directly (and takes a per-8K-subpage 'index' parameter), and a
 * newer one that builds a tte_data_t and installs the mapping through
 * __hypercall_hyperfast(..., MMU_MAP_ADDR).  Both signatures appear back to
 * back and 'data.p' and 'data.size' are assigned twice, so this block cannot
 * compile as-is -- it must be reconciled against the intended revision.
 *
 * @param t	Page Table Entry to be copied.
 * @param index	(older variant only) Zero if lower 8K-subpage, one if higher
 *		8K-subpage.
 * @param ro	If true, the entry will be created read-only, regardless
 *		of its w field.
 */
#if 0
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
void dtlb_pte_copy(pte_t *t, bool ro)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	/* Older variant: derive the 8K-subpage VA/PA from the PTE. */
	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	/* Newer variant: TTE value handed to the hypervisor. */
	tte_data_t data;

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.nfo = false;
	data.ra = (t->frame) >> FRAME_WIDTH;
	data.ie = false;
	data.e = false;
	data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = t->k;		/* p like privileged */
#endif
	data.p = t->k;
	data.x = false;
	data.w = ro ? false : t->w;
	data.g = t->g;

	dtlb_data_in_write(data.value);
	data.size = PAGESIZE_8K;

	/* sun4v path: install the DTLB mapping via the fast MMU-map hypercall. */
	__hypercall_hyperfast(
	    t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR);
}
#endif
|
/** Copy PTE to ITLB.
 *
 * NOTE(review): like dtlb_pte_copy above, this block mixes an older
 * register-programming variant (itlb tag-access + data-in writes, 'index'
 * parameter) with a newer hypercall-based variant; both signatures are
 * present, 'data' is declared twice and 'data.p'/'data.size' are assigned
 * twice, so it cannot compile as-is -- TODO reconcile to one variant.
 *
 * @param t	Page Table Entry to be copied.
 * @param index	(older variant only) Zero if lower 8K-subpage, one if higher
 *		8K-subpage.
 */
#if 0
void itlb_pte_copy(pte_t *t, index_t index)
void itlb_pte_copy(pte_t *t)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	/* Older variant: derive the 8K-subpage VA/PA from the PTE. */
	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;
	tte_data_t data;

	itlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.nfo = false;
	data.ra = (t->frame) >> FRAME_WIDTH;
	data.ie = false;
	data.e = false;
	data.cp = t->c;
	data.p = t->k;		/* p like privileged */
	data.cv = false;
	data.p = t->k;
	data.x = true;		/* executable mapping */
	data.w = false;
	data.g = t->g;
	data.size = PAGESIZE_8K;

	itlb_data_in_write(data.value);
	/* sun4v path: install the ITLB mapping via the fast MMU-map hypercall. */
	__hypercall_hyperfast(
	    t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
}
#endif
|
/** ITLB miss handler.
 *
 * NOTE(review): the entire handler body is inside #if 0, so as written this
 * function does nothing.  The disabled text is also cut by unmarked diff
 * hunk markers (the "217,9 -> 209,9" and "233,7 -> 225,6" lines below), so
 * part of the original body is missing from this view, and both the
 * itlb_pte_copy(t, index) and itlb_pte_copy(t) call forms appear -- TODO
 * reconcile before re-enabling.
 */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
#if 0
	uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
	index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
	pte_t *t;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);

	if (t && PTE_EXECUTABLE(t)) {
		/*
		 * The mapping was found in the software page hash table.
217,9 → 209,9
		 * Insert it into ITLB.
		 */
		t->a = true;
		itlb_pte_copy(t, index);
		itlb_pte_copy(t);
#ifdef CONFIG_TSB
		itsb_pte_copy(t, index);
		//itsb_pte_copy(t, index);
#endif
		page_table_unlock(AS, true);
	} else {
233,7 → 225,6
			__func__);
	}
	}
#endif
}
|
/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * NOTE(review): two generations of this routine are interleaved below with
 * no diff markers: an older form taking a tlb_tag_access_reg_t 'tag' and a
 * newer form taking a packed 64-bit 'page_and_ctx' decoded with
 * DMISS_ADDRESS()/DMISS_CONTEXT().  Duplicate declarations and statements
 * make the text non-compilable as-is; it must be reconciled to one form.
 * The stray "275,9 -> 264,9" and "287,31 -> 276,28" lines below are diff
 * hunk markers, i.e. part of the body is not shown here.
 *
 * @param page_and_ctx	(newer form) A 64-bit value describing the fault.
 *			The most significant 51 bits contain the virtual
 *			address which caused the fault truncated to the page
 *			boundary; the least significant 13 bits contain the
 *			number of the context in which the fault occurred.
 * @param istate	Interrupted state saved on the stack.
 */
//void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
//{
#if 0
	uintptr_t va;
	index_t index;
void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate)
{
	pte_t *t;
	uintptr_t va = DMISS_ADDRESS(page_and_ctx);
	uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

	/* Older form decoded the Tag Access register instead. */
	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;

	if (tag.context == ASID_KERNEL) {
		if (!tag.vpn) {
	if (ctx == ASID_KERNEL) {
		if (va == 0) {
			/* NULL access in kernel */
			do_fast_data_access_mmu_miss_fault(istate, tag,
			do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
			    __func__);
		}
		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
		do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, "Unexpected "
		    "kernel page fault.");
	}

275,9 → 264,9
	 * Insert it into DTLB.
	 */
		t->a = true;
		dtlb_pte_copy(t, index, true);
		dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, true);
		//dtsb_pte_copy(t, true);
#endif
		page_table_unlock(AS, true);
	} else {
287,31 → 276,28
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			do_fast_data_access_mmu_miss_fault(istate, tag,
			do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
			    __func__);
		}
	}
#endif
//}
}
|
/** DTLB protection fault handler.
 *
 * NOTE(review): as with the miss handler above, an older 'tag'-based form
 * and a newer 'page_and_ctx'-based form are interleaved here without diff
 * markers (old dtlb_demap + dtlb_pte_copy(t, index, ...) vs. new
 * mmu_demap_page + dtlb_pte_copy(t, ...)), and the "322,11 -> 308,10" /
 * "336,12 -> 321,11" lines are diff hunk markers hiding part of the body.
 * Reconcile to one form before use.
 *
 * @param page_and_ctx	(newer form) A 64-bit value describing the fault.
 *			The most significant 51 bits contain the virtual
 *			address which caused the fault truncated to the page
 *			boundary; the least significant 13 bits contain the
 *			number of the context in which the fault occurred.
 * @param istate	Interrupted state saved on the stack.
 */
//void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
//{
#if 0
	uintptr_t va;
	index_t index;
void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate)
{
	pte_t *t;
	uintptr_t va = DMISS_ADDRESS(page_and_ctx);
	uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

	/* Older form decoded the Tag Access register instead. */
	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t && PTE_WRITABLE(t)) {
322,11 → 308,10
		 */
		t->a = true;
		t->d = true;
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
		    va + index * MMU_PAGE_SIZE);
		dtlb_pte_copy(t, index, false);
		mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
		dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, false);
		//dtsb_pte_copy(t, false);
#endif
		page_table_unlock(AS, true);
	} else {
336,12 → 321,11
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
			do_fast_data_access_protection_fault(istate, tag,
			do_fast_data_access_protection_fault(istate, page_and_ctx,
			    __func__);
		}
	}
#endif
//}
}
|
/** Print TLB entry (for debugging purposes). |
* |
363,12 → 347,9 |
} |
#endif |
|
#if defined (US) |
|
/** Print contents of both TLBs.
 *
 * NOTE(review): the body is disabled with #if 0 and truncated by the
 * "389,56 -> 370,6" diff hunk marker, so this function currently prints
 * nothing.
 */
void tlb_print(void)
{
#if 0
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;
389,56 → 370,6
#endif
}
|
#elif defined (US3) |
|
/** Print contents of all TLBs.
 *
 * NOTE(review): the whole body is compiled out with #if 0, so this is
 * currently a no-op.  The disabled code walks the five US3 TLBs (ISMALL,
 * IBIG, DSMALL, DBIG_0, DBIG_1) and prints every entry.
 */
void tlb_print(void)
{
#if 0
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	/* NOTE(review): the instruction TLBs are read through the dtlb_*
	 * accessors below -- confirm these are unified accessors, otherwise
	 * the itlb_* variants were probably intended. */
	printf("TLB_ISMALL contents:\n");
	for (i = 0; i < tlb_ismall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_ISMALL, i);
		t.value = dtlb_tag_read_read(TLB_ISMALL, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_IBIG contents:\n");
	for (i = 0; i < tlb_ibig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_IBIG, i);
		t.value = dtlb_tag_read_read(TLB_IBIG, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DSMALL contents:\n");
	for (i = 0; i < tlb_dsmall_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DSMALL, i);
		t.value = dtlb_tag_read_read(TLB_DSMALL, i);
		print_tlb_entry(i, t, d);
	}

	/* NOTE(review): the labels are 1-based ("DBIG_1"/"DBIG_2") while the
	 * constants are 0-based (TLB_DBIG_0/TLB_DBIG_1) -- confirm intent. */
	printf("TLB_DBIG_1 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_0, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
		print_tlb_entry(i, t, d);
	}

	printf("TLB_DBIG_2 contents:\n");
	for (i = 0; i < tlb_dbig_size(); i++) {
		d.value = dtlb_data_access_read(TLB_DBIG_1, i);
		t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
		print_tlb_entry(i, t, d);
	}
#endif
}
|
#endif |
|
/* Report a fatal ITLB miss: dump the interrupted state and panic.
 *
 * NOTE(review): disabled with #if 0; the "446,81 -> 377,35" line below is
 * a diff hunk marker, so part of this function's body is missing from this
 * view. */
#if 0
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
446,81 → 377,35
	dump_istate(istate);
	panic("%s\n", str);
}
#endif
|
/* Report a fatal DTLB miss: print the faulting page/context, dump the
 * interrupted state and panic.
 *
 * NOTE(review): disabled with #if 0 and contains an unmarked merge of the
 * older 'tag'-based and newer 'page_and_ctx'-based forms (two parameter
 * lines, duplicated fault_if_from_uspace/printf statements).  Reconcile to
 * one form before re-enabling. */
#if 0
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
    uint64_t page_and_ctx, const char *str)
{
	uintptr_t va;

	va = tag.vpn << MMU_PAGE_WIDTH;
	if (tag.context) {
		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
		    tag.context);
	if (DMISS_CONTEXT(page_and_ctx)) {
		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx),
		    DMISS_CONTEXT(page_and_ctx));
	}
	dump_istate(istate);
	printf("Faulting page: %p, ASID=%d\n", va, tag.context);
	printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
	panic("%s\n", str);
}
#endif
|
/* Report a fatal DTLB protection fault: print the faulting page/context,
 * dump the interrupted state and panic.
 *
 * NOTE(review): disabled with #if 0 and, like the miss-fault helper above,
 * mixes the older 'tag'-based and newer 'page_and_ctx'-based forms with
 * duplicated statements.  Reconcile to one form before re-enabling. */
#if 0
void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
    uint64_t page_and_ctx, const char *str)
{
	uintptr_t va;

	va = tag.vpn << MMU_PAGE_WIDTH;

	if (tag.context) {
		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
		    tag.context);
	if (DMISS_CONTEXT(page_and_ctx)) {
		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx),
		    DMISS_CONTEXT(page_and_ctx));
	}
	printf("Faulting page: %p, ASID=%d\n", va, tag.context);
	printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
	dump_istate(istate);
	panic("%s\n", str);
}
#endif
|
/** Describe the current MMU fault.
 *
 * Intentionally empty: this port adds no extra fault description beyond
 * what the handlers above already print.
 */
void describe_mmu_fault(void)
{
}
|
#if defined (US3)
/** Invalidates given TLB entry if and only if it is non-locked or global.
 *
 * NOTE(review): currently compiled out by the inner #if 0.
 *
 * @param tlb		TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1,
 *			TLB_ISMALL, TLB_IBIG).
 * @param entry		Entry index within the given TLB.
 */
#if 0
static void tlb_invalidate_entry(int tlb, index_t entry)
{
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) {
		/* Data TLB: clear the valid bit unless the entry is locked
		 * and non-global. */
		d.value = dtlb_data_access_read(tlb, entry);
		if (!d.l || d.g) {
			t.value = dtlb_tag_read_read(tlb, entry);
			d.v = false;
			dtlb_tag_access_write(t.value);
			dtlb_data_access_write(tlb, entry, d.value);
		}
	} else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) {
		/* Instruction TLB: same invalidation protocol via the itlb
		 * accessors. */
		d.value = itlb_data_access_read(tlb, entry);
		if (!d.l || d.g) {
			t.value = itlb_tag_read_read(tlb, entry);
			d.v = false;
			itlb_tag_access_write(t.value);
			itlb_data_access_write(tlb, entry, d.value);
		}
	}
}
#endif
#endif
|
/** Invalidate all unlocked ITLB and DTLB entries. */ |
void tlb_invalidate_all(void) |
{ |