@@ -72,7 +72,7 @@
 
 	for (i = 0; i < cnt; i++) {
 		((tsb_entry_t *) as->arch.tsb_description.tsb_base)[
-		    (i0 + i) & (TSB_ENTRY_COUNT - 1)].tag.invalid = true;
+		    (i0 + i) & (TSB_ENTRY_COUNT - 1)].data.v = false;
 	}
 }
 
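Note on the hunk above: sun4v TSB entries carry no tag.invalid bit; an entry
is invalid whenever the v (valid) bit of its data word is clear, so the
invalidation loop now clears data.v through the TSB base recorded in the
address space's tsb_description. The rest of tsb_invalidate() lies outside
this hunk; the sketch below reconstructs the whole function around the
visible loop. The signature and the derivation of i0 and cnt are assumptions
modeled on the sun4u counterpart, not part of this patch.

	void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
	{
		index_t i0, i;
		count_t cnt;

		/* First entry that may hold a translation of 'page'. */
		i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;

		/* Invalidate everything when asked for "all pages". */
		if (pages == (count_t) -1 || pages > TSB_ENTRY_COUNT)
			cnt = TSB_ENTRY_COUNT;
		else
			cnt = pages;

		for (i = 0; i < cnt; i++) {
			((tsb_entry_t *) as->arch.tsb_description.tsb_base)[
			    (i0 + i) & (TSB_ENTRY_COUNT - 1)].data.v = false;
		}
	}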
@@ -79,21 +79,17 @@
 /** Copy software PTE to ITSB.
  *
  * @param t Software PTE.
- * @param index Zero if lower 8K-subpage, one if higher 8K subpage.
  */
-void itsb_pte_copy(pte_t *t, index_t index)
+void itsb_pte_copy(pte_t *t)
 {
-#if 0
 	as_t *as;
 	tsb_entry_t *tsb;
 	index_t entry;
 
-	ASSERT(index <= 1);
-
 	as = t->as;
-	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
-	ASSERT(entry < ITSB_ENTRY_COUNT);
-	tsb = &as->arch.itsb[entry];
+	entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
+	ASSERT(entry < TSB_ENTRY_COUNT);
+	tsb = &((tsb_entry_t *) as->arch.tsb_description.tsb_base)[entry];
 
 	/*
 	 * We use write barriers to make sure that the TSB load
@@ -101,47 +97,45 @@
 	 * be repeated.
 	 */
 
-	tsb->tag.invalid = true;	/* invalidate the entry
-					 * (tag target has this
-					 * set to 0) */
+	tsb->data.v = false;
 
 	write_barrier();
 
 	tsb->tag.context = as->asid;
-	/* the shift is bigger than PAGE_WIDTH, do not bother with index */
 	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
 
 	tsb->data.value = 0;
-	tsb->data.size = PAGESIZE_8K;
-	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
+	tsb->data.nfo = false;
+	tsb->data.ra = t->frame >> MMU_FRAME_WIDTH;
+	tsb->data.ie = false;
+	tsb->data.e = false;
 	tsb->data.cp = t->c;	/* cp as cache in phys.-idxed, c as cacheable */
 	tsb->data.cv = false;
 	tsb->data.p = t->k;	/* p as privileged, k as kernel */
-	tsb->data.v = t->p;	/* v as valid, p as present */
 	tsb->data.x = true;
 	tsb->data.w = false;
+	tsb->data.size = PAGESIZE_8K;
 
 	write_barrier();
 
-	tsb->tag.invalid = false;	/* mark the entry as valid */
-#endif
+	tsb->data.v = t->p;	/* v as valid, p as present */
 }
 
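Both copy routines follow the same three-step update protocol, and the two
write barriers are what make it safe against a concurrent TSB lookup: the
entry is first taken out of service, then retagged and refilled while still
invalid, and only published at the end. The sketch below distills this; the
helper name tsb_entry_update and the single packed data_value argument are
hypothetical, introduced only to show the ordering.

	/* A lookup racing with this update either finds v == false and
	 * falls back to the slow miss path, or finds v == true only
	 * after the tag and every other data field are consistent. */
	static void tsb_entry_update(tsb_entry_t *tsb, asid_t asid,
	    uint64_t va_tag, uint64_t data_value /* with v kept clear */)
	{
		tsb->data.v = false;	/* 1. take the entry out of service */
		write_barrier();	/* refill only after v is clear */

		tsb->tag.context = asid;	/* 2. rewrite the tag... */
		tsb->tag.va_tag = va_tag;
		tsb->data.value = data_value;	/* ...and the data, v still 0 */
		write_barrier();	/* all fields land before v is set */

		tsb->data.v = true;	/* 3. publish the entry */
	}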
 /** Copy software PTE to DTSB.
  *
  * @param t Software PTE.
- * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
  * @param ro If true, the mapping is copied read-only.
  */
-void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
+void dtsb_pte_copy(pte_t *t, bool ro)
 {
-#if 0
 	as_t *as;
 	tsb_entry_t *tsb;
 	index_t entry;
 
-	ASSERT(index <= 1);
-
 	as = t->as;
-	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
-	ASSERT(entry < DTSB_ENTRY_COUNT);
-	tsb = &as->arch.dtsb[entry];
+	entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
+	ASSERT(entry < TSB_ENTRY_COUNT);
+	tsb = &((tsb_entry_t *) as->arch.tsb_description.tsb_base)[entry];
 
 	/*
 	 * We use write barriers to make sure that the TSB load
@@ -149,30 +143,30 @@
 	 * be repeated.
 	 */
 
-	tsb->tag.invalid = true;	/* invalidate the entry
-					 * (tag target has this
-					 * set to 0) */
+	tsb->data.v = false;
 
 	write_barrier();
 
 	tsb->tag.context = as->asid;
-	/* the shift is bigger than PAGE_WIDTH, do not bother with index */
 	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
 
 	tsb->data.value = 0;
-	tsb->data.size = PAGESIZE_8K;
-	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
-	tsb->data.cp = t->c;
+	tsb->data.nfo = false;
+	tsb->data.ra = t->frame >> MMU_FRAME_WIDTH;
+	tsb->data.ie = false;
+	tsb->data.e = false;
+	tsb->data.cp = t->c;	/* cp as cache in phys.-idxed, c as cacheable */
 #ifdef CONFIG_VIRT_IDX_DCACHE
 	tsb->data.cv = t->c;
 #endif /* CONFIG_VIRT_IDX_DCACHE */
-	tsb->data.p = t->k;	/* p as privileged */
+	tsb->data.p = t->k;	/* p as privileged, k as kernel */
+	tsb->data.x = true;
 	tsb->data.w = ro ? false : t->w;
-	tsb->data.v = t->p;
+	tsb->data.size = PAGESIZE_8K;
 
 	write_barrier();
 
-	tsb->tag.invalid = false;	/* mark the entry as valid */
-#endif
+	tsb->data.v = t->p;	/* v as valid, p as present */
 }
 
 /** @}
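A note on the dropped index parameter: on sun4u a software PTE covered two
8K MMU subpages and the handlers copied each half into the TSB separately,
while the sun4v code presumably maps a single 8K page per software PTE,
hence the simpler signatures above. Below is an illustrative caller, modeled
on the analogous sun4u fast-miss handler; everything except the
new-signature copy calls, including page_mapping_find() and the t->a/t->d
updates, is assumed context rather than part of this patch.

	/* Sketch: servicing a fast data access MMU miss on sun4v. */
	static void data_miss_example(uintptr_t page)
	{
		pte_t *t;

		t = page_mapping_find(AS, page);
		if (t != NULL) {
			/* Insert read-only first so that the first write
			 * still faults and the dirty bit gets recorded. */
			t->a = true;
			dtsb_pte_copy(t, true);
		}
		/* The protection-fault path would set t->d = true and call
		 * dtsb_pte_copy(t, false); the instruction-miss path sets
		 * t->a and calls itsb_pte_copy(t). */
	}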