Subversion Repositories: HelenOS
Compare Revisions: Rev 1890 → Rev 1891

/trunk/kernel/arch/sparc64/src/mm/tlb.c
51,6 → 51,10
#include <panic.h>
#include <arch/asm.h>
 
#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif
 
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
144,6 → 148,10
dtlb_data_in_write(data.value);
}
 
/** Copy PTE to ITLB.
*
* @param t Page Table Entry to be copied.
*/
void itlb_pte_copy(pte_t *t)
{
tlb_tag_access_reg_t tag;
189,6 → 197,9
*/
t->a = true;
itlb_pte_copy(t);
#ifdef CONFIG_TSB
itsb_pte_copy(t);
#endif
page_table_unlock(AS, true);
} else {
/*
233,6 → 244,9
*/
t->a = true;
dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, true);
#endif
page_table_unlock(AS, true);
} else {
/*
266,6 → 280,9
t->d = true;
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, false);
#endif
page_table_unlock(AS, true);
} else {
/*
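All three tlb.c hunks above apply the same pattern: once the page table entry has been marked accessed (or dirty) and loaded into the hardware TLB, the translation is mirrored into the corresponding TSB, so the next miss on that page can be served by the TSB fast path instead of a full page table walk. A condensed sketch of the pattern (handle_dtlb_miss and its parameters are hypothetical; the real logic lives in the handlers above):

static void handle_dtlb_miss(pte_t *t, bool ro)
{
	t->a = true;		/* mark the PTE as recently accessed */
	dtlb_pte_copy(t, ro);	/* satisfy the current miss in the DTLB */
#ifdef CONFIG_TSB
	dtsb_pte_copy(t, ro);	/* mirror the mapping into the DTSB */
#endif
	page_table_unlock(AS, true);
}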
/trunk/kernel/arch/sparc64/src/mm/as.c
40,6 → 40,12
 
#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#include <arch/memstr.h>
#include <synch/mutex.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <bitops.h>
#include <macros.h>
#endif
 
/** Architecture dependent address space init. */
49,6 → 55,47
asid_fifo_init();
}
 
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
int order = fnzb32(((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH);
uintptr_t tsb = (uintptr_t) frame_alloc(order, flags);
 
if (!tsb)
return -1;
 
as->arch.itsb = (tsb_entry_t *) tsb;
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * sizeof(tsb_entry_t));
#endif
return 0;
}
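A worked example of the allocation order above, assuming the usual constants (TSB_SIZE == 0, i.e. 512 entries per TSB, a 16-byte tsb_entry_t, and FRAME_WIDTH == 13, i.e. 8 KiB frames; see arch/mm/tsb.h for the authoritative values): (512 + 512) * 16 = 16384 bytes, 16384 >> 13 = 2 frames, and order = fnzb32(2) = 1, so frame_alloc() hands back 2^1 = 2 physically contiguous frames, with the ITSB in the first half and the DTSB in the second.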
 
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
count_t cnt = ((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH;
frame_free((uintptr_t) as->arch.itsb);
return cnt;
#else
return 0;
#endif
}
 
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
ipl_t ipl;
 
memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t), 0);
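/*
* Note: zeroing alone leaves tag.invalid == 0, which the TSB copy-in
* routines below (see tsb.c) treat as a valid entry; the explicit
* tsb_invalidate() pass sets the invalid bits properly.
*/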
ipl = interrupts_disable();
mutex_lock_active(&as->lock); /* completely unnecessary, but polite */
tsb_invalidate(as, 0, (count_t) -1);
mutex_unlock(&as->lock);
interrupts_restore(ipl);
#endif
return 0;
}
 
/** Perform sparc64-specific tasks when an address space becomes active on the processor.
*
* Install ASID and map TSBs.
78,37 → 125,35
mmu_secondary_context_write(ctx.v);
 
#ifdef CONFIG_TSB
-	if (as != AS_KERNEL) {
-		uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
+	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

-		ASSERT(as->arch.itsb && as->arch.dtsb);
+	ASSERT(as->arch.itsb && as->arch.dtsb);

-		uintptr_t tsb = as->arch.itsb;
+	uintptr_t tsb = (uintptr_t) as->arch.itsb;
-		if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
-			/*
-			 * TSBs were allocated from memory not covered
-			 * by the locked 4M kernel DTLB entry. We need
-			 * to map both TSBs explicitly.
-			 */
-			dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
-			dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
-		}
+	if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+		/*
+		 * TSBs were allocated from memory not covered
+		 * by the locked 4M kernel DTLB entry. We need
+		 * to map both TSBs explicitly.
+		 */
+		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
+		dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
+	}

-		/*
-		 * Setup TSB Base registers.
-		 */
-		tsb_base_reg_t tsb_base;
-
-		tsb_base.value = 0;
-		tsb_base.size = TSB_SIZE;
-		tsb_base.split = 0;
-
-		tsb_base.base = as->arch.itsb >> PAGE_WIDTH;
-		itsb_base_write(tsb_base.value);
-		tsb_base.base = as->arch.dtsb >> PAGE_WIDTH;
-		dtsb_base_write(tsb_base.value);
-	}
+	/*
+	 * Setup TSB Base registers.
+	 */
+	tsb_base_reg_t tsb_base;
+
+	tsb_base.value = 0;
+	tsb_base.size = TSB_SIZE;
+	tsb_base.split = 0;
+
+	tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
+	itsb_base_write(tsb_base.value);
+	tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
+	dtsb_base_write(tsb_base.value);
#endif
}
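For reference, the TSB Base register written above has roughly the following layout (per the UltraSPARC MMU documentation; the authoritative HelenOS definition lives in arch/mm/tsb.h, so treat this sketch as an assumption of its shape):

typedef union {
	uint64_t value;
	struct {
		uint64_t base : 51;	/* VA bits 63:13 of the TSB base address */
		unsigned split : 1;	/* when set, ITSB/DTSB are split on bit 12 */
		unsigned : 9;		/* reserved */
		unsigned size : 3;	/* number of entries = 512 * 2^size */
	} __attribute__ ((packed));
} tsb_base_reg_t;

This is why the code shifts as->arch.itsb right by PAGE_WIDTH: the register expects the virtual base address with its page offset bits stripped.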
 
129,22 → 174,19
*/
 
#ifdef CONFIG_TSB
-	if (as != AS_KERNEL) {
-		uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
+	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

-		ASSERT(as->arch.itsb && as->arch.dtsb);
+	ASSERT(as->arch.itsb && as->arch.dtsb);

-		uintptr_t tsb = as->arch.itsb;
+	uintptr_t tsb = (uintptr_t) as->arch.itsb;
-		if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
-			/*
-			 * TSBs were allocated from memory not covered
-			 * by the locked 4M kernel DTLB entry. We need
-			 * to demap the entry installed by as_install_arch().
-			 */
-			dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
-		}
-	}
+	if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+		/*
+		 * TSBs were allocated from memory not covered
+		 * by the locked 4M kernel DTLB entry. We need
+		 * to demap the entry installed by as_install_arch().
+		 */
+		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
+	}
#endif
}
/trunk/kernel/arch/sparc64/src/mm/tsb.c
33,21 → 33,117
*/
 
#include <arch/mm/tsb.h>
#include <arch/mm/tlb.h>
#include <arch/barrier.h>
#include <mm/as.h>
#include <arch/types.h>
#include <typedefs.h>
#include <macros.h>
#include <debug.h>
 
#define TSB_INDEX_MASK ((1<<(21+1+TSB_SIZE-PAGE_WIDTH))-1)
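/*
* Sanity check on the mask (assuming PAGE_WIDTH == 13 and TSB_SIZE == 0,
* i.e. 512-entry TSBs): 1 << (21 + 1 + 0 - 13) == 512, so TSB_INDEX_MASK
* is 511 and (page >> PAGE_WIDTH) & TSB_INDEX_MASK is a valid index into
* either TSB.
*/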
 
/** Invalidate portion of TSB.
*
* We assume that the address space is already locked.
* Note that the respective portions of both TSBs
* are invalidated at the same time.
*
* @param as Address space.
* @param page First page to invalidate in TSB.
* @param pages Number of pages to invalidate. Value of (count_t) -1 means the whole TSB.
*/
void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
{
index_t i0, i;
count_t cnt;
ASSERT(as->arch.itsb && as->arch.dtsb);
i0 = (page >> PAGE_WIDTH) & TSB_INDEX_MASK;
cnt = min(pages, ITSB_ENTRY_COUNT);
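/*
* Invalidating more than ITSB_ENTRY_COUNT entries would only wrap
* around and revisit the same slots, hence the cap above.
*/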
for (i = 0; i < cnt; i++) {
as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT-1)].tag.invalid = true;
as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT-1)].tag.invalid = true;
}
}
 
/** Copy software PTE to ITSB.
*
* @param t Software PTE.
*/
void itsb_pte_copy(pte_t *t)
{
as_t *as;
tsb_entry_t *tsb;
as = t->as;
tsb = &as->arch.itsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
 
/*
* We use write barriers to make sure that the TSB load
* will either see consistent data or the fault will
* be repeated.
*/
 
tsb->tag.invalid = 1; /* invalidate the entry (tag target has this set to 0) */
 
write_barrier();
 
tsb->tag.context = as->asid;
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> PAGE_WIDTH;
tsb->data.cp = t->c;
tsb->data.cv = t->c;
tsb->data.p = t->k; /* p as privileged */
tsb->data.v = t->p;
write_barrier();
tsb->tag.invalid = 0; /* mark the entry as valid */
}
 
/** Copy software PTE to DTSB.
*
* @param t Software PTE.
* @param ro If true, the mapping is copied read-only.
*/
void dtsb_pte_copy(pte_t *t, bool ro)
{
as_t *as;
tsb_entry_t *tsb;
as = t->as;
tsb = &as->arch.dtsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
 
/*
* We use write barriers to make sure that the TSB load
* will either see consistent data or the fault will
* be repeated.
*/
 
tsb->tag.invalid = 1; /* invalidate the entry (tag target has this set to 0) */
 
write_barrier();
 
tsb->tag.context = as->asid;
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> PAGE_WIDTH;
tsb->data.cp = t->c;
tsb->data.cv = t->c;
tsb->data.p = t->k; /* p as privileged */
tsb->data.w = ro ? false : t->w;
tsb->data.v = t->p;
write_barrier();
tsb->tag.invalid = 0; /* mark the entry as valid */
}
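Both copy routines follow the same single-writer publication protocol: invalidate, barrier, fill, barrier, validate. The consumer is the TSB miss fast path, which in the real kernel is implemented in assembly; a hypothetical C rendition of the check it performs (tsb_probe and its parameters are invented here for illustration):

static bool tsb_probe(tsb_entry_t *e, uintptr_t va, asid_t asid, uint64_t *data)
{
	if (e->tag.invalid)
		return false;	/* empty or mid-update; take the slow path */
	if (e->tag.context != asid)
		return false;	/* belongs to another address space */
	if (e->tag.va_tag != va >> VA_TAG_PAGE_SHIFT)
		return false;	/* a different page hashed into this slot */

	/*
	 * Safe to use: the writer's barriers guarantee that the data
	 * word was complete before tag.invalid was cleared.
	 */
	*data = e->data.value;
	return true;
}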
 
/** @}
*/