Subversion Repositories HelenOS

Compare Revisions

Ignore whitespace Rev 4128 → Rev 4129

/branches/sparc/kernel/arch/sparc64/src/mm/sun4v/tlb.c
66,15 → 66,6
static void do_fast_data_access_protection_fault(istate_t *,
uint64_t, const char *);
 
#if 0
/* Human-readable names for the MMU context selections; currently unused. */
char *context_encoding[] = {
"Primary",
"Secondary",
"Nucleus",
"Reserved"
};
#endif
 
/*
* The assembly language routine passes a 64-bit parameter to the Data Access
* MMU Miss and Data Access protection handlers, the parameter encapsulates
90,6 → 81,35
/* extracts the faulting context */
#define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff)
 
/**
 * Descriptions of fault types from the MMU Fault status area.
 *
 * fault_types[i] contains the description of the error for which the IFT or
 * DFT field of the MMU fault status area is i.  The array has 16 entries,
 * one for each possible fault-type code (callers assert the code is < 16).
 */
char *fault_types[] = {
"unknown",			/* 0 */
"fast miss",			/* 1 */
"fast protection",		/* 2 */
"MMU miss",			/* 3 */
"invalid RA",			/* 4 */
"privileged violation",		/* 5 */
"protection violation",		/* 6 */
"NFO access",			/* 7 */
"so page/NFO side effect",	/* 8 */
"invalid VA",			/* 9 */
"invalid ASI",			/* 10 */
"nc atomic",			/* 11 */
"privileged action",		/* 12 */
"unknown",			/* 13 */
"unaligned access",		/* 14 */
"invalid page size"		/* 15 */
};
 
/** Array of MMU fault status areas. */
extern mmu_fault_status_area_t mmu_fsas[MAX_NUM_STRANDS];
 
/*
* Invalidate all non-locked DTLB and ITLB entries.
*/
109,35 → 129,31
/** Insert a mapping into the DTLB.
 *
 * NOTE(review): this span is a collapsed revision-diff rendering — the
 * removed (rev 4128) direct-TLB-register code and the added (rev 4129)
 * hypervisor-call code are interleaved.  As rendered it is not compilable:
 * `data` is declared only inside the #if 0 region yet used after it,
 * `data.w` is assigned both true and false, and the final #endif has no
 * matching #if.  Recover the actual revision before editing this function.
 *
 * @param page      virtual page address to map
 * @param frame     physical frame address (its RA is stored in data.ra)
 * @param pagesize  page size code stored into data.size
 * @param locked    if true, install a permanent mapping via the
 *                  MMU_MAP_PERM_ADDR hypercall; otherwise use the
 *                  hyperfast MMU_MAP_ADDR trap
 * @param cacheable controls the cp (and, with CONFIG_VIRT_IDX_DCACHE,
 *                  cv) cacheability bits
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
bool locked, bool cacheable)
{
#if 0
tlb_tag_access_reg_t tag;
tlb_data_t data;
page_address_t pg;
frame_address_t fr;

pg.address = page;
fr.address = frame;

tag.context = ASID_KERNEL;
tag.vpn = pg.vpn;

dtlb_tag_access_write(tag.value);

tte_data_t data;
data.value = 0;
data.v = true;
data.size = pagesize;
data.pfn = fr.pfn;
data.l = locked;
data.nfo = false;
data.ra = frame >> FRAME_WIDTH;
data.ie = false;
data.e = false;
data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
#endif
data.p = true;
data.w = true;
data.g = false;

dtlb_data_in_write(data.value);
#endif
data.x = false;
data.w = false;
data.size = pagesize;
if (locked) {
__hypercall_fast4(
MMU_MAP_PERM_ADDR, page, 0, data.value, MMU_FLAG_DTLB);
} else {
__hypercall_hyperfast(
page, ASID_KERNEL, data.value, MMU_FLAG_DTLB, 0,
MMU_MAP_ADDR);
}
}
 
/** Copy PTE to TLB.
211,7 → 227,7
t->a = true;
itlb_pte_copy(t);
#ifdef CONFIG_TSB
//itsb_pte_copy(t, index);
itsb_pte_copy(t);
#endif
page_table_unlock(AS, true);
} else {
266,7 → 282,7
t->a = true;
dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
//dtsb_pte_copy(t, true);
dtsb_pte_copy(t, true);
#endif
page_table_unlock(AS, true);
} else {
311,7 → 327,7
mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
//dtsb_pte_copy(t, false);
dtsb_pte_copy(t, false);
#endif
page_table_unlock(AS, true);
} else {
327,47 → 343,14
}
}
 
/** Print TLB entry (for debugging purposes).
*
* The diag field has been left out in order to make this function more generic
* (there is no diag field in the US3 architecture).
*
* @param i TLB entry number
* @param t TLB entry tag
* @param d TLB entry data
*/

/*
* On Niagara this function does not work, as supervisor software is isolated
* from the TLB by the hypervisor and has no chance to investigate the TLB
* entries.
*/
#if 0
/*
 * Disabled pretty-printer for one raw TLB entry; unused because the TLB
 * cannot be inspected by supervisor code on this machine (see the comment
 * above).
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
#endif
 
/** Print the TLB contents.
 *
 * On Niagara (sun4v) the supervisor is isolated from the TLB by the
 * hypervisor, so the entries cannot be read; all this function can do is
 * report that fact.  (The pre-sun4v implementation that walked the ITLB
 * and DTLB via direct register access was dead code kept under #if 0 and
 * has been removed.)
 */
void tlb_print(void)
{
	printf("Operation not possible on Niagara.\n");
}
 
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
402,8 → 385,18
panic("%s\n", str);
}
 
void describe_mmu_fault(void)
/**
* Describes the exact condition which caused the last DMMU fault.
*/
void describe_dmmu_fault(void)
{
uint64_t myid;
__hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);
 
ASSERT(mmu_fsas[myid].dft < 16);
 
printf("condition which caused the fault: %s\n",
fault_types[mmu_fsas[myid].dft]);
}
 
/** Invalidate all unlocked ITLB and DTLB entries. */
423,23 → 416,13
*/
/** Invalidate all ITLB and DTLB entries belonging to the given address space.
 *
 * NOTE(review): collapsed diff rendering — as shown, the entire body,
 * including the MMU_DEMAP_CTX hypercall that rev 4129 added, sits inside
 * #if 0 ... #endif, so this function would compile to a no-op.  In the
 * actual new revision the hypercall is presumably live (likely still
 * bracketed by nucleus_enter()/nucleus_leave()) — verify against the real
 * rev 4129 source before editing.
 *
 * @param asid address space identifier whose mappings are demapped
 */
void tlb_invalidate_asid(asid_t asid)
{
#if 0
tlb_context_reg_t pc_save, ctx;
/* switch to nucleus because we are mapped by the primary context */
nucleus_enter();
ctx.v = pc_save.v = mmu_primary_context_read();
ctx.context = asid;
mmu_primary_context_write(ctx.v);
itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
mmu_primary_context_write(pc_save.v);

__hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
MMU_FLAG_ITLB | MMU_FLAG_DTLB);

nucleus_leave();
#endif
}
 
/** Invalidate all ITLB and DTLB entries for specified page range in specified
451,28 → 434,17
*/
/** Invalidate ITLB and DTLB entries for a page range in an address space.
 *
 * NOTE(review): collapsed diff rendering — the removed direct-demap loop
 * and the added hypercall loop are interleaved inside a single #if 0
 * region, so as shown this compiles to a no-op and the first `for` loop
 * has no closing brace.  Also note that the new hypercall loop passes the
 * same `page` on every iteration (it never adds i * page size) — verify
 * against the real rev 4129 source whether that is intentional.
 *
 * @param asid address space identifier
 * @param page virtual address of the first page to demap
 * @param cnt  number of pages
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
#if 0
unsigned int i;
tlb_context_reg_t pc_save, ctx;

/* switch to nucleus because we are mapped by the primary context */
nucleus_enter();
ctx.v = pc_save.v = mmu_primary_context_read();
ctx.context = asid;
mmu_primary_context_write(ctx.v);
for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * MMU_PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * MMU_PAGE_SIZE);

for (i = 0; i < cnt; i++) {
__hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page, asid,
MMU_FLAG_DTLB | MMU_FLAG_ITLB);
}
mmu_primary_context_write(pc_save.v);

nucleus_leave();
#endif
}
 
/** @}