Subversion Repositories HelenOS

Compare Revisions

Rev 4128 → Rev 4129

/branches/sparc/kernel/arch/sparc64/include/sun4v/regdef.h
50,30 → 50,6
 
#define TSTATE_PEF_BIT (PSTATE_PEF_BIT << TSTATE_PSTATE_SHIFT)
 
/*
 
#define PSTATE_IE_BIT (1 << 1)
#define PSTATE_AM_BIT (1 << 3)
 
#define PSTATE_AG_BIT (1 << 0)
#define PSTATE_IG_BIT (1 << 11)
#define PSTATE_MG_BIT (1 << 10)
 
#define PSTATE_PRIV_BIT (1 << 2)
#define PSTATE_PEF_BIT (1 << 4)
 
#define TSTATE_PSTATE_SHIFT 8
#define TSTATE_PRIV_BIT (PSTATE_PRIV_BIT << TSTATE_PSTATE_SHIFT)
#define TSTATE_IE_BIT (PSTATE_IE_BIT << TSTATE_PSTATE_SHIFT)
#define TSTATE_PEF_BIT (PSTATE_PEF_BIT << TSTATE_PSTATE_SHIFT)
 
#define TSTATE_CWP_MASK 0x1f
 
#define WSTATE_NORMAL(n) (n)
#define WSTATE_OTHER(n) ((n) << 3)
 
*/
 
#endif
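
A quick check of the one definition this hunk keeps: TSTATE holds a saved copy of PSTATE shifted up by TSTATE_PSTATE_SHIFT, so the bit positions can be verified by hand using only the macros shown above.

/*
 * TSTATE_PEF_BIT  = PSTATE_PEF_BIT  << 8 = (1 << 4) << 8 = 1 << 12
 * TSTATE_PRIV_BIT = PSTATE_PRIV_BIT << 8 = (1 << 2) << 8 = 1 << 10
 * TSTATE_IE_BIT   = PSTATE_IE_BIT   << 8 = (1 << 1) << 8 = 1 << 9
 */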
 
/** @}
/branches/sparc/kernel/arch/sparc64/include/mm/sun4v/page.h
37,17 → 37,9
 
#include <arch/mm/frame.h>
 
/*
* On the TLB and TSB level, we still use 8K pages, which are supported by the
* MMU.
*/
#define MMU_PAGE_WIDTH MMU_FRAME_WIDTH
#define MMU_PAGE_SIZE MMU_FRAME_SIZE
 
/*
* On the page table level, we use 16K pages. 16K pages are not supported by
* the MMU but we emulate them with pairs of 8K pages.
*/
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
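
The comments this hunk removes explained why two page sizes coexist: the MMU works with 8K pages, while the page-table level used 16K pages emulated as pairs of 8K pages. A minimal sketch of what that emulation means for TLB-level loops; MMU_PAGES_PER_PAGE is the kernel's existing name for the ratio (it appears in the old tlb.c code later in this changeset), while demap_one() is a hypothetical stand-in for a real demap primitive.

/* Apply a TLB-level operation to every 8K MMU subpage of one page. */
static void demap_page_subpages(uintptr_t page)
{
	unsigned int i;

	for (i = 0; i < MMU_PAGES_PER_PAGE; i++)
		demap_one(page + i * MMU_PAGE_SIZE);  /* hypothetical primitive */
}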
 
/branches/sparc/kernel/arch/sparc64/include/mm/sun4v/tte.h
52,7 → 52,6
#include <arch/types.h>
 
/** Translation Table Entry - Data. */
/** SUN4V-OK */
union tte_data {
uint64_t value;
struct {
74,7 → 73,6
 
typedef union tte_data tte_data_t;
 
// TODO: probably remove once tsb.c exists for both sun4u and sun4v
#define VA_TAG_PAGE_SHIFT 22
 
#endif /* !def __ASM__ */
/branches/sparc/kernel/arch/sparc64/include/mm/sun4v/tlb.h
118,42 → 118,6
asi_u64_write(ASI_SECONDARY_CONTEXT_REG, VA_SECONDARY_CONTEXT_REG, v);
}
 
/** Perform IMMU TLB Demap Operation.
*
* @param type Selects between context and page demap (and entire MMU
* demap on US3).
* @param context_encoding Specifies which Context register has Context ID for
* demap.
* @param page Address which is on the page to be demapped.
*/
static inline void itlb_demap(int type, int context_encoding, uintptr_t page)
{
}
 
/** Perform DMMU TLB Demap Operation.
*
* @param type One of TLB_DEMAP_PAGE and TLB_DEMAP_CONTEXT. Selects
* between context and page demap.
* @param context_encoding Specifies which Context register has Context ID for
* demap.
* @param page Address which is on the page to be demapped.
*/
static inline void dtlb_demap(int type, int context_encoding, uintptr_t page)
{
#if 0
- this implementation is not correct!!!
if (type == TLB_DEMAP_PAGE) {
__hypercall_fast5(
MMU_DEMAP_PAGE, 0, 0,
page, context_encoding, MMU_FLAG_DTLB);
} else if (type == TLB_DEMAP_CONTEXT) {
__hypercall_fast4(
MMU_DEMAP_CTX, 0, 0,
context_encoding, MMU_FLAG_DTLB);
}
#endif
}
 
/**
* Demaps all mappings in a context.
*
164,10 → 128,29
__hypercall_fast4(MMU_DEMAP_CTX, 0, 0, context, mmu_flag);
}
 
/**
* Demaps the given page.
*
* @param vaddr VA of the page to be demapped
* @param context number of the context
* @param mmu_flag MMU_FLAG_DTLB, MMU_FLAG_ITLB or a combination of both
*/
static inline void mmu_demap_page(uintptr_t vaddr, int context, int mmu_flag) {
__hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, vaddr, context, mmu_flag);
}
 
/**
* Installs a locked TLB entry in kernel address space.
*
* @param vaddr VA of the page to be mapped
* @param ra real address the page is mapped to
* @param cacheable should the page be cacheable?
* @param privileged should the mapping be privileged?
* @param executable should the mapped memory be executable?
* @param writable should the mapped memory be writable?
* @param size page size code
* @param mmu_flags MMU_FLAG_DTLB, MMU_FLAG_ITLB or a combination of both
*/
static inline void mmu_map_perm_addr(uintptr_t vaddr, uintptr_t ra,
bool cacheable, bool privileged, bool executable,
bool writable, unsigned size, unsigned mmu_flags) {
192,7 → 175,7
 
extern void dtlb_insert_mapping(uintptr_t, uintptr_t, int, bool, bool);
 
extern void describe_mmu_fault(void);
extern void describe_dmmu_fault(void);
 
#endif /* !def __ASM__ */
 
/branches/sparc/kernel/arch/sparc64/include/mm/sun4v/as.h
60,8 → 60,7
typedef union tte_tag {
uint64_t value;
struct {
unsigned invalid : 1; /**< Invalidated by software. */
unsigned : 2;
unsigned : 3;
unsigned context : 13; /**< Software ASID. */
unsigned : 6;
uint64_t va_tag : 42; /**< Virtual address bits <63:22>. */
/branches/sparc/kernel/arch/sparc64/include/mm/sun4v/tsb.h
71,8 → 71,8
struct pte;
 
extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages);
extern void itsb_pte_copy(struct pte *t, index_t index);
extern void dtsb_pte_copy(struct pte *t, index_t index, bool ro);
extern void itsb_pte_copy(struct pte *t);
extern void dtsb_pte_copy(struct pte *t, bool ro);
 
#endif /* !def __ASM__ */
 
/branches/sparc/kernel/arch/sparc64/Makefile.inc
129,7 → 129,7
arch/$(ARCH)/src/context.S \
arch/$(ARCH)/src/fpu_context.c \
arch/$(ARCH)/src/dummy.s \
arch/$(ARCH)/src/mm/cache.S \
arch/$(ARCH)/src/mm/sun4u/cache.S \
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/proc/thread.c \
/branches/sparc/kernel/arch/sparc64/src/trap/exception.c
156,7 → 156,7
{
fault_if_from_uspace(istate, "%s\n", __func__);
dump_istate(istate);
describe_mmu_fault();
describe_dmmu_fault();
panic("%s\n", __func__);
}
 
/branches/sparc/kernel/arch/sparc64/src/mm/sun4v/tlb.c
66,15 → 66,6
static void do_fast_data_access_protection_fault(istate_t *,
uint64_t, const char *);
 
#if 0
char *context_encoding[] = {
"Primary",
"Secondary",
"Nucleus",
"Reserved"
};
#endif
 
/*
* The assembly language routine passes a 64-bit parameter to the Data Access
* MMU Miss and Data Access protection handlers; the parameter encapsulates
90,6 → 81,35
/* extracts the faulting context */
#define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff)
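
The matching address-extraction macro lies outside this hunk. A sketch, on the assumption that the faulting VA simply occupies the bits above the 13-bit context field:

/* Hypothetical counterpart to DMISS_CONTEXT (not part of this hunk): */
#define DMISS_ADDRESS(page_and_ctx) ((page_and_ctx) & ~((uint64_t) 0x1fff))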
 
/**
* Descriptions of fault types from the MMU Fault status area.
*
* fault_types[i] contains the description of the error for which the IFT or
* DFT field of the MMU fault status area is i.
*/
char *fault_types[] = {
"unknown",
"fast miss",
"fast protection",
"MMU miss",
"invalid RA",
"privileged violation",
"protection violation",
"NFO access",
"so page/NFO side effect",
"invalid VA",
"invalid ASI",
"nc atomic",
"privileged action",
"unknown",
"unaligned access",
"invalid page size"
};
 
/** Array of MMU fault status areas. */
extern mmu_fault_status_area_t mmu_fsas[MAX_NUM_STRANDS];
 
/*
* Invalidate all non-locked DTLB and ITLB entries.
*/
109,35 → 129,31
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
bool locked, bool cacheable)
{
#if 0
tlb_tag_access_reg_t tag;
tlb_data_t data;
page_address_t pg;
frame_address_t fr;
 
pg.address = page;
fr.address = frame;
 
tag.context = ASID_KERNEL;
tag.vpn = pg.vpn;
 
dtlb_tag_access_write(tag.value);
 
tte_data_t data;
data.value = 0;
data.v = true;
data.size = pagesize;
data.pfn = fr.pfn;
data.l = locked;
data.nfo = false;
data.ra = frame >> FRAME_WIDTH;
data.ie = false;
data.e = false;
data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
#endif
data.p = true;
data.w = true;
data.g = false;
 
dtlb_data_in_write(data.value);
#endif
data.x = false;
data.w = false;
data.size = pagesize;
if (locked) {
__hypercall_fast4(
MMU_MAP_PERM_ADDR, page, 0, data.value, MMU_FLAG_DTLB);
} else {
__hypercall_hyperfast(
page, ASID_KERNEL, data.value, MMU_FLAG_DTLB, 0,
MMU_MAP_ADDR);
}
}
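
A hypothetical call site for the rewritten routine: one locked, cacheable 8K kernel mapping (PAGESIZE_8K as the size code, as in the TSB code later in this changeset); virt_addr and real_addr are illustrative names.

/* Locked, cacheable 8K kernel mapping via the MMU_MAP_PERM_ADDR path. */
dtlb_insert_mapping(virt_addr, real_addr, PAGESIZE_8K, true, true);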
 
/** Copy PTE to TLB.
211,7 → 227,7
t->a = true;
itlb_pte_copy(t);
#ifdef CONFIG_TSB
//itsb_pte_copy(t, index);
itsb_pte_copy(t);
#endif
page_table_unlock(AS, true);
} else {
266,7 → 282,7
t->a = true;
dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
//dtsb_pte_copy(t, true);
dtsb_pte_copy(t, true);
#endif
page_table_unlock(AS, true);
} else {
311,7 → 327,7
mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
//dtsb_pte_copy(t, false);
dtsb_pte_copy(t, false);
#endif
page_table_unlock(AS, true);
} else {
327,47 → 343,14
}
}
 
/** Print TLB entry (for debugging purposes).
*
* The diag field has been left out in order to make this function more generic
* (there is no diag field in the US3 architecture).
*
* @param i TLB entry number
* @param t TLB entry tag
* @param d TLB entry data
/*
* On Niagara this function does not work, as supervisor software is isolated
* from the TLB by the hypervisor and has no chance to investigate the TLB
* entries.
*/
#if 0
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
#endif
 
void tlb_print(void)
{
#if 0
int i;
tlb_data_t d;
tlb_tag_read_reg_t t;
printf("I-TLB contents:\n");
for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
d.value = itlb_data_access_read(i);
t.value = itlb_tag_read_read(i);
print_tlb_entry(i, t, d);
}
 
printf("D-TLB contents:\n");
for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
d.value = dtlb_data_access_read(i);
t.value = dtlb_tag_read_read(i);
print_tlb_entry(i, t, d);
}
#endif
printf("Operation not possible on Niagara.\n");
}
 
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
402,8 → 385,18
panic("%s\n", str);
}
 
void describe_mmu_fault(void)
/**
* Describes the exact condition which caused the last DMMU fault.
*/
void describe_dmmu_fault(void)
{
uint64_t myid;
__hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);
 
ASSERT(mmu_fsas[myid].dft < 16);
 
printf("condition which caused the fault: %s\n",
fault_types[mmu_fsas[myid].dft]);
}
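
The fault_types comment above mentions the IFT field as well; an IMMU counterpart would presumably differ only in the field it reads. A sketch, assuming the fault status area exposes an ift member analogous to dft:

void describe_immu_fault(void)
{
	uint64_t myid;
	__hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);

	ASSERT(mmu_fsas[myid].ift < 16);  /* assumed field name */

	printf("condition which caused the fault: %s\n",
	    fault_types[mmu_fsas[myid].ift]);
}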
 
/** Invalidate all unlocked ITLB and DTLB entries. */
423,23 → 416,13
*/
void tlb_invalidate_asid(asid_t asid)
{
#if 0
tlb_context_reg_t pc_save, ctx;
/* switch to nucleus because we are mapped by the primary context */
nucleus_enter();
ctx.v = pc_save.v = mmu_primary_context_read();
ctx.context = asid;
mmu_primary_context_write(ctx.v);
itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
mmu_primary_context_write(pc_save.v);
 
__hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
MMU_FLAG_ITLB | MMU_FLAG_DTLB);
 
nucleus_leave();
#endif
}
 
/** Invalidate all ITLB and DTLB entries for specified page range in specified
451,28 → 434,17
*/
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
#if 0
unsigned int i;
tlb_context_reg_t pc_save, ctx;
 
/* switch to nucleus because we are mapped by the primary context */
nucleus_enter();
ctx.v = pc_save.v = mmu_primary_context_read();
ctx.context = asid;
mmu_primary_context_write(ctx.v);
for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * MMU_PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * MMU_PAGE_SIZE);
 
for (i = 0; i < cnt; i++) {
__hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page + i * PAGE_SIZE, asid,
MMU_FLAG_DTLB | MMU_FLAG_ITLB);
}
mmu_primary_context_write(pc_save.v);
 
nucleus_leave();
#endif
}
 
/** @}
/branches/sparc/kernel/arch/sparc64/src/mm/sun4v/as.c
33,8 → 33,6
/** @file
*/
 
/* SUN4V-OK */
 
#include <arch/mm/as.h>
#include <arch/mm/pagesize.h>
#include <arch/mm/tlb.h>
/branches/sparc/kernel/arch/sparc64/src/mm/sun4v/tsb.c
72,7 → 72,7
for (i = 0; i < cnt; i++) {
((tsb_entry_t *) as->arch.tsb_description.tsb_base)[
(i0 + i) & (TSB_ENTRY_COUNT - 1)].tag.invalid = true;
(i0 + i) & (TSB_ENTRY_COUNT - 1)].data.v = false;
}
}
 
79,21 → 79,17
/** Copy software PTE to ITSB.
*
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K subpage.
*/
void itsb_pte_copy(pte_t *t, index_t index)
void itsb_pte_copy(pte_t *t)
{
#if 0
as_t *as;
tsb_entry_t *tsb;
index_t entry;
 
ASSERT(index <= 1);
as = t->as;
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
ASSERT(entry < ITSB_ENTRY_COUNT);
tsb = &as->arch.itsb[entry];
entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
ASSERT(entry < TSB_ENTRY_COUNT);
tsb = &((tsb_entry_t *) as->arch.tsb_description.tsb_base)[entry];
 
/*
* We use write barriers to make sure that the TSB load
101,47 → 97,45
* be repeated.
*/
 
tsb->tag.invalid = true; /* invalidate the entry
* (tag target has this
* set to 0) */
tsb->data.v = false;
 
write_barrier();
 
tsb->tag.context = as->asid;
/* the shift is bigger than PAGE_WIDTH, do not bother with index */
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
 
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.nfo = false;
tsb->data.ra = t->frame >> MMU_FRAME_WIDTH;
tsb->data.ie = false;
tsb->data.e = false;
tsb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */
tsb->data.cv = false;
tsb->data.p = t->k; /* p as privileged, k as kernel */
tsb->data.v = t->p; /* v as valid, p as present */
tsb->data.x = true;
tsb->data.w = false;
tsb->data.size = PAGESIZE_8K;
write_barrier();
tsb->tag.invalid = false; /* mark the entry as valid */
#endif
tsb->data.v = t->p; /* v as valid, p as present */
}
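
Both TSB copy routines follow the same lock-free publication pattern. Distilled into a hypothetical helper (the helper and the tte_tag_t parameter type are illustrative, not part of the kernel):

static inline void tsb_entry_publish(tsb_entry_t *e, tte_tag_t tag,
    tte_data_t data, bool valid)
{
	e->data.v = false;   /* hide the entry from concurrent TSB lookups */
	write_barrier();     /* the invalidation must land before the fill */

	data.v = false;
	e->tag = tag;        /* fill tag and data while the entry is invalid */
	e->data = data;

	write_barrier();     /* the fill must land before publication */
	e->data.v = valid;   /* the valid bit is written last */
}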
 
/** Copy software PTE to DTSB.
*
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the mapping is copied read-only.
*/
void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
void dtsb_pte_copy(pte_t *t, bool ro)
{
#if 0
as_t *as;
tsb_entry_t *tsb;
index_t entry;
ASSERT(index <= 1);
 
as = t->as;
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
ASSERT(entry < DTSB_ENTRY_COUNT);
tsb = &as->arch.dtsb[entry];
entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
ASSERT(entry < TSB_ENTRY_COUNT);
tsb = &((tsb_entry_t *) as->arch.tsb_description.tsb_base)[entry];
 
/*
* We use write barriers to make sure that the TSB load
149,30 → 143,30
* be repeated.
*/
 
tsb->tag.invalid = true; /* invalidate the entry
* (tag target has this
* set to 0) */
tsb->data.v = false;
 
write_barrier();
 
tsb->tag.context = as->asid;
/* the shift is bigger than PAGE_WIDTH, do not bother with index */
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
 
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
tsb->data.nfo = false;
tsb->data.ra = t->frame >> MMU_FRAME_WIDTH;
tsb->data.ie = false;
tsb->data.e = false;
tsb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */
#ifdef CONFIG_VIRT_IDX_DCACHE
tsb->data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
tsb->data.p = t->k; /* p as privileged */
tsb->data.p = t->k; /* p as privileged, k as kernel */
tsb->data.x = true;
tsb->data.w = ro ? false : t->w;
tsb->data.v = t->p;
tsb->data.size = PAGESIZE_8K;
write_barrier();
tsb->tag.invalid = false; /* mark the entry as valid */
#endif
tsb->data.v = t->p; /* v as valid, p as present */
}
 
/** @}