Subversion Repositories: HelenOS

Compare Revisions: Rev 4276 → Rev 4277

/trunk/kernel/arch/sparc64/include/trap/mmu.h
103,16 → 103,19
* Note that branch-delay slots are used in order to save space.
*/
0:
- mov VA_DMMU_TAG_ACCESS, %g1
- ldxa [%g1] ASI_DMMU, %g1                ! read the faulting Context and VPN
+ sethi %hi(fast_data_access_mmu_miss_data_hi), %g7
+ wr %g0, ASI_DMMU, %asi
+ ldxa [VA_DMMU_TAG_ACCESS] %asi, %g1     ! read the faulting Context and VPN
set TLB_TAG_ACCESS_CONTEXT_MASK, %g2
andcc %g1, %g2, %g3 ! get Context
- bnz 0f                                  ! Context is non-zero
+ bnz %xcc, 0f                            ! Context is non-zero
andncc %g1, %g2, %g3 ! get page address into %g3
- bz 0f                                   ! page address is zero
+ bz %xcc, 0f                             ! page address is zero
+ ldx [%g7 + %lo(end_of_identity)], %g4
+ cmp %g3, %g4
+ bgeu %xcc, 0f
 
- sethi %hi(kernel_8k_tlb_data_template), %g2
- ldx [%g2 + %lo(kernel_8k_tlb_data_template)], %g2
+ ldx [%g7 + %lo(kernel_8k_tlb_data_template)], %g2
or %g3, %g2, %g2
stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG ! identity map the kernel page
retry
138,8 → 141,7
* Read the Tag Access register for the higher-level handler.
* This is necessary to survive nested DTLB misses.
*/
- mov VA_DMMU_TAG_ACCESS, %g2
- ldxa [%g2] ASI_DMMU, %g2
+ ldxa [VA_DMMU_TAG_ACCESS] %asi, %g2
 
/*
* g2 will be passed as an argument to fast_data_access_mmu_miss().
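
For orientation, the decision the rewritten fast path makes can be summarized in C. This is an illustrative sketch only, not code from the repository: the handler stays in assembly, and install_identity_tte() and fall_through_to_c_handler() are hypothetical names for the stxa/retry sequence and the branch to label 0f, respectively.

    /* sketch of the fast-path decision, not repository code */
    void fast_path_sketch(uint64_t tag_access)
    {
        uint64_t context = tag_access & TLB_TAG_ACCESS_CONTEXT_MASK;
        uint64_t page = tag_access & ~TLB_TAG_ACCESS_CONTEXT_MASK;

        if (context != 0 || page == 0 || page >= end_of_identity) {
            /* userspace access, NULL page, or beyond identity-mapped RAM */
            fall_through_to_c_handler();
            return;
        }
        /* identity map the kernel page using the preloaded TTE template */
        install_identity_tte(page | kernel_8k_tlb_data_template);
    }

The comparison against end_of_identity is the new step: addresses past the end of physical memory no longer get a cacheable identity mapping from the fast path and are deferred to fast_data_access_mmu_miss() instead.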
/trunk/kernel/arch/sparc64/include/mm/frame.h
73,6 → 73,8
typedef union frame_address frame_address_t;
 
extern uintptr_t last_frame;
+ extern uintptr_t end_of_identity;
 
extern void frame_arch_init(void);
#define physmem_print()
 
/trunk/kernel/arch/sparc64/src/mm/tlb.c
199,12 → 199,12
/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
- uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
+ uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
pte_t *t;
 
page_table_lock(AS, true);
- t = page_mapping_find(AS, va);
+ t = page_mapping_find(AS, page_16k);
if (t && PTE_EXECUTABLE(t)) {
/*
* The mapping was found in the software page hash table.
222,7 → 222,8
* handler.
*/
page_table_unlock(AS, true);
- if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
+ if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
+     AS_PF_FAULT) {
do_fast_instruction_access_mmu_miss_fault(istate,
__func__);
}
242,11 → 243,13
*/
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
- uintptr_t va;
+ uintptr_t page_8k;
+ uintptr_t page_16k;
index_t index;
pte_t *t;
 
- va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
+ page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
+ page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE;
 
if (tag.context == ASID_KERNEL) {
254,6 → 257,15
/* NULL access in kernel */
do_fast_data_access_mmu_miss_fault(istate, tag,
__func__);
+ } else if (page_8k >= end_of_identity) {
+     /*
+      * The kernel is accessing the I/O space.
+      * We still do identity mapping for I/O,
+      * but without caching.
+      */
+     dtlb_insert_mapping(page_8k, KA2PA(page_8k),
+         PAGESIZE_8K, false, false);
+     return;
}
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
"kernel page fault.");
260,7 → 272,7
}
 
page_table_lock(AS, true);
- t = page_mapping_find(AS, va);
+ t = page_mapping_find(AS, page_16k);
if (t) {
/*
* The mapping was found in the software page hash table.
278,7 → 290,8
* handler.
*/
page_table_unlock(AS, true);
- if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
+ if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
+     AS_PF_FAULT) {
do_fast_data_access_mmu_miss_fault(istate, tag,
__func__);
}
295,15 → 308,15
*/
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
- uintptr_t va;
+ uintptr_t page_16k;
index_t index;
pte_t *t;
 
- va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
+ page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
t = page_mapping_find(AS, page_16k);
if (t && PTE_WRITABLE(t)) {
/*
* The mapping was found in the software page hash table and is
313,7 → 326,7
t->a = true;
t->d = true;
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
-     va + index * MMU_PAGE_SIZE);
+     page_16k + index * MMU_PAGE_SIZE);
dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, index, false);
325,7 → 338,8
* handler.
*/
page_table_unlock(AS, true);
- if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
+ if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
+     AS_PF_FAULT) {
do_fast_data_access_protection_fault(istate, tag,
__func__);
}
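
Throughout tlb.c, the renames from va to page_8k/page_16k make the 16K-page emulation explicit: the MMU works with 8K pages while the kernel's PAGE_SIZE is 16K, so each kernel page spans two MMU pages. A worked example of the arithmetic, assuming MMU_PAGE_WIDTH == 13, PAGE_SIZE == 16K and MMU_PAGES_PER_PAGE == 2, as the sparc64 port defines them:

    uint64_t vpn = 0x12345;                    /* VPN from the Tag Access register */
    uintptr_t page_8k = (uint64_t) vpn << 13;  /* 0x2468a000, faulting 8K MMU page */
    uintptr_t page_16k = page_8k & ~0x3fffUL;  /* 0x24688000, enclosing 16K kernel page */
    size_t index = vpn % 2;                    /* 1, the upper 8K half of that page */

page_16k is what the software page hash table is keyed on, while index selects which of the two 8K TTEs backing the 16K page gets inserted or demapped.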
/trunk/kernel/arch/sparc64/src/mm/frame.c
79,6 → 79,8
*/
frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
}
 
+ end_of_identity = PA2KA(last_frame);
}
 
/** @}
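
end_of_identity is set once physical memory has been sized. Assuming the usual sparc64 definitions KA2PA(x) == (x) + physmem_base and PA2KA(x) == (x) - physmem_base, the assignment converts last_frame, the first physical address past RAM, into the first virtual address that the identity mapping no longer covers, which is exactly the bound the DTLB fast path and fast_data_access_mmu_miss() now test against. A sketch under those assumed definitions:

    uintptr_t va = PA2KA(last_frame) - PAGE_SIZE;  /* last identity-mapped page */
    assert(KA2PA(va) == last_frame - PAGE_SIZE);   /* resolves below the end of RAM */
    assert(va < end_of_identity);                  /* fast path still handles it */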
/trunk/kernel/arch/sparc64/src/mm/page.c
62,19 → 62,7
*/
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
- if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
-     panic("Unable to map physical memory %p (%d bytes).", physaddr, size);
-
- uintptr_t virtaddr = PA2KA(last_frame);
- pfn_t i;
- for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) {
-     uintptr_t addr = PFN2ADDR(i);
-     page_mapping_insert(AS_KERNEL, virtaddr + addr, physaddr + addr,
-         PAGE_NOT_CACHEABLE | PAGE_WRITE);
- }
-
- last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
-
- return virtaddr;
+ return PA2KA(physaddr);
}
 
/** @}
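
With the miss handler now extending the identity mapping on demand, hw_map() no longer needs to allocate virtual address space or insert page mappings, so it reduces to a single address conversion. A usage sketch, with a made-up device address for illustration:

    /* hypothetical device register block at some physical address */
    uintptr_t regs = hw_map(0x1fe02000000ULL, PAGE_SIZE);
    /* the first load/store through regs misses in the DTLB; since the
       address lies above end_of_identity, the handler installs an
       unlocked, uncacheable identity mapping and the access proceeds */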
/trunk/kernel/arch/sparc64/src/start.S
84,7 → 84,7
! l5 <= physmem_base[(PHYSMEM_ADDR_SIZE - 1):13]
sllx %l5, 13 + (63 - (PHYSMEM_ADDR_SIZE - 1)), %l5
srlx %l5, 63 - (PHYSMEM_ADDR_SIZE - 1), %l5
 
/*
* Setup basic runtime environment.
*/
333,7 → 333,7
2:
ldx [%g2], %g3
cmp %g3, %g1
- bne 2b
+ bne %xcc, 2b
nop
 
/*
381,10 → 381,31
.quad 0
 
/*
-  * This variable is used by the fast_data_MMU_miss trap handler. In runtime, it
-  * is further modified to reflect the starting address of physical memory.
+  * The fast_data_access_mmu_miss_data_hi label and the end_of_identity and
+  * kernel_8k_tlb_data_template variables are meant to stay together,
+  * aligned on 16B boundary.
*/
+ .global fast_data_access_mmu_miss_data_hi
+ .global end_of_identity
.global kernel_8k_tlb_data_template
+
+ .align 16
+ /*
+  * This label is used by the fast_data_access_MMU_miss trap handler.
+  */
+ fast_data_access_mmu_miss_data_hi:
+ /*
+  * This variable is used by the fast_data_access_MMU_miss trap handler.
+  * In runtime, it is modified to contain the address of the end of physical
+  * memory.
+  */
+ end_of_identity:
+ .quad -1
+ /*
+  * This variable is used by the fast_data_access_MMU_miss trap handler.
+  * In runtime, it is further modified to reflect the starting address of
+  * physical memory.
+  */
kernel_8k_tlb_data_template:
#ifdef CONFIG_VIRT_IDX_DCACHE
.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
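
A hypothetical C view of the data block that start.S lays out above makes the addressing trick visible: because the symbols sit together behind one 16-byte-aligned label, the handler loads %hi(fast_data_access_mmu_miss_data_hi) into %g7 once and can then reach both quads through their %lo() offsets, saving a second sethi in the fast path. Illustrative only; the real definitions are the .quad directives above, and fast_data_access_mmu_miss_data_hi itself coincides with the first field.

    struct fast_data_access_mmu_miss_data {
        uint64_t end_of_identity;              /* patched at runtime: end of RAM (VA) */
        uint64_t kernel_8k_tlb_data_template;  /* patched at runtime with physmem_base */
    } __attribute__((aligned(16)));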