Subversion Repositories: HelenOS

Compare Revisions

Rev 3675 → Rev 4679

/branches/tracing/kernel/arch/sparc64/src/mm/tlb.c
199,12 → 199,12
/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
pte_t *t;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
t = page_mapping_find(AS, page_16k);
if (t && PTE_EXECUTABLE(t)) {
/*
* The mapping was found in the software page hash table.
222,7 → 222,8
* handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
AS_PF_FAULT) {
do_fast_instruction_access_mmu_miss_fault(istate,
__func__);
}
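
The rename from va to page_16k above makes the 16K-page emulation explicit: the sparc64 MMU translates 8K hardware pages while HelenOS uses 16K kernel pages, so each fault is aligned down to its 16K page and the index of the faulting 8K half is computed separately. A minimal standalone sketch of that arithmetic follows; the constants are assumptions modelled on sparc64 (8K MMU pages, 16K kernel pages), not copied from the HelenOS headers.

#include <stdint.h>
#include <stdio.h>

#define MMU_PAGE_WIDTH      13                      /* assumed: 8K MMU page */
#define MMU_PAGE_SIZE       (1UL << MMU_PAGE_WIDTH)
#define PAGE_SIZE           (1UL << 14)             /* assumed: 16K kernel page */
#define MMU_PAGES_PER_PAGE  (PAGE_SIZE / MMU_PAGE_SIZE)
#define ALIGN_DOWN(s, a)    ((s) & ~((uintptr_t) (a) - 1))

int main(void)
{
	uintptr_t tpc = 0x40006000;  /* hypothetical faulting program counter */

	/* Base of the 16K kernel page containing the fault... */
	uintptr_t page_16k = ALIGN_DOWN(tpc, PAGE_SIZE);
	/* ...and which of its two 8K MMU pages actually missed. */
	uintptr_t index = (tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;

	printf("page_16k = %p, index = %lu\n", (void *) page_16k,
	    (unsigned long) index);
	return 0;
}
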
242,11 → 243,13
*/
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
uintptr_t va;
uintptr_t page_8k;
uintptr_t page_16k;
index_t index;
pte_t *t;
 
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE;
 
if (tag.context == ASID_KERNEL) {
254,6 → 257,15
/* NULL access in kernel */
do_fast_data_access_mmu_miss_fault(istate, tag,
__func__);
} else if (page_8k >= end_of_identity) {
/*
* The kernel is accessing the I/O space.
* We still do identity mapping for I/O,
* but without caching.
*/
dtlb_insert_mapping(page_8k, KA2PA(page_8k),
PAGESIZE_8K, false, false);
return;
}
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
"kernel page fault.");
260,7 → 272,7
}
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
t = page_mapping_find(AS, page_16k);
if (t) {
/*
* The mapping was found in the software page hash table.
278,7 → 290,8
* handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
AS_PF_FAULT) {
do_fast_data_access_mmu_miss_fault(istate, tag,
__func__);
}
295,15 → 308,15
*/
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
uintptr_t va;
uintptr_t page_16k;
index_t index;
pte_t *t;
 
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
t = page_mapping_find(AS, page_16k);
if (t && PTE_WRITABLE(t)) {
/*
* The mapping was found in the software page hash table and is
313,7 → 326,7
t->a = true;
t->d = true;
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
va + index * MMU_PAGE_SIZE);
page_16k + index * MMU_PAGE_SIZE);
dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, index, false);
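
On a write fault against a writable PTE, ordering matters: the handler first marks the page referenced and dirty, then demaps the stale read-only 8K entry from the secondary context, and only then copies the now-writable entry into the DTLB (and the TSB when CONFIG_TSB is enabled). The demap address selects the exact 8K sub-page inside the 16K kernel page. A compilable sketch with stub prototypes and assumed constant encodings:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define TLB_DEMAP_PAGE       0                  /* assumed encodings */
#define TLB_DEMAP_SECONDARY  0
#define MMU_PAGE_SIZE        (1UL << 13)        /* assumed: 8K MMU page */

typedef struct { bool a; bool d; } pte_t;       /* reduced stand-in */

extern void dtlb_demap(int type, int context, uintptr_t va);
extern void dtlb_pte_copy(pte_t *t, size_t index, bool ro);

void make_writable(pte_t *t, uintptr_t page_16k, size_t index)
{
	t->a = true;   /* referenced */
	t->d = true;   /* dirty: the faulting write is about to succeed */

	/* Evict the stale read-only mapping of the faulting 8K sub-page
	 * before loading the writable one. */
	dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
	    page_16k + index * MMU_PAGE_SIZE);
	dtlb_pte_copy(t, index, false);
}
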
325,7 → 338,8
* handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
AS_PF_FAULT) {
do_fast_data_access_protection_fault(istate, tag,
__func__);
}
424,9 → 438,9
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str)
{
fault_if_from_uspace(istate, "%s\n", str);
fault_if_from_uspace(istate, "%s.", str);
dump_istate(istate);
panic("%s\n", str);
panic("%s.", str);
}
 
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
436,12 → 450,12
 
va = tag.vpn << MMU_PAGE_WIDTH;
if (tag.context) {
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
tag.context);
}
dump_istate(istate);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
panic("%s\n", str);
printf("Faulting page: %p, ASID=%d.\n", va, tag.context);
panic("%s.", str);
}
 
void do_fast_data_access_protection_fault(istate_t *istate,
452,12 → 466,12
va = tag.vpn << MMU_PAGE_WIDTH;
 
if (tag.context) {
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
tag.context);
}
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
dump_istate(istate);
panic("%s\n", str);
panic("%s.", str);
}
 
void dump_sfsr_and_sfar(void)
/branches/tracing/kernel/arch/sparc64/src/mm/frame.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64mm
/** @addtogroup sparc64mm
* @{
*/
/** @file
79,7 → 79,8
*/
frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
}
 
end_of_identity = PA2KA(last_frame);
}
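
frame.c now records, at the end of frame initialization, the kernel virtual address one past the last physical frame. Everything below end_of_identity is identity-mapped RAM; everything at or above it is treated by the DTLB miss handler above as I/O space. A sketch of the invariant, with PA2KA stubbed as a hypothetical identity macro and the function name invented for illustration:

#include <stdint.h>

#define PA2KA(pa)  ((uintptr_t) (pa))   /* hypothetical identity stub */

uintptr_t last_frame;        /* highest RAM address known to the allocator */
uintptr_t end_of_identity;   /* first kernel VA not backed by RAM */

void frame_arch_init_tail(void)
{
	/* Kernel VAs in [PA2KA(0), end_of_identity) are RAM; anything at
	 * or above end_of_identity is mapped uncached, on demand, as I/O. */
	end_of_identity = PA2KA(last_frame);
}
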
 
/** @}
/branches/tracing/kernel/arch/sparc64/src/mm/page.c
1,5 → 1,5
/*
* Copyright (c) 2005 Jakub Jermar
* Copyright (c) 2009 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64mm
/** @addtogroup sparc64mm
* @{
*/
/** @file
42,128 → 42,28
#include <align.h>
#include <config.h>
 
#ifdef CONFIG_SMP
/** Entries locked in DTLB of BSP.
*
* Application processors need to have the same locked entries in their DTLBs as
* the bootstrap processor.
*/
static struct {
uintptr_t virt_page;
uintptr_t phys_page;
int pagesize_code;
} bsp_locked_dtlb_entry[DTLB_MAX_LOCKED_ENTRIES];
 
/** Number of entries in bsp_locked_dtlb_entry array. */
static count_t bsp_locked_dtlb_entries = 0;
#endif /* CONFIG_SMP */
 
/** Perform sparc64 specific initialization of paging. */
void page_arch_init(void)
{
if (config.cpu_active == 1) {
if (config.cpu_active == 1)
page_mapping_operations = &ht_mapping_operations;
} else {
 
#ifdef CONFIG_SMP
unsigned int i;
 
/*
* Copy locked DTLB entries from the BSP.
*/
for (i = 0; i < bsp_locked_dtlb_entries; i++) {
dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
}
#endif
 
}
}
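
With hw_map() no longer creating locked DTLB entries (see below), the application processors have nothing to copy from the bootstrap processor, so the bsp_locked_dtlb_entry bookkeeping disappears and page_arch_init() collapses to the BSP-only branch. A compilable reduction of the new function, with the configuration and mapping-operations types stubbed as assumptions:

struct { unsigned int cpu_active; } config;     /* stub assumptions */
struct page_mapping_ops { int unused; };
struct page_mapping_ops ht_mapping_operations;
struct page_mapping_ops *page_mapping_operations;

void page_arch_init(void)
{
	/* Only the bootstrap CPU installs the hash-table page operations;
	 * APs no longer replay locked DTLB entries from the BSP. */
	if (config.cpu_active == 1)
		page_mapping_operations = &ht_mapping_operations;
}
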
 
/** Map memory-mapped device into virtual memory.
*
* So far, only DTLB is used to map devices into memory. Chances are that there
* will be only a limited number of devices that the kernel itself needs to
* lock in DTLB.
* We are currently using identity mapping for mapping device registers.
*
* @param physaddr Physical address of the page where the device is located.
* Must be at least page-aligned.
* @param size Size of the device's registers. Must not exceed 4M and must
* include extra space caused by the alignment.
* @param physaddr Physical address of the page where the device is
* located.
* @param size Size of the device's registers.
*
* @return Virtual address of the page where the device is mapped.
*
*/
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
unsigned int order;
unsigned int i;
 
ASSERT(config.cpu_active == 1);
 
struct {
int pagesize_code;
size_t increment;
count_t count;
} sizemap[] = {
{ PAGESIZE_8K, 0, 1 }, /* 8K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 } /* 8M */
};
ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr);
ASSERT(size <= 8 * 1024 * 1024);
if (size <= MMU_FRAME_SIZE)
order = 0;
else
order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;
 
/*
* Use virtual addresses that are beyond the limit of physical memory.
* Thus, the physical address space will not be wasted by holes created
* by frame_alloc().
*/
ASSERT(PA2KA(last_frame));
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame),
1 << (order + FRAME_WIDTH));
last_frame = ALIGN_UP(KA2PA(virtaddr) + size,
1 << (order + FRAME_WIDTH));
for (i = 0; i < sizemap[order].count; i++) {
/*
* First, insert the mapping into DTLB.
*/
dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
#ifdef CONFIG_SMP
/*
* Second, save the information about the mapping for APs.
*/
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
virtaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
physaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
sizemap[order].pagesize_code;
bsp_locked_dtlb_entries++;
#endif
}
return virtaddr;
return PA2KA(physaddr);
}
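
The rewritten hw_map() is the payoff of the series: instead of eagerly computing a page-size order, burning locked DTLB entries, and recording them for the APs, it simply returns the identity-mapped kernel address of the device. The first access faults, lands above end_of_identity, and the miss handler installs an uncached 8K mapping on demand. A hedged usage sketch; the device address and the PA2KA stub are hypothetical:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PA2KA(pa)  ((uintptr_t) (pa))   /* hypothetical identity stub */

uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
	(void) size;  /* no eager TLB work: the miss handler maps lazily */
	return PA2KA(physaddr);
}

int main(void)
{
	/* First dereference of the returned VA would trap; the DTLB miss
	 * handler sees it above end_of_identity and identity-maps it
	 * uncached, one 8K page at a time. */
	uintptr_t regs = hw_map(0x7f600000UL, 0x2000);  /* hypothetical */
	printf("device registers at %p\n", (void *) regs);
	return 0;
}
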
 
/** @}
*/