Subversion Repositories HelenOS

Compare Revisions

Rev 3386 → Rev 4153 (lines removed in Rev 4153 are prefixed with "-", added lines with "+")

/branches/network/kernel/arch/arm32/src/mm/tlb.c
48,7 → 48,7
asm volatile (
"eor r1, r1\n"
"mcr p15, 0, r1, c8, c7, 0\n"
: : : "r1"
::: "r1"
);
}
 
68,9 → 68,8
static inline void invalidate_page(uintptr_t page)
{
asm volatile (
"mcr p15, 0, %0, c8, c7, 1"
:
: "r" (page)
"mcr p15, 0, %[page], c8, c7, 1\n"
:: [page] "r" (page)
);
}
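
The rewritten asm blocks in this revision move from positional operands ("%0") to GCC named operands ("%[page]" here, "%[dummy]" and "%[ret]" in page_fault.c below), which keeps the constraint lists readable. A minimal sketch of the named-operand style, using a hypothetical helper that is not part of this change (it reads TTBR0, the CP15 c2 translation table base register):

#include <stdint.h>

/* Hypothetical helper, for illustration only: read TTBR0 (CP15 c2)
 * through a named output operand instead of a positional "%0". */
static inline uintptr_t read_ttbr0(void)
{
	uintptr_t ttbr;

	asm volatile (
		"mrc p15, 0, %[ttbr], c2, c0, 0\n"
		: [ttbr] "=r" (ttbr)
	);
	return ttbr;
}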
 
81,7 → 80,7
* @param page Address of the first page whose entry is to be invalidated.
* @param cnt Number of entries to invalidate.
*/
- void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
+ void tlb_invalidate_pages(asid_t asid __attribute__((unused)), uintptr_t page, count_t cnt)
{
unsigned int i;
 
89,5 → 88,13
invalidate_page(page + i * PAGE_SIZE);
}
 
+ void tlb_arch_init(void)
+ {
+ }
+
+ void tlb_print(void)
+ {
+ }
 
/** @}
*/
/branches/network/kernel/arch/arm32/src/mm/page_fault.c
34,7 → 34,6
*/
#include <panic.h>
#include <arch/exception.h>
- #include <arch/debug/print.h>
#include <arch/mm/page_fault.h>
#include <mm/as.h>
#include <genarch/mm/page_pt.h>
49,12 → 48,13
static inline fault_status_t read_fault_status_register(void)
{
fault_status_union_t fsu;
 
/* fault status is stored in CP15 register 5 */
asm volatile (
"mrc p15, 0, %0, c5, c0, 0"
: "=r"(fsu.dummy)
"mrc p15, 0, %[dummy], c5, c0, 0"
: [dummy] "=r" (fsu.dummy)
);
return fsu.fs;
}
 
61,17 → 61,18
/** Returns FAR (fault address register) content.
*
* @return FAR (fault address register) content (address that caused a page
* fault)
*/
static inline uintptr_t read_fault_address_register(void)
{
uintptr_t ret;
 
/* fault address is stored in CP15 register 6 */
asm volatile (
"mrc p15, 0, %0, c6, c0, 0"
: "=r"(ret)
"mrc p15, 0, %[ret], c6, c0, 0"
: [ret] "=r" (ret)
);
return ret;
}
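
Both readers above pull a CP15 register into a union so that the raw word and its decoded bitfields share one object. The real fault_status_union_t is declared in arch/mm/page_fault.h and is not part of this diff; the following is only an assumed sketch of that overlay, based on the ARMv4/v5 fault status register layout (fault status in bits [3:0], domain in bits [7:4]):

#include <stdint.h>

/* Assumed layout, for illustration only; the authoritative definition
 * lives in arch/mm/page_fault.h and may differ. */
typedef struct {
	unsigned status : 4;           /* fault status, FS[3:0] */
	unsigned domain : 4;           /* faulting domain */
	unsigned zero : 1;
	unsigned should_be_zero : 23;  /* reserved */
} fault_status_t;

typedef union {
	fault_status_t fs;   /* decoded view, used as fsu.fs / fsr.status */
	uint32_t dummy;      /* raw word filled by the mrc above */
} fault_status_union_t;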
 
80,29 → 81,26
* @param instr Instruction
*
* @return true when instruction is load/store, false otherwise
+ *
*/
static inline bool is_load_store_instruction(instruction_t instr)
{
/* load store immediate offset */
- if (instr.type == 0x2) {
+ if (instr.type == 0x2)
return true;
- }
 
/* load store register offset */
- if (instr.type == 0x3 && instr.bit4 == 0) {
+ if ((instr.type == 0x3) && (instr.bit4 == 0))
return true;
- }
 
/* load store multiple */
- if (instr.type == 0x4) {
+ if (instr.type == 0x4)
return true;
- }
 
/* coprocessor load/store */
- if (instr.type == 0x6) {
+ if (instr.type == 0x6)
return true;
- }
 
return false;
}
 
115,12 → 113,11
static inline bool is_swap_instruction(instruction_t instr)
{
/* swap, swapb instruction */
- if (instr.type == 0x0 &&
- (instr.opcode == 0x8 || instr.opcode == 0xa) &&
- instr.access == 0x0 && instr.bits567 == 0x4 && instr.bit4 == 1) {
+ if ((instr.type == 0x0) &&
+ ((instr.opcode == 0x8) || (instr.opcode == 0xa)) &&
+ (instr.access == 0x0) && (instr.bits567 == 0x4) && (instr.bit4 == 1))
return true;
- }
 
return false;
}
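
The two predicates above look only at a few bit groups of the faulting instruction word; instruction_t (declared in a header outside this diff) is evidently a bitfield overlay in which type corresponds to bits [27:25] of the ARM encoding. As a worked example under that assumption: "ldr r0, [r1, #4]" assembles to 0xe5910004, its bits [27:25] are 0b010, so type == 0x2 and the load/store immediate branch returns true. A standalone sketch of the same check on a raw instruction word:

#include <stdbool.h>
#include <stdint.h>

/* Illustration only: assumes 'type' maps to bits [27:25] of the
 * instruction word, as the checks above suggest. */
static bool word_is_load_store_immediate(uint32_t word)
{
	/* 0xe5910004 == "ldr r0, [r1, #4]"; bits [27:25] == 0b010 == 0x2 */
	return ((word >> 25) & 0x7) == 0x2;
}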
 
142,8 → 139,8
 
/* undefined instructions */
if (instr.condition == 0xf) {
panic("page_fault - instruction doesn't access memory "
"(instr_code: %x, badvaddr:%x)", instr, badvaddr);
panic("page_fault - instruction does not access memory "
"(instr_code: %x, badvaddr:%x).", instr, badvaddr);
return PF_ACCESS_EXEC;
}
 
162,7 → 159,7
}
 
panic("page_fault - instruction doesn't access memory "
"(instr_code: %x, badvaddr:%x)", instr, badvaddr);
"(instr_code: %x, badvaddr:%x).", instr, badvaddr);
 
return PF_ACCESS_EXEC;
}
184,12 → 181,12
 
if (ret == AS_PF_FAULT) {
print_istate(istate);
dprintf("page fault - pc: %x, va: %x, status: %x(%x), "
printf("page fault - pc: %x, va: %x, status: %x(%x), "
"access:%d\n", istate->pc, badvaddr, fsr.status, fsr,
access);
 
- fault_if_from_uspace(istate, "Page fault: %#x", badvaddr);
- panic("page fault\n");
+ fault_if_from_uspace(istate, "Page fault: %#x.", badvaddr);
+ panic("Page fault.");
}
}
 
203,9 → 200,9
int ret = as_page_fault(istate->pc, PF_ACCESS_EXEC, istate);
 
if (ret == AS_PF_FAULT) {
dprintf("prefetch_abort\n");
printf("prefetch_abort\n");
print_istate(istate);
panic("page fault - prefetch_abort at address: %x\n",
panic("page fault - prefetch_abort at address: %x.",
istate->pc);
}
}
/branches/network/kernel/arch/arm32/src/mm/frame.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup arm32mm
* @{
*/
/** @file
35,9 → 35,8
 
#include <mm/frame.h>
#include <arch/mm/frame.h>
- #include <arch/machine.h>
+ #include <arch/drivers/gxemul.h>
#include <config.h>
- #include <arch/debug/print.h>
 
/** Address of the last frame in the memory. */
uintptr_t last_frame = 0;
45,11 → 44,12
/** Creates memory zones. */
void frame_arch_init(void)
{
- /* all memory as one zone */
- zone_create(0, ADDR2PFN(machine_get_memory_size()),
+ last_frame = *((uintptr_t *) (GXEMUL_MP_ADDRESS + GXEMUL_MP_MEMSIZE_OFFSET));
+ /* All memory as one zone */
+ zone_create(0, ADDR2PFN(last_frame),
BOOT_PAGE_TABLE_START_FRAME + BOOT_PAGE_TABLE_SIZE_IN_FRAMES, 0);
- last_frame = machine_get_memory_size();
 
/* blacklist boot page table */
frame_mark_unavailable(BOOT_PAGE_TABLE_START_FRAME,
BOOT_PAGE_TABLE_SIZE_IN_FRAMES);
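
The new frame_arch_init() no longer asks a machine abstraction for the memory size; it reads it directly from the GXEMUL machine's memory-mapped configuration block and then creates a single zone covering frames 0 through ADDR2PFN(last_frame). A sketch of that read, assuming only the two constants from arch/drivers/gxemul.h (their values are not shown in this diff):

/* Sketch: GXEMUL exposes the installed RAM size as a memory-mapped
 * word; GXEMUL_MP_ADDRESS and GXEMUL_MP_MEMSIZE_OFFSET come from
 * arch/drivers/gxemul.h. */
static uintptr_t gxemul_memory_size(void)
{
	volatile uintptr_t *memsize =
	    (volatile uintptr_t *) (GXEMUL_MP_ADDRESS + GXEMUL_MP_MEMSIZE_OFFSET);

	return *memsize;
}
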
58,10 → 58,9
/** Frees the boot page table. */
void boot_page_table_free(void)
{
- int i;
- for (i = 0; i < BOOT_PAGE_TABLE_SIZE_IN_FRAMES; i++) {
+ unsigned int i;
+ for (i = 0; i < BOOT_PAGE_TABLE_SIZE_IN_FRAMES; i++)
frame_free(i * FRAME_SIZE + BOOT_PAGE_TABLE_ADDRESS);
- }
}
 
/** @}
/branches/network/kernel/arch/arm32/src/mm/page.c
51,19 → 51,15
*/
void page_arch_init(void)
{
+ int flags = PAGE_CACHEABLE;
+ page_mapping_operations = &pt_mapping_operations;
uintptr_t cur;
- int flags;

- page_mapping_operations = &pt_mapping_operations;

- flags = PAGE_CACHEABLE;

- /* PA2KA(identity) mapping for all frames until last_frame */
- for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
+ /* Kernel identity mapping */
+ for (cur = 0; cur < last_frame; cur += FRAME_SIZE)
page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
- }
- /* create mapping for exception table at high offset */
+ /* Create mapping for exception table at high offset */
#ifdef HIGH_EXCEPTION_VECTORS
void *virtaddr = frame_alloc(ONE_FRAME, FRAME_KA);
page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, KA2PA(virtaddr), flags);
70,9 → 66,9
#else
#error "Only high exception vector supported now"
#endif
 
as_switch(NULL, AS_KERNEL);
 
boot_page_table_free();
}
 
90,10 → 86,10
{
if (last_frame + ALIGN_UP(size, PAGE_SIZE) >
KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) {
panic("Unable to map physical memory %p (%d bytes)",
panic("Unable to map physical memory %p (%d bytes).",
physaddr, size)
}
 
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) {
101,7 → 97,7
physaddr + PFN2ADDR(i),
PAGE_NOT_CACHEABLE | PAGE_READ | PAGE_WRITE | PAGE_KERNEL);
}
 
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
return virtaddr;
}
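
hw_map() hands out kernel virtual addresses from the region just above last_frame, inserts a not-cacheable read/write mapping for every page of the requested range, and finally bumps last_frame to the next frame boundary past the mapped bytes. A hypothetical call, assuming the usual hw_map(physaddr, size) signature whose body is shown above (the device address and size are made up for illustration):

/* Map a 0x120-byte device register block. With 4 KiB pages,
 * ALIGN_UP(0x120, PAGE_SIZE) covers one page, so one PAGE_NOT_CACHEABLE
 * mapping is inserted and last_frame moves to the next frame boundary;
 * 'regs' is the kernel virtual address of the block. */
uintptr_t regs = hw_map(0x10000000, 0x120);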