Subversion Repositories HelenOS

Compare Revisions

Rev HEAD → Rev 3094

/trunk/kernel/arch/arm32/src/mm/page_fault.c
34,6 → 34,7
*/
#include <panic.h>
#include <arch/exception.h>
#include <arch/debug/print.h>
#include <arch/mm/page_fault.h>
#include <mm/as.h>
#include <genarch/mm/page_pt.h>
51,10 → 52,9
/* fault status is stored in CP15 register 5 */
asm volatile (
"mrc p15, 0, %[dummy], c5, c0, 0"
: [dummy] "=r" (fsu.dummy)
"mrc p15, 0, %0, c5, c0, 0"
: "=r"(fsu.dummy)
);
return fsu.fs;
}
 
69,10 → 69,9
/* fault address is stored in CP15 register 6 */
asm volatile (
"mrc p15, 0, %[ret], c6, c0, 0"
: [ret] "=r" (ret)
"mrc p15, 0, %0, c6, c0, 0"
: "=r"(ret)
);
return ret;
}
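
Both hunks above trade GCC's named asm operands for positional ones; the generated code is the same. A minimal sketch of the two styles for reading the CP15 fault registers (ARM/GCC only; the function and variable names here are illustrative, not the HelenOS definitions):

#include <stdint.h>

/* Named-operand style: the operand is referenced as %[fs] in the template. */
static inline uint32_t read_fault_status(void)
{
        uint32_t fs;
        asm volatile (
                "mrc p15, 0, %[fs], c5, c0, 0"
                : [fs] "=r" (fs)
        );
        return fs;
}

/* Positional style: %0 refers to the first (and only) output operand. */
static inline uint32_t read_fault_address(void)
{
        uint32_t fa;
        asm volatile (
                "mrc p15, 0, %0, c6, c0, 0"
                : "=r" (fa)
        );
        return fa;
}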
 
81,25 → 80,28
* @param instr Instruction
*
* @return true when instruction is load/store, false otherwise
*
*/
static inline bool is_load_store_instruction(instruction_t instr)
{
/* load store immediate offset */
if (instr.type == 0x2)
if (instr.type == 0x2) {
return true;
}
/* load store register offset */
if ((instr.type == 0x3) && (instr.bit4 == 0))
if (instr.type == 0x3 && instr.bit4 == 0) {
return true;
}
/* load store multiple */
if (instr.type == 0x4)
if (instr.type == 0x4) {
return true;
}
/* coprocessor load/store */
if (instr.type == 0x6)
if (instr.type == 0x6) {
return true;
}
return false;
}
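
For context, is_load_store_instruction() relies on the instruction_t bit-field view of the faulting opcode. The same classification can be written against the raw 32-bit instruction word, where the type field corresponds to ARM's instruction class bits [27:25]; a hedged sketch (the helper name is illustrative, not part of the kernel):

#include <stdbool.h>
#include <stdint.h>

static bool is_load_store_raw(uint32_t instr)
{
        uint32_t type = (instr >> 25) & 0x7;   /* instruction class, bits [27:25] */
        uint32_t bit4 = (instr >> 4) & 0x1;

        if (type == 0x2)                /* load/store, immediate offset */
                return true;
        if (type == 0x3 && bit4 == 0)   /* load/store, register offset */
                return true;
        if (type == 0x4)                /* load/store multiple (LDM/STM) */
                return true;
        if (type == 0x6)                /* coprocessor load/store (LDC/STC) */
                return true;
        return false;
}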
113,10 → 115,11
static inline bool is_swap_instruction(instruction_t instr)
{
/* swap, swapb instruction */
if ((instr.type == 0x0) &&
((instr.opcode == 0x8) || (instr.opcode == 0xa)) &&
(instr.access == 0x0) && (instr.bits567 == 0x4) && (instr.bit4 == 1))
if (instr.type == 0x0 &&
(instr.opcode == 0x8 || instr.opcode == 0xa) &&
instr.access == 0x0 && instr.bits567 == 0x4 && instr.bit4 == 1) {
return true;
}
return false;
}
139,8 → 142,8
 
/* undefined instructions */
if (instr.condition == 0xf) {
panic("page_fault - instruction does not access memory "
"(instr_code: %x, badvaddr:%x).", instr, badvaddr);
panic("page_fault - instruction doesn't access memory "
"(instr_code: %x, badvaddr:%x)", instr, badvaddr);
return PF_ACCESS_EXEC;
}
 
159,7 → 162,7
}
 
panic("page_fault - instruction doesn't access memory "
"(instr_code: %x, badvaddr:%x).", instr, badvaddr);
"(instr_code: %x, badvaddr:%x)", instr, badvaddr);
 
return PF_ACCESS_EXEC;
}
181,12 → 184,12
 
if (ret == AS_PF_FAULT) {
print_istate(istate);
printf("page fault - pc: %x, va: %x, status: %x(%x), "
dprintf("page fault - pc: %x, va: %x, status: %x(%x), "
"access:%d\n", istate->pc, badvaddr, fsr.status, fsr,
access);
fault_if_from_uspace(istate, "Page fault: %#x.", badvaddr);
panic("Page fault.");
fault_if_from_uspace(istate, "Page fault: %#x", badvaddr);
panic("page fault\n");
}
}
 
200,9 → 203,9
int ret = as_page_fault(istate->pc, PF_ACCESS_EXEC, istate);
 
if (ret == AS_PF_FAULT) {
printf("prefetch_abort\n");
dprintf("prefetch_abort\n");
print_istate(istate);
panic("page fault - prefetch_abort at address: %x.",
panic("page fault - prefetch_abort at address: %x\n",
istate->pc);
}
}
/trunk/kernel/arch/arm32/src/mm/frame.c
37,6 → 37,7
#include <arch/mm/frame.h>
#include <arch/machine.h>
#include <config.h>
#include <arch/debug/print.h>
 
/** Address of the last frame in the memory. */
uintptr_t last_frame = 0;
44,26 → 45,24
/** Creates memory zones. */
void frame_arch_init(void)
{
/* all memory as one zone */
zone_create(0, ADDR2PFN(machine_get_memory_size()),
BOOT_PAGE_TABLE_START_FRAME + BOOT_PAGE_TABLE_SIZE_IN_FRAMES, 0);
last_frame = machine_get_memory_size();
/* All memory as one zone */
zone_create(0, ADDR2PFN(last_frame),
BOOT_PAGE_TABLE_START_FRAME + BOOT_PAGE_TABLE_SIZE_IN_FRAMES, 0);
/* blacklist boot page table */
frame_mark_unavailable(BOOT_PAGE_TABLE_START_FRAME,
BOOT_PAGE_TABLE_SIZE_IN_FRAMES);
 
machine_frame_init();
}
 
/** Frees the boot page table. */
void boot_page_table_free(void)
{
unsigned int i;
for (i = 0; i < BOOT_PAGE_TABLE_SIZE_IN_FRAMES; i++)
int i;
for (i = 0; i < BOOT_PAGE_TABLE_SIZE_IN_FRAMES; i++) {
frame_free(i * FRAME_SIZE + BOOT_PAGE_TABLE_ADDRESS);
}
}
 
/** @}
*/
/trunk/kernel/arch/arm32/src/mm/tlb.c
68,8 → 68,9
static inline void invalidate_page(uintptr_t page)
{
asm volatile (
"mcr p15, 0, %[page], c8, c7, 1\n"
:: [page] "r" (page)
"mcr p15, 0, %0, c8, c7, 1"
:
: "r" (page)
);
}
 
80,7 → 81,7
* @param page Address of the first page whose entry is to be invalidated.
* @param cnt Number of entries to invalidate.
*/
void tlb_invalidate_pages(asid_t asid __attribute__((unused)), uintptr_t page, size_t cnt)
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
unsigned int i;
 
88,13 → 89,5
invalidate_page(page + i * PAGE_SIZE);
}
 
void tlb_arch_init(void)
{
}
 
void tlb_print(void)
{
}
 
/** @}
*/
/trunk/kernel/arch/arm32/src/mm/page.c
51,15 → 51,19
*/
void page_arch_init(void)
{
int flags = PAGE_CACHEABLE;
uintptr_t cur;
int flags;
 
page_mapping_operations = &pt_mapping_operations;
uintptr_t cur;
/* Kernel identity mapping */
for (cur = 0; cur < last_frame; cur += FRAME_SIZE)
flags = PAGE_CACHEABLE;
 
/* PA2KA(identity) mapping for all frames until last_frame */
for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
}
/* Create mapping for exception table at high offset */
/* create mapping for exception table at high offset */
#ifdef HIGH_EXCEPTION_VECTORS
void *virtaddr = frame_alloc(ONE_FRAME, FRAME_KA);
page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, KA2PA(virtaddr), flags);
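
The loop above builds the kernel's PA2KA mapping frame by frame: every physical frame below last_frame appears in the kernel address space at a constant offset. A minimal sketch of that translation, using lowercase stand-ins for the PA2KA/KA2PA macros and assuming a hypothetical 0x80000000 kernel base (the real offset comes from the arch headers):

#include <stdint.h>

/* Hypothetical kernel base; the real value is defined by the arch headers. */
#define KERNEL_BASE  UINT32_C(0x80000000)

/* Physical <-> kernel-virtual translation is a constant offset, which is why
 * page_arch_init() can map every frame below last_frame one by one. */
static inline uintptr_t pa2ka(uintptr_t pa)
{
        return pa + KERNEL_BASE;
}

static inline uintptr_t ka2pa(uintptr_t ka)
{
        return ka - KERNEL_BASE;
}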
86,7 → 90,7
{
if (last_frame + ALIGN_UP(size, PAGE_SIZE) >
KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) {
panic("Unable to map physical memory %p (%d bytes).",
panic("Unable to map physical memory %p (%d bytes)",
physaddr, size);
}