/branches/arm/kernel/arch/arm32/src/mm/tlb.c |
---|
39,13 → 39,15 |
#include <arch/types.h> |
#include <arch/mm/page.h> |
/** Invalidate all entries in TLB. */ |
/** Invalidate all entries in TLB. |
* |
* @note See ARM Architecture reference section 3.7.7 for details. |
*/ |
void tlb_invalidate_all(void) |
{ |
asm volatile ( |
"eor r1, r1\n" |
"MCR p15, 0, r1, c8, c7, 0\n" // see ARM Architecture reference relE 3.7.7 p.528 |
"mcr p15, 0, r1, c8, c7, 0\n" |
::: "r1" |
); |
} |
53,7 → 55,7 |
/** Invalidate all entries in TLB that belong to specified address space. |
* |
* @param asid This parameter is ignored as the ARM architecture doesn't support it. |
* @param asid Ignored as the ARM architecture doesn't support ASIDs. |
*/ |
void tlb_invalidate_asid(asid_t asid) |
{ |
68,10 → 70,10 |
static inline void invalidate_page(uintptr_t page) |
{ |
asm volatile ( |
"MCR p15, 0, %0, c8, c7, 1" |
"mcr p15, 0, %0, c8, c7, 1" |
: /* no output */ |
: "r"(page) /* input */ |
: |
: "r"(page) |
); |
} |
78,7 → 80,7 |
/** Invalidate TLB entries for specified page range belonging to specified address space. |
* |
* @param asid This parameter is ignored as the ARM architecture doesn't support it. |
* @param asid Ignored as the ARM architecture doesn't support it. |
* @param page Address of the first page whose entry is to be invalidated. |
* @param cnt Number of entries to invalidate. |
*/ |
/branches/arm/kernel/arch/arm32/src/mm/as.c |
---|
39,7 → 39,10 |
#include <mm/as.h> |
#include <arch.h> |
/** Architecture dependent address space init. */ |
/** Architecture dependent address space init. |
* |
* Since ARM supports page tables, #as_pt_operations are used. |
*/ |
void as_arch_init(void) |
{ |
as_operations = &as_pt_operations; |
/branches/arm/kernel/arch/arm32/src/mm/page_fault.c |
---|
43,7 → 43,6 |
/** Returns value stored in fault status register. |
* FSR contain reason of page fault |
* |
* @return Value stored in CP15 fault status register (FSR). |
*/ |
51,7 → 50,7 |
{ |
fault_status_union_t fsu; |
// fault adress is stored in CP15 register 5 |
// fault status is stored in CP15 register 5 |
asm volatile ( |
"mrc p15, 0, %0, c5, c0, 0" |
: "=r"(fsu.dummy) |
77,7 → 76,7 |
} |
/** Decides whether the instructions is load/store or not. |
/** Decides whether the instruction is load/store or not. |
* |
* @param instr Instruction |
* |
130,11 → 129,10 |
/** Decides whether read or write into memory is requested. |
* |
* @param instr_addr Address of instruction which tries to access memory |
* @param badvaddr Virtual address the instruction tries to access |
* @param instr_addr Address of instruction which tries to access memory. |
* @param badvaddr Virtual address the instruction tries to access. |
* |
* @return Type of access into memmory |
* Note: Returns #PF_ACCESS_EXEC if no memory access is requested |
* @return Type of access into memory, #PF_ACCESS_EXEC if no memory access is requested. |
*/ |
static pf_access_t get_memory_access_type(uint32_t instr_addr, uintptr_t badvaddr) |
{ |
145,7 → 143,7 |
// undefined instructions |
if (instr.condition == 0xf) { |
panic("page_fault - instruction not access memmory (instr_code: %x, badvaddr:%x)", |
panic("page_fault - instruction doesn't access memory (instr_code: %x, badvaddr:%x)", |
instr, badvaddr); |
return PF_ACCESS_EXEC; |
} |
161,34 → 159,10 |
// swap, swpb instruction |
if (is_swap_instruction(instr)) { |
/* Swap instructions make read and write in one step. |
* Type of access that caused exception have to page tables |
* and access rights. |
*/ |
pte_level1_t* pte = (pte_level1_t*) |
pt_mapping_operations.mapping_find(AS, badvaddr); |
if ( pte == NULL ) { |
return PF_ACCESS_READ; |
} |
/* check if read possible |
* Note: Don't check PTE_READABLE because it returns 1 everytimes */ |
if ( !PTE_PRESENT(pte) ) { |
return PF_ACCESS_READ; |
} |
if ( !PTE_WRITABLE(pte) ) { |
return PF_ACCESS_WRITE; |
} else { |
// badvaddr is present readable and writeable but error occured ... why? |
panic("page_fault - swap instruction, but address readable and writeable" |
"(instr_code:%X, badvaddr:%X)", instr, badvaddr); |
} |
return PF_ACCESS_WRITE; |
} |
panic("page_fault - instruction not access memory (instr_code: %x, badvaddr:%x)", |
panic("page_fault - instruction doesn't access memory (instr_code: %x, badvaddr:%x)", |
instr, badvaddr); |
return PF_ACCESS_EXEC; |
196,8 → 170,8 |
/** Handles "data abort" exception (load or store at invalid address). |
* |
* @param exc_no exception number |
* @param istate CPU state when exception occured |
* @param exc_no Exception number. |
* @param istate CPU state when exception occurred. |
*/ |
void data_abort(int exc_no, istate_t *istate) |
{ |
220,8 → 194,8 |
/** Handles "prefetch abort" exception (instruction couldn't be executed). |
* |
* @param exc_no exception number |
* @param istate CPU state when exception occured |
* @param exc_no Exception number. |
* @param istate CPU state when exception occurred. |
*/ |
void prefetch_abort(int exc_no, istate_t *istate) |
{ |
/branches/arm/kernel/arch/arm32/src/mm/frame.c |
---|
38,9 → 38,10 |
#include <config.h> |
#include <arch/debug/print.h> |
/** Address of the last frame in the memory. */ |
uintptr_t last_frame = 0; |
/** Create memory zones. */ |
/** Creates memory zones. */ |
void frame_arch_init(void) |
{ |
// all memory as one zone |
/branches/arm/kernel/arch/arm32/src/mm/page.c |
---|
45,8 → 45,10 |
#include <arch/mm/frame.h> |
/** |
* Initializes kernel adress space page tables, sets abourts exceptions vectors |
/** Initializes page tables. |
* |
* 1:1 virtual-physical mapping is created in kernel address space. Mapping |
* for table with exception vectors is also created. |
*/ |
void page_arch_init(void) |
{ |
64,14 → 66,10 |
// create mapping for exception table at high offset |
#ifdef HIGH_EXCEPTION_VECTORS |
/* Note: this mapping cann't be done by hw_map because fixed |
exception vector is stored at fixed virtual address |
reserve frame for exception table |
*/ |
void* virtaddr = frame_alloc(ONE_FRAME, FRAME_KA); |
void *virtaddr = frame_alloc(ONE_FRAME, FRAME_KA); |
page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, KA2PA(virtaddr), flags); |
#else |
#error "Only high eception vector supported now" |
#error "Only high exception vector supported now" |
#endif |
as_switch(NULL, AS_KERNEL); |
79,16 → 77,15 |
boot_page_table_free(); |
} |
/** |
* Map device into kernel space. |
/** Maps device into the kernel space. |
* |
* This function adds mapping of physical address that is read/write only |
* from kernel and not bufferable. |
* Maps the physical address of a device into the kernel virtual address space (so it |
* can be accessed only by the kernel through a virtual address). |
* |
* @param physaddr Physical addres where device is connected |
* @param size Length of area where device is present |
* @param physaddr Physical address where device is connected. |
* @param size Length of area where device is present. |
* |
* @return Virtual address where device will be accessable |
* @return Virtual address where device will be accessible. |
*/ |
uintptr_t hw_map(uintptr_t physaddr, size_t size) |
{ |