Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1411 → Rev 1410

/kernel/trunk/arch/ppc64/src/mm/page.c
53,14 → 53,12
* @param as Address space.
* @param lock Lock/unlock the address space.
* @param badvaddr Faulting virtual address.
* @param access Access mode that caused the fault.
* @param istate Pointer to interrupted state.
* @param pfrc Pointer to variable where as_page_fault() return code will be stored.
* @return PTE on success, NULL otherwise.
*
*/
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access,
istate_t *istate, int *pfcr)
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr)
{
/*
* Check if the mapping exists in page tables.
80,7 → 78,7
* Resort to higher-level page fault handler.
*/
page_table_unlock(as, lock);
switch (rc = as_page_fault(badvaddr, access, istate)) {
switch (rc = as_page_fault(badvaddr, istate)) {
case AS_PF_OK:
/*
* The higher-level page fault handler succeeded,
213,7 → 211,7
page_table_lock(as, lock);
pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfcr);
pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr);
if (!pte) {
switch (pfcr) {
case AS_PF_FAULT:
/kernel/trunk/arch/ppc32/src/mm/page.c
53,14 → 53,12
* @param as Address space.
* @param lock Lock/unlock the address space.
* @param badvaddr Faulting virtual address.
* @param access Access mode that caused the fault.
* @param istate Pointer to interrupted state.
* @param pfrc Pointer to variable where as_page_fault() return code will be stored.
* @return PTE on success, NULL otherwise.
*
*/
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access,
istate_t *istate, int *pfcr)
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr)
{
/*
* Check if the mapping exists in page tables.
80,7 → 78,7
* Resort to higher-level page fault handler.
*/
page_table_unlock(as, lock);
switch (rc = as_page_fault(badvaddr, access, istate)) {
switch (rc = as_page_fault(badvaddr, istate)) {
case AS_PF_OK:
/*
* The higher-level page fault handler succeeded,
213,7 → 211,7
page_table_lock(as, lock);
pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfcr);
pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr);
if (!pte) {
switch (pfcr) {
case AS_PF_FAULT:
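
In both PowerPC hunks above, Rev 1411 threads a pf_access_t access mode from the low-level fault handler through find_mapping_and_check() into as_page_fault(), so the generic handler knows what kind of access faulted; Rev 1410 drops that argument. A minimal sketch of the kind of check the extra parameter enables (the PF_ACCESS_* values appear elsewhere in this diff; the helper name and the AS_AREA_* flag test are assumptions, not the HelenOS implementation):

/* Illustrative only: validating an access mode against address space
 * area permissions. The AS_AREA_* flags and this helper are assumed. */
typedef enum {
	PF_ACCESS_READ,
	PF_ACCESS_WRITE,
	PF_ACCESS_EXEC
} pf_access_t;

static bool area_permits_access(int area_flags, pf_access_t access)
{
	switch (access) {
	case PF_ACCESS_READ:
		return (area_flags & AS_AREA_READ) != 0;
	case PF_ACCESS_WRITE:
		return (area_flags & AS_AREA_WRITE) != 0;
	case PF_ACCESS_EXEC:
		return (area_flags & AS_AREA_EXEC) != 0;
	default:
		return false;
	}
}

With the parameter removed, as_page_fault() can no longer tell a read from a write or an instruction fetch.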
/kernel/trunk/arch/mips32/src/mm/tlb.c
44,7 → 44,7
static void tlb_invalid_fail(istate_t *istate);
static void tlb_modified_fail(istate_t *istate);
 
static pte_t *find_mapping_and_check(__address badvaddr, int access, istate_t *istate, int *pfrc);
static pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfrc);
 
static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn);
static void prepare_entry_hi(entry_hi_t *hi, asid_t asid, __address addr);
101,7 → 101,7
 
page_table_lock(AS, true);
 
pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
pte = find_mapping_and_check(badvaddr, istate, &pfrc);
if (!pte) {
switch (pfrc) {
case AS_PF_FAULT:
186,7 → 186,7
goto fail;
}
 
pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
pte = find_mapping_and_check(badvaddr, istate, &pfrc);
if (!pte) {
switch (pfrc) {
case AS_PF_FAULT:
270,7 → 270,7
goto fail;
}
 
pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate, &pfrc);
pte = find_mapping_and_check(badvaddr, istate, &pfrc);
if (!pte) {
switch (pfrc) {
case AS_PF_FAULT:
366,13 → 366,12
* The AS->lock must be held on entry to this function.
*
* @param badvaddr Faulting virtual address.
* @param access Access mode that caused the fault.
* @param istate Pointer to interrupted state.
* @param pfrc Pointer to variable where as_page_fault() return code will be stored.
*
* @return PTE on success, NULL otherwise.
*/
pte_t *find_mapping_and_check(__address badvaddr, int access, istate_t *istate, int *pfrc)
pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfrc)
{
entry_hi_t hi;
pte_t *pte;
405,7 → 404,7
* Resort to higher-level page fault handler.
*/
page_table_unlock(AS, true);
switch (rc = as_page_fault(badvaddr, access, istate)) {
switch (rc = as_page_fault(badvaddr, istate)) {
case AS_PF_OK:
/*
* The higher-level page fault handler succeeded,
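
The mips32 doc comment above describes find_mapping_and_check() and its pfrc out-parameter. A hedged sketch of the Rev 1411 calling convention, with names taken from the diff and the wrapper itself purely illustrative:

/* Illustrative wrapper: a NULL return means the mapping was not found in
 * the page tables, and *pfrc carries the as_page_fault() result so the
 * caller knows whether the fault was handled or must be reported. */
static pte_t *lookup_or_fail(__address badvaddr, istate_t *istate)
{
	int pfrc;
	pte_t *pte;

	pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
	if (!pte && pfrc == AS_PF_FAULT) {
		/* Unresolvable fault: report it, as the TLB handlers above do. */
		panic("unexpected page fault at %p\n", badvaddr);
	}
	return pte;	/* NULL here means the fault was handled or deferred elsewhere */
}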
/kernel/trunk/arch/amd64/src/mm/page.c
168,21 → 168,9
void page_fault(int n, istate_t *istate)
{
__address page;
pf_access_t access;
page = read_cr2();
if (istate->error_word & PFERR_CODE_RSVD)
panic("Reserved bit set in page table entry.\n");
if (istate->error_word & PFERR_CODE_RW)
access = PF_ACCESS_WRITE;
else if (istate->error_word & PFERR_CODE_ID)
access = PF_ACCESS_EXEC;
else
access = PF_ACCESS_READ;
if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
if (as_page_fault(page, istate) == AS_PF_FAULT) {
print_info_errcode(n, istate);
printf("Page fault address: %llX\n", page);
panic("page fault\n");
/kernel/trunk/arch/amd64/include/mm/page.h
106,23 → 106,6
 
#ifndef __ASM__
 
/* Page fault error codes. */
 
/** When bit on this position is 0, the page fault was caused by a not-present page. */
#define PFERR_CODE_P (1<<0)
 
/** When bit on this position is 1, the page fault was caused by a write. */
#define PFERR_CODE_RW (1<<1)
 
/** When bit on this position is 1, the page fault was caused in user mode. */
#define PFERR_CODE_US (1<<2)
 
/** When bit on this position is 1, a reserved bit was set in page directory. */
#define PFERR_CODE_RSVD (1<<3)
 
/** When bit on this position is 1, the page fault was caused during instruction fetch. */
#define PFERR_CODE_ID (1<<4)
 
/** Page Table Entry. */
struct page_specifier {
unsigned present : 1;
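
The PFERR_CODE_* bits above mirror the error code the processor pushes on a page fault; the amd64 hunk earlier in this diff uses them to pick a pf_access_t before calling as_page_fault(). A sketch of that classification, illustrative rather than kernel code. For example, an error word of 0x6 (PFERR_CODE_RW | PFERR_CODE_US with PFERR_CODE_P clear) is a user-mode write to a not-present page and classifies as PF_ACCESS_WRITE:

/* Illustrative helper: classify a page-fault error word the way the
 * Rev 1411 amd64 page_fault() handler does. */
static pf_access_t classify_page_fault(__u64 error_word)
{
	if (error_word & PFERR_CODE_RW)
		return PF_ACCESS_WRITE;		/* fault caused by a write */
	if (error_word & PFERR_CODE_ID)
		return PF_ACCESS_EXEC;		/* fault during an instruction fetch */
	return PF_ACCESS_READ;			/* otherwise treat it as a read */
}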
/kernel/trunk/arch/ia32/src/mm/page.c
43,6 → 43,7
#include <print.h>
#include <interrupt.h>
 
 
void page_arch_init(void)
{
__address cur;
86,25 → 87,3
return virtaddr;
}
 
void page_fault(int n, istate_t *istate)
{
__address page;
pf_access_t access;
page = read_cr2();
if (istate->error_word & PFERR_CODE_RSVD)
panic("Reserved bit set in page directory.\n");
 
if (istate->error_word & PFERR_CODE_RW)
access = PF_ACCESS_WRITE;
else
access = PF_ACCESS_READ;
 
if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
PRINT_INFO_ERRCODE(istate);
printf("page fault address: %#x\n", page);
panic("page fault\n");
}
}
/kernel/trunk/arch/ia32/src/interrupt.c
54,7 → 54,7
void (* enable_irqs_function)(__u16 irqmask) = NULL;
void (* eoi_function)(void) = NULL;
 
void PRINT_INFO_ERRCODE(istate_t *istate)
static void PRINT_INFO_ERRCODE(istate_t *istate)
{
char *symbol = get_symtab_entry(istate->eip);
 
139,6 → 139,18
#endif
}
 
void page_fault(int n, istate_t *istate)
{
__address page;
 
page = read_cr2();
if (as_page_fault(page, istate) == AS_PF_FAULT) {
PRINT_INFO_ERRCODE(istate);
printf("page fault address: %#x\n", page);
panic("page fault\n");
}
}
 
void syscall(int n, istate_t *istate)
{
panic("Obsolete syscall handler.");
/kernel/trunk/arch/ia32/include/interrupt.h
92,12 → 92,12
extern void (* enable_irqs_function)(__u16 irqmask);
extern void (* eoi_function)(void);
 
extern void PRINT_INFO_ERRCODE(istate_t *istate);
extern void null_interrupt(int n, istate_t *istate);
extern void gp_fault(int n, istate_t *istate);
extern void nm_fault(int n, istate_t *istate);
extern void ss_fault(int n, istate_t *istate);
extern void simd_fp_exception(int n, istate_t *istate);
extern void page_fault(int n, istate_t *istate);
extern void syscall(int n, istate_t *istate);
extern void tlb_shootdown_ipi(int n, istate_t *istate);
 
/kernel/trunk/arch/ia32/include/mm/page.h
90,20 → 90,6
#include <arch/mm/frame.h>
#include <typedefs.h>
 
/* Page fault error codes. */
 
/** When bit on this position is 0, the page fault was caused by a not-present page. */
#define PFERR_CODE_P (1<<0)
 
/** When bit on this position is 1, the page fault was caused by a write. */
#define PFERR_CODE_RW (1<<1)
 
/** When bit on this position is 1, the page fault was caused in user mode. */
#define PFERR_CODE_US (1<<2)
 
/** When bit on this position is 1, a reserved bit was set in page directory. */
#define PFERR_CODE_RSVD (1<<3)
 
/** Page Table Entry. */
struct page_specifier {
unsigned present : 1;
152,7 → 138,6
}
 
extern void page_arch_init(void);
extern void page_fault(int n, istate_t *istate);
 
#endif /* __ASM__ */
 
/kernel/trunk/arch/ia64/src/mm/tlb.c
429,14 → 429,10
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
{
region_register rr;
rid_t rid;
__address va;
pte_t *t;
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
rid = rr.map.rid;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
if (t) {
451,8 → 447,8
* Forward the page fault to address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
if (as_page_fault(va, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, istate->cr_ifa, rr.map.rid, istate->cr_iip);
}
}
}
497,7 → 493,7
* Forward the page fault to address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
if (as_page_fault(va, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
}
}
522,19 → 518,12
*/
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
{
region_register rr;
rid_t rid;
__address va;
pte_t *t;
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
rid = rr.map.rid;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p && t->w) {
if (t && t->p) {
/*
* Update the Dirty bit in page tables and reinsert
* the mapping into DTC.
541,12 → 530,6
*/
t->d = true;
dtc_pte_copy(t);
} else {
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
t->d = true;
dtc_pte_copy(t);
}
}
page_table_unlock(AS, true);
}
558,19 → 541,12
*/
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
{
region_register rr;
rid_t rid;
__address va;
pte_t *t;
pte_t *t;
 
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
rid = rr.map.rid;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p && t->x) {
if (t && t->p) {
/*
* Update the Accessed bit in page tables and reinsert
* the mapping into ITC.
577,12 → 553,6
*/
t->a = true;
itc_pte_copy(t);
} else {
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
t->a = true;
itc_pte_copy(t);
}
}
page_table_unlock(AS, true);
}
594,17 → 564,10
*/
void data_access_bit_fault(__u64 vector, istate_t *istate)
{
region_register rr;
rid_t rid;
__address va;
pte_t *t;
 
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
rid = rr.map.rid;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
/*
613,12 → 576,6
*/
t->a = true;
dtc_pte_copy(t);
} else {
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
t->a = true;
itc_pte_copy(t);
}
}
page_table_unlock(AS, true);
}
631,14 → 588,10
void page_not_present(__u64 vector, istate_t *istate)
{
region_register rr;
rid_t rid;
__address va;
pte_t *t;
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
rid = rr.map.rid;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
ASSERT(t);
655,8 → 608,8
page_table_unlock(AS, true);
} else {
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
if (as_page_fault(va, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rr.map.rid);
}
}
}
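
The ia64 hunks revert the Dirty and Accessed bit handlers to assume a present PTE with the required permission (hence the ASSERT lines), whereas Rev 1411 forwards a permission mismatch to as_page_fault() with the matching access mode. A condensed, illustrative paraphrase of the Rev 1411 dirty-bit path shown above (names follow the diff; this is not the exact source):

/* Present and writable: set the Dirty bit in place and reinsert into the
 * DTC. Otherwise hand the fault to the generic handler as a write. */
if (t && t->p && t->w) {
	t->d = true;
	dtc_pte_copy(t);
} else {
	if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT)
		panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
}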