Subversion Repositories HelenOS-historic

Compare Revisions

Ignore whitespace Rev 1410 → Rev 1411

/kernel/trunk/generic/include/mm/page.h
60,6 → 60,14
 
#define PAGE_GLOBAL (1<<PAGE_GLOBAL_SHIFT)
 
/** Page fault access type (what kind of memory access caused the fault). */
typedef enum pf_access {
	PF_ACCESS_READ,		/**< Fault caused by a data read. */
	PF_ACCESS_WRITE,	/**< Fault caused by a data write. */
	PF_ACCESS_EXEC		/**< Fault caused by an instruction fetch. */
} pf_access_t;
 
/** Operations to manipulate page mappings. */
struct page_mapping_operations {
void (* mapping_insert)(as_t *as, __address page, __address frame, int flags);
/kernel/trunk/generic/include/mm/as.h
127,7 → 127,7
 
/** Address space area backend structure.
 *
 * Function pointers implemented by each memory backend (e.g. anonymous
 * memory, ELF images) to service faults in and release frames of an
 * address space area.
 */
struct mem_backend {
	/** Service a page fault at @a addr within @a area.
	 * @a access tells whether the fault was a read, write or exec.
	 * Returns AS_PF_OK on success, AS_PF_FAULT otherwise. */
	int (* backend_page_fault)(as_area_t *area, __address addr, pf_access_t access);
	/** Release @a frame backing @a page of @a area. */
	void (* backend_frame_free)(as_area_t *area, __address page, __address frame);
};
 
145,7 → 145,7
extern int as_area_destroy(as_t *as, __address address);
extern int as_area_get_flags(as_area_t *area);
extern void as_set_mapping(as_t *as, __address page, __address frame);
/* as_page_fault() now takes the access mode (read/write/exec) that caused the fault. */
extern int as_page_fault(__address page, pf_access_t access, istate_t *istate);
extern void as_switch(as_t *old, as_t *new);
extern void as_free(as_t *as);
extern int as_area_steal(task_t *src_task, __address src_base, size_t acc_size, __address dst_base);
/kernel/trunk/generic/src/lib/elf.c
56,7 → 56,7
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as);
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);

/* ELF backend callbacks; elf_page_fault() receives the faulting access mode. */
static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, __address page, __address frame);
 
mem_backend_t elf_backend = {
225,10 → 225,11
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int elf_page_fault(as_area_t *area, __address addr)
int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
elf_header_t *elf = (elf_header_t *) area->backend_data[0];
elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
/kernel/trunk/generic/src/mm/as.c
375,6 → 375,7
as_area_t *area;
__address base;
ipl_t ipl;
bool cond;
 
ipl = interrupts_disable();
mutex_lock(&as->lock);
387,47 → 388,38
}
 
base = area->base;
if (!(area->flags & AS_AREA_DEVICE)) {
bool cond;
/*
* Releasing physical memory.
* Areas mapping memory-mapped devices are treated differently than
* areas backing frame_alloc()'ed memory.
*/
 
/*
* Visit only the pages mapped by used_space B+tree.
* Note that we must be very careful when walking the tree
* leaf list and removing used space as the leaf list changes
* unpredictibly after each remove. The solution is to actually
* not walk the tree at all, but to remove items from the head
* of the leaf list until there are some keys left.
*/
for (cond = true; cond;) {
btree_node_t *node;
/*
* Visit only the pages mapped by used_space B+tree.
* Note that we must be very careful when walking the tree
* leaf list and removing used space as the leaf list changes
* unpredictibly after each remove. The solution is to actually
* not walk the tree at all, but to remove items from the head
* of the leaf list until there are some keys left.
*/
for (cond = true; cond;) {
btree_node_t *node;
ASSERT(!list_empty(&area->used_space.leaf_head));
node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
if ((cond = (bool) node->keys)) {
__address b = node->key[0];
count_t i;
pte_t *pte;
ASSERT(!list_empty(&area->used_space.leaf_head));
node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
if ((cond = (bool) node->keys)) {
__address b = node->key[0];
count_t i;
pte_t *pte;
for (i = 0; i < (count_t) node->value[0]; i++) {
page_table_lock(as, false);
pte = page_mapping_find(as, b + i*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
if (area->backend && area->backend->backend_frame_free) {
area->backend->backend_frame_free(area,
b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
}
page_mapping_remove(as, b + i*PAGE_SIZE);
page_table_unlock(as, false);
for (i = 0; i < (count_t) node->value[0]; i++) {
page_table_lock(as, false);
pte = page_mapping_find(as, b + i*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
if (area->backend && area->backend->backend_frame_free) {
area->backend->backend_frame_free(area,
b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
}
if (!used_space_remove(area, b, i))
panic("Could not remove used space.\n");
page_mapping_remove(as, b + i*PAGE_SIZE);
page_table_unlock(as, false);
}
if (!used_space_remove(area, b, i))
panic("Could not remove used space.\n");
}
}
btree_destroy(&area->used_space);
623,12 → 615,13
* Interrupts are assumed disabled.
*
* @param page Faulting page.
* @param access Access mode that caused the fault (i.e. read/write/exec).
* @param istate Pointer to interrupted state.
*
* @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
* fault was caused by copy_to_uspace() or copy_from_uspace().
*/
int as_page_fault(__address page, istate_t *istate)
int as_page_fault(__address page, pf_access_t access, istate_t *istate)
{
pte_t *pte;
as_area_t *area;
688,7 → 681,7
/*
* Resort to the backend page fault handler.
*/
if (area->backend->backend_page_fault(area, page) != AS_PF_OK) {
if (area->backend->backend_page_fault(area, page, access) != AS_PF_OK) {
page_table_unlock(AS, false);
mutex_unlock(&area->lock);
mutex_unlock(&AS->lock);
1450,7 → 1443,7
}
}
 
/* Anonymous memory backend callbacks; anon_page_fault() receives the faulting access mode. */
static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void anon_frame_free(as_area_t *area, __address page, __address frame);
 
/*
1467,10 → 1460,11
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int anon_page_fault(as_area_t *area, __address addr)
int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
__address frame;
 
/kernel/trunk/arch/ia64/src/mm/tlb.c
429,10 → 429,14
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
{
region_register rr;
rid_t rid;
__address va;
pte_t *t;
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
rid = rr.map.rid;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
if (t) {
447,8 → 451,8
* Forward the page fault to address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, istate->cr_ifa, rr.map.rid, istate->cr_iip);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
}
}
}
493,7 → 497,7
* Forward the page fault to address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, istate) == AS_PF_FAULT) {
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
}
}
518,12 → 522,19
*/
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t && t->p && t->w) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	} else {
		/*
		 * The mapping is missing, not present or not writable;
		 * forward to the address space page fault handler.
		 * Note: panic() does not return, so no statements may
		 * follow it inside the failure branch (previous code
		 * placed the Dirty bit update there — dead code).
		 */
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT)
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
	}
	page_table_unlock(AS, true);
}
541,12 → 558,19
*/
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t && t->p && t->x) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	} else {
		/*
		 * The mapping is missing, not present or not executable;
		 * forward to the address space page fault handler.
		 * panic() does not return, so nothing may follow it in
		 * the failure branch (previous code placed the Accessed
		 * bit update there — dead code).
		 */
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT)
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
	}
	page_table_unlock(AS, true);
}
564,10 → 594,17
*/
void data_access_bit_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	} else {
		/*
		 * Forward to the address space page fault handler.
		 * panic() does not return, so nothing may follow it in
		 * the failure branch; the previous dead code there also
		 * wrongly called itc_pte_copy() for a *data* access —
		 * the data TLB (DTC) is the correct target, not the ITC.
		 */
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT)
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
	}
	page_table_unlock(AS, true);
}
588,10 → 631,14
void page_not_present(__u64 vector, istate_t *istate)
{
region_register rr;
rid_t rid;
__address va;
pte_t *t;
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
rid = rr.map.rid;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
ASSERT(t);
608,8 → 655,8
page_table_unlock(AS, true);
} else {
page_table_unlock(AS, true);
if (as_page_fault(va, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rr.map.rid);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
}
}
}
/kernel/trunk/arch/ppc32/src/mm/page.c
53,12 → 53,14
* @param as Address space.
* @param lock Lock/unlock the address space.
* @param badvaddr Faulting virtual address.
* @param access Access mode that caused the fault.
* @param istate Pointer to interrupted state.
* @param pfcr Pointer to variable where as_page_fault() return code will be stored.
* @return PTE on success, NULL otherwise.
*
*/
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr)
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access,
istate_t *istate, int *pfcr)
{
/*
* Check if the mapping exists in page tables.
78,7 → 80,7
* Resort to higher-level page fault handler.
*/
page_table_unlock(as, lock);
switch (rc = as_page_fault(badvaddr, istate)) {
switch (rc = as_page_fault(badvaddr, access, istate)) {
case AS_PF_OK:
/*
* The higher-level page fault handler succeeded,
211,7 → 213,7
page_table_lock(as, lock);
pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr);
pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfcr);
if (!pte) {
switch (pfcr) {
case AS_PF_FAULT:
/kernel/trunk/arch/amd64/include/mm/page.h
106,6 → 106,23
 
#ifndef __ASM__
 
/* Page fault error codes. */
 
/** When bit on this position is 0, the page fault was caused by a not-present page. */
#define PFERR_CODE_P (1<<0)
 
/** When bit on this position is 1, the page fault was caused by a write. */
#define PFERR_CODE_RW (1<<1)
 
/** When bit on this position is 1, the page fault was caused in user mode. */
#define PFERR_CODE_US (1<<2)
 
/** When bit on this position is 1, a reserved bit was set in page directory. */
#define PFERR_CODE_RSVD (1<<3)
 
/** When bit on this position is 1, the page fault was caused during instruction fetch. */
#define PFERR_CODE_ID (1<<4)
 
/** Page Table Entry. */
struct page_specifier {
unsigned present : 1;
/kernel/trunk/arch/amd64/src/mm/page.c
168,9 → 168,21
void page_fault(int n, istate_t *istate)
{
__address page;
pf_access_t access;
page = read_cr2();
if (as_page_fault(page, istate) == AS_PF_FAULT) {
if (istate->error_word & PFERR_CODE_RSVD)
panic("Reserved bit set in page table entry.\n");
if (istate->error_word & PFERR_CODE_RW)
access = PF_ACCESS_WRITE;
else if (istate->error_word & PFERR_CODE_ID)
access = PF_ACCESS_EXEC;
else
access = PF_ACCESS_READ;
if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
print_info_errcode(n, istate);
printf("Page fault address: %llX\n", page);
panic("page fault\n");
/kernel/trunk/arch/ppc64/src/mm/page.c
53,12 → 53,14
* @param as Address space.
* @param lock Lock/unlock the address space.
* @param badvaddr Faulting virtual address.
* @param access Access mode that caused the fault.
* @param istate Pointer to interrupted state.
* @param pfcr Pointer to variable where as_page_fault() return code will be stored.
* @return PTE on success, NULL otherwise.
*
*/
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr)
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access,
istate_t *istate, int *pfcr)
{
/*
* Check if the mapping exists in page tables.
78,7 → 80,7
* Resort to higher-level page fault handler.
*/
page_table_unlock(as, lock);
switch (rc = as_page_fault(badvaddr, istate)) {
switch (rc = as_page_fault(badvaddr, access, istate)) {
case AS_PF_OK:
/*
* The higher-level page fault handler succeeded,
211,7 → 213,7
page_table_lock(as, lock);
pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr);
pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfcr);
if (!pte) {
switch (pfcr) {
case AS_PF_FAULT:
/kernel/trunk/arch/mips32/src/mm/tlb.c
44,7 → 44,7
static void tlb_invalid_fail(istate_t *istate);
static void tlb_modified_fail(istate_t *istate);
 
static pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfrc);
static pte_t *find_mapping_and_check(__address badvaddr, int access, istate_t *istate, int *pfrc);
 
static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn);
static void prepare_entry_hi(entry_hi_t *hi, asid_t asid, __address addr);
101,7 → 101,7
 
page_table_lock(AS, true);
 
pte = find_mapping_and_check(badvaddr, istate, &pfrc);
pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
if (!pte) {
switch (pfrc) {
case AS_PF_FAULT:
186,7 → 186,7
goto fail;
}
 
pte = find_mapping_and_check(badvaddr, istate, &pfrc);
pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
if (!pte) {
switch (pfrc) {
case AS_PF_FAULT:
270,7 → 270,7
goto fail;
}
 
pte = find_mapping_and_check(badvaddr, istate, &pfrc);
pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate, &pfrc);
if (!pte) {
switch (pfrc) {
case AS_PF_FAULT:
366,12 → 366,13
* The AS->lock must be held on entry to this function.
*
* @param badvaddr Faulting virtual address.
* @param access Access mode that caused the fault.
* @param istate Pointer to interrupted state.
* @param pfrc Pointer to variable where as_page_fault() return code will be stored.
*
* @return PTE on success, NULL otherwise.
*/
pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfrc)
pte_t *find_mapping_and_check(__address badvaddr, int access, istate_t *istate, int *pfrc)
{
entry_hi_t hi;
pte_t *pte;
404,7 → 405,7
* Resort to higher-level page fault handler.
*/
page_table_unlock(AS, true);
switch (rc = as_page_fault(badvaddr, istate)) {
switch (rc = as_page_fault(badvaddr, access, istate)) {
case AS_PF_OK:
/*
* The higher-level page fault handler succeeded,
/kernel/trunk/arch/ia32/include/interrupt.h
92,12 → 92,12
extern void (* enable_irqs_function)(__u16 irqmask);
extern void (* eoi_function)(void);
 
extern void PRINT_INFO_ERRCODE(istate_t *istate);
extern void null_interrupt(int n, istate_t *istate);
extern void gp_fault(int n, istate_t *istate);
extern void nm_fault(int n, istate_t *istate);
extern void ss_fault(int n, istate_t *istate);
extern void simd_fp_exception(int n, istate_t *istate);
extern void page_fault(int n, istate_t *istate);
extern void syscall(int n, istate_t *istate);
extern void tlb_shootdown_ipi(int n, istate_t *istate);
 
/kernel/trunk/arch/ia32/include/mm/page.h
90,6 → 90,20
#include <arch/mm/frame.h>
#include <typedefs.h>
 
/* Page fault error codes. */
 
/** When bit on this position is 0, the page fault was caused by a not-present page. */
#define PFERR_CODE_P (1<<0)
 
/** When bit on this position is 1, the page fault was caused by a write. */
#define PFERR_CODE_RW (1<<1)
 
/** When bit on this position is 1, the page fault was caused in user mode. */
#define PFERR_CODE_US (1<<2)
 
/** When bit on this position is 1, a reserved bit was set in page directory. */
#define PFERR_CODE_RSVD (1<<3)
 
/** Page Table Entry. */
struct page_specifier {
unsigned present : 1;
138,6 → 152,7
}
 
extern void page_arch_init(void);
extern void page_fault(int n, istate_t *istate);
 
#endif /* __ASM__ */
 
/kernel/trunk/arch/ia32/src/mm/page.c
43,7 → 43,6
#include <print.h>
#include <interrupt.h>
 
 
void page_arch_init(void)
{
__address cur;
87,3 → 86,25
return virtaddr;
}
 
/** ia32 page fault exception handler.
 *
 * Reads the faulting linear address from CR2, decodes the CPU error
 * code into a pf_access_t and forwards the fault to the generic
 * address space fault handler; panics if the fault cannot be serviced.
 *
 * @param n Exception vector number.
 * @param istate Interrupted state saved by the exception entry code.
 */
void page_fault(int n, istate_t *istate)
{
	__address faulting_address = read_cr2();
	pf_access_t access_type;

	/* A set reserved bit in a paging structure is a kernel bug, not a servicable fault. */
	if (istate->error_word & PFERR_CODE_RSVD)
		panic("Reserved bit set in page directory.\n");

	/* RW bit distinguishes write faults from read faults. */
	access_type = (istate->error_word & PFERR_CODE_RW) ? PF_ACCESS_WRITE : PF_ACCESS_READ;

	if (as_page_fault(faulting_address, access_type, istate) == AS_PF_FAULT) {
		PRINT_INFO_ERRCODE(istate);
		printf("page fault address: %#x\n", faulting_address);
		panic("page fault\n");
	}
}
/kernel/trunk/arch/ia32/src/interrupt.c
54,7 → 54,7
void (* enable_irqs_function)(__u16 irqmask) = NULL;
void (* eoi_function)(void) = NULL;
 
static void PRINT_INFO_ERRCODE(istate_t *istate)
void PRINT_INFO_ERRCODE(istate_t *istate)
{
char *symbol = get_symtab_entry(istate->eip);
 
139,18 → 139,6
#endif
}
 
/** Page fault handler (pre-revision variant).
 *
 * Reads the faulting linear address from CR2 and forwards it to the
 * generic address space fault handler; panics if the fault cannot be
 * serviced. NOTE(review): this variant does not decode the error code
 * into an access type — the diff context shows it being removed in
 * favour of the handler in arch/ia32/src/mm/page.c.
 *
 * @param n Exception vector number.
 * @param istate Interrupted state saved by the exception entry code.
 */
void page_fault(int n, istate_t *istate)
{
__address page;
 
page = read_cr2();
if (as_page_fault(page, istate) == AS_PF_FAULT) {
PRINT_INFO_ERRCODE(istate);
printf("page fault address: %#x\n", page);
panic("page fault\n");
}
}
 
void syscall(int n, istate_t *istate)
{
panic("Obsolete syscall handler.");