36,7 → 36,6 |
#include <arch.h> |
#include <arch/types.h> |
#include <arch/exception.h> |
#include <align.h> |
#include <config.h> |
#include <print.h> |
#include <symtab.h> |
47,11 → 46,8 |
/** Try to find PTE for faulting address |
* |
* Try to find PTE for faulting address. |
* The as->lock must be held on entry to this function |
* if lock is true. |
* The AS->lock must be held on entry to this function. |
* |
* @param as Address space. |
* @param lock Lock/unlock the address space. |
* @param badvaddr Faulting virtual address. |
* @param istate Pointer to interrupted state. |
* @param pfrc Pointer to variable where as_page_fault() return code will be stored. |
58,12 → 54,12 |
* @return PTE on success, NULL otherwise. |
* |
*/ |
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr) |
static pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfcr) |
{ |
/* |
* Check if the mapping exists in page tables. |
*/ |
pte_t *pte = page_mapping_find(as, badvaddr); |
pte_t *pte = page_mapping_find(AS, badvaddr); |
if ((pte) && (pte->p)) { |
/* |
* Mapping found in page tables. |
77,7 → 73,7 |
* Mapping not found in page tables. |
* Resort to higher-level page fault handler. |
*/ |
page_table_unlock(as, lock); |
page_table_unlock(AS, true); |
switch (rc = as_page_fault(badvaddr, istate)) { |
case AS_PF_OK: |
/* |
84,16 → 80,16 |
* The higher-level page fault handler succeeded, |
* The mapping ought to be in place. |
*/ |
page_table_lock(as, lock); |
pte = page_mapping_find(as, badvaddr); |
page_table_lock(AS, true); |
pte = page_mapping_find(AS, badvaddr); |
ASSERT((pte) && (pte->p)); |
return pte; |
case AS_PF_DEFER: |
page_table_lock(as, lock); |
page_table_lock(AS, true); |
*pfcr = rc; |
return NULL; |
case AS_PF_FAULT: |
page_table_lock(as, lock); |
page_table_lock(AS, true); |
printf("Page fault.\n"); |
*pfcr = rc; |
return NULL; |
184,18 → 180,9 |
asid_t asid; |
__address badvaddr; |
pte_t *pte; |
|
int pfcr; |
as_t *as; |
bool lock; |
|
if (AS == NULL) { |
as = AS_KERNEL; |
lock = false; |
} else { |
as = AS; |
lock = true; |
} |
|
if (data) { |
asm volatile ( |
"mfdar %0\n" |
204,13 → 191,13 |
} else |
badvaddr = istate->pc; |
|
spinlock_lock(&as->lock); |
asid = as->asid; |
spinlock_unlock(&as->lock); |
spinlock_lock(&AS->lock); |
asid = AS->asid; |
spinlock_unlock(&AS->lock); |
|
page_table_lock(as, lock); |
page_table_lock(AS, true); |
|
pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr); |
pte = find_mapping_and_check(badvaddr, istate, &pfcr); |
if (!pte) { |
switch (pfcr) { |
case AS_PF_FAULT: |
221,7 → 208,7 |
* The page fault came during copy_from_uspace() |
* or copy_to_uspace(). |
*/ |
page_table_unlock(as, lock); |
page_table_unlock(AS, true); |
return; |
default: |
panic("Unexpected pfrc (%d)\n", pfcr); |
231,11 → 218,11 |
pte->a = 1; /* Record access to PTE */ |
pht_insert(badvaddr, pte->pfn); |
|
page_table_unlock(as, lock); |
page_table_unlock(AS, true); |
return; |
|
fail: |
page_table_unlock(as, lock); |
page_table_unlock(AS, true); |
pht_refill_fail(badvaddr, istate); |
} |
|
251,6 → 238,19 |
if (config.cpu_active == 1) { |
page_mapping_operations = &pt_mapping_operations; |
|
/* |
* PA2KA(identity) mapping for all frames until last_frame. |
*/ |
__address cur; |
int flags; |
|
for (cur = 0; cur < last_frame; cur += FRAME_SIZE) { |
flags = PAGE_CACHEABLE; |
if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size)) |
flags |= PAGE_GLOBAL; |
page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags); |
} |
|
/* Allocate page hash table */ |
phte_t *physical_phte = (phte_t *) PFN2ADDR(frame_alloc(PHT_ORDER, FRAME_KA | FRAME_PANIC)); |
phte = (phte_t *) PA2KA((__address) physical_phte); |
265,19 → 265,3 |
); |
} |
} |
|
|
/** Map a physical memory region into the kernel address space.
 *
 * Carves the next free region out of the kernel virtual address space
 * (tracked by the module-level last_frame watermark) and installs
 * non-cacheable identity-offset mappings for it in AS_KERNEL, one page
 * at a time. Intended for memory-mapped hardware, hence
 * PAGE_NOT_CACHEABLE.
 *
 * NOTE(review): no lock is visibly taken around last_frame here —
 * presumably only called during single-threaded init; confirm.
 *
 * @param physaddr Physical base address of the region to map.
 * @param size     Size of the region in bytes (rounded up to PAGE_SIZE
 *                 for mapping, to FRAME_SIZE when advancing last_frame).
 *
 * @return Kernel virtual address at which the region was mapped.
 *         Panics instead of returning if the kernel address space is
 *         exhausted.
 */
__address hw_map(__address physaddr, size_t size)
{
	if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
		panic("Unable to map physical memory %p (%d bytes)", physaddr, size);	/* fixed: statement was missing ';' */

	__address virtaddr = PA2KA(last_frame);

	/* Hoisted loop-invariant page count out of the loop condition. */
	pfn_t pages = ADDR2PFN(ALIGN_UP(size, PAGE_SIZE));
	pfn_t i;

	for (i = 0; i < pages; i++)
		page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);

	last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);

	return virtaddr;
}