Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1043 → Rev 1044

/kernel/trunk/genarch/src/mm/as_ht.c
34,11 → 34,17
#include <typedefs.h>
#include <memstr.h>
#include <adt/hash_table.h>
#include <synch/spinlock.h>
 
static pte_t *ht_create(int flags);
 
static void ht_lock(as_t *as, bool lock);
static void ht_unlock(as_t *as, bool unlock);
 
as_operations_t as_ht_operations = {
.page_table_create = ht_create
.page_table_create = ht_create,
.page_table_lock = ht_lock,
.page_table_unlock = ht_unlock,
};
 
 
58,3 → 64,33
}
return NULL;
}
 
/** Lock page table.
*
* Lock address space and page hash table.
* Interrupts must be disabled.
*
* @param as Address space.
* @param lock If false, do not attempt to lock the address space.
*/
void ht_lock(as_t *as, bool lock)
{
if (lock)
spinlock_lock(&as->lock);
spinlock_lock(&page_ht_lock);
}
 
/** Unlock page table.
*
* Unlock address space and page hash table.
* Interrupts must be disabled.
*
* @param as Address space.
* @param unlock If false, do not attempt to unlock the address space.
*/
void ht_unlock(as_t *as, bool unlock)
{
spinlock_unlock(&page_ht_lock);
if (unlock)
spinlock_unlock(&as->lock);
}
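The hash table backend orders the address space lock before the global page hash table lock and releases in reverse. A minimal sketch of a critical section under this discipline, assuming interrupts are already disabled (illustration only, not part of the revision):

	spinlock_lock(&as->lock);	/* skipped when the lock argument is false */
	spinlock_lock(&page_ht_lock);
	/* ... page_mapping_insert(), page_mapping_remove() or page_mapping_find() ... */
	spinlock_unlock(&page_ht_lock);
	spinlock_unlock(&as->lock);	/* skipped when the unlock argument is false */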
/kernel/trunk/genarch/src/mm/page_pt.c
52,7 → 52,7
* Map virtual address 'page' to physical address 'frame'
* using 'flags'.
*
* The address space must be locked and interrupts must be disabled.
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be mapped.
105,7 → 105,7
*
* Empty page tables except PTL0 are freed.
*
* The address space must be locked and interrupts must be disabled.
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be demapped.
225,7 → 225,7
*
* Find mapping for virtual page.
*
* The address space must be locked and interrupts must be disabled.
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual page.
/kernel/trunk/genarch/src/mm/as_pt.c
39,8 → 39,13
 
static pte_t *ptl0_create(int flags);
 
static void pt_lock(as_t *as, bool lock);
static void pt_unlock(as_t *as, bool unlock);
 
as_operations_t as_pt_operations = {
.page_table_create = ptl0_create
.page_table_create = ptl0_create,
.page_table_lock = pt_lock,
.page_table_unlock = pt_unlock
};
 
/** Create PTL0.
76,3 → 81,31
 
return (pte_t *) KA2PA((__address) dst_ptl0);
}
 
/** Lock page tables.
*
* Lock only the address space.
* Interrupts must be disabled.
*
* @param as Address space.
* @param lock If false, do not attempt to lock the address space.
*/
void pt_lock(as_t *as, bool lock)
{
if (lock)
spinlock_lock(&as->lock);
}
 
/** Unlock page tables.
*
* Unlock the address space.
* Interrupts must be disabled.
*
* @param as Address space.
* @param unlock If false, do not attempt to unlock the address space.
*/
void pt_unlock(as_t *as, bool unlock)
{
if (unlock)
spinlock_unlock(&as->lock);
}
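Unlike the hash table backend, the hierarchical backend has no shared global structure, so pt_lock()/pt_unlock() take only as->lock. Which backend a port uses is determined by the as_operations pointer; a hypothetical arch-side assignment (the hook name as_arch_init is an assumption, not shown in this revision) might read:

	/* Hypothetical: select the page table backend for this port. */
	void as_arch_init(void)
	{
		as_operations = &as_pt_operations;	/* or &as_ht_operations */
	}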
/kernel/trunk/genarch/src/mm/page_ht.c
52,7 → 52,9
static pte_t *ht_mapping_find(as_t *as, __address page);
 
/**
* This lock protects the page hash table.
* This lock protects the page hash table. It must be acquired
* after the address space lock and after any address space area
* locks.
*/
SPINLOCK_INITIALIZE(page_ht_lock);
 
155,7 → 157,7
* Map virtual address 'page' to physical address 'frame'
* using 'flags'.
*
* The address space must be locked and interruptsmust be disabled.
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be mapped.
167,8 → 169,6
pte_t *t;
__native key[2] = { (__address) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
spinlock_lock(&page_ht_lock);
 
if (!hash_table_find(&page_ht, key)) {
t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);
ASSERT(t != NULL);
186,8 → 186,6
 
hash_table_insert(&page_ht, key, &t->link);
}
spinlock_unlock(&page_ht_lock);
}
 
/** Remove mapping of page from page hash table.
196,7 → 194,7
* TLB shootdown should follow in order to make effects of
* this call visible.
*
* The address space must be locked and interrupts must be disabled.
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be demapped.
205,15 → 203,11
{
__native key[2] = { (__address) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
spinlock_lock(&page_ht_lock);
 
/*
* Note that removed PTE's will be freed
* by remove_callback().
*/
hash_table_remove(&page_ht, key, 2);
 
spinlock_unlock(&page_ht_lock);
}
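For completeness, a sketch of the remove_callback() referenced in the comment above (the name comes from that comment; the body is an assumption): it would free the pte_t that page_mapping_insert() allocated, once the hash table drops the item:

	/* Assumed shape of the hash table removal callback. */
	void remove_callback(link_t *item)
	{
		pte_t *t = hash_table_get_instance(item, pte_t, link);
		free(t);
	}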
 
 
221,7 → 215,7
*
* Find mapping for virtual page.
*
* The address space must be locked and interrupts must be disabled.
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual page.
234,12 → 228,9
pte_t *t = NULL;
__native key[2] = { (__address) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
spinlock_lock(&page_ht_lock);
 
hlp = hash_table_find(&page_ht, key);
if (hlp)
t = hash_table_get_instance(hlp, pte_t, link);
 
spinlock_unlock(&page_ht_lock);
return t;
}
/kernel/trunk/generic/include/mm/page.h
82,6 → 82,8
extern page_mapping_operations_t *page_mapping_operations;
 
extern void page_init(void);
extern void page_table_lock(as_t *as, bool lock);
extern void page_table_unlock(as_t *as, bool unlock);
extern void page_mapping_insert(as_t *as, __address page, __address frame, int flags);
extern void page_mapping_remove(as_t *as, __address page);
extern pte_t *page_mapping_find(as_t *as, __address page);
/kernel/trunk/generic/include/mm/as.h
93,6 → 93,8
 
struct as_operations {
pte_t *(* page_table_create)(int flags);
void (* page_table_lock)(as_t *as, bool lock);
void (* page_table_unlock)(as_t *as, bool unlock);
};
typedef struct as_operations as_operations_t;
 
/kernel/trunk/generic/src/mm/as.c
173,7 → 173,7
ipl_t ipl;
ipl = interrupts_disable();
spinlock_lock(&as->lock);
page_table_lock(as, true);
area = find_area_and_lock(as, page);
if (!area) {
183,7 → 183,7
page_mapping_insert(as, page, frame, get_area_flags(area));
spinlock_unlock(&area->lock);
spinlock_unlock(&as->lock);
page_table_unlock(as, true);
interrupts_restore(ipl);
}
 
198,12 → 198,13
*/
int as_page_fault(__address page)
{
pte_t *pte;
as_area_t *area;
__address frame;
ASSERT(AS);
 
spinlock_lock(&AS->lock);
area = find_area_and_lock(AS, page);
if (!area) {
/*
214,7 → 215,23
return 0;
}
 
page_table_lock(AS, false);
/*
* To avoid a race condition between two page faults
* on the same address, we need to make sure the
* mapping has not already been inserted.
*/
if ((pte = page_mapping_find(AS, page))) {
if (PTE_PRESENT(pte)) {
page_table_unlock(AS, false);
spinlock_unlock(&area->lock);
spinlock_unlock(&AS->lock);
return 1;
}
}
 
/*
* In general, there can be several reasons that
* could have caused this fault.
*
237,10 → 254,10
* inserted into page tables.
*/
page_mapping_insert(AS, page, frame, get_area_flags(area));
page_table_unlock(AS, false);
spinlock_unlock(&area->lock);
spinlock_unlock(&AS->lock);
 
return 1;
}
 
357,6 → 374,39
return as_operations->page_table_create(flags);
}
 
/** Lock page table.
*
* This function should be called before any call to page_mapping_insert(),
* page_mapping_remove() or page_mapping_find().
*
* The locking order is such that address space areas must be locked
* prior to this call. The address space itself can already be locked
* prior to this call, in which case the lock argument is false.
*
* @param as Address space.
* @param lock If false, do not attempt to lock as->lock.
*/
void page_table_lock(as_t *as, bool lock)
{
ASSERT(as_operations);
ASSERT(as_operations->page_table_lock);
 
as_operations->page_table_lock(as, lock);
}
 
/** Unlock page table.
*
* @param as Address space.
* @param unlock If false, do not attempt to unlock as->lock.
*/
void page_table_unlock(as_t *as, bool unlock)
{
ASSERT(as_operations);
ASSERT(as_operations->page_table_unlock);
 
as_operations->page_table_unlock(as, unlock);
}
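Both calling conventions appear later in this revision; condensed (illustrative only, interrupts assumed disabled):

	/* Caller does not hold as->lock: let the backend take it. */
	page_table_lock(as, true);
	pte = page_mapping_find(as, page);
	page_table_unlock(as, true);

	/* Caller already holds as->lock, e.g. as_page_fault(): pass false. */
	spinlock_lock(&as->lock);
	page_table_lock(as, false);
	pte = page_mapping_find(as, page);
	page_table_unlock(as, false);
	spinlock_unlock(&as->lock);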
 
/** Find address space area and change it.
*
* @param as Address space.
397,12 → 447,20
/*
* Releasing physical memory.
* This depends on the fact that the memory was allocated using frame_alloc().
*/
page_table_lock(as, false);
pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
if (pte && PTE_VALID(pte)) {
__address frame;
 
ASSERT(PTE_PRESENT(pte));
frame_free(ADDR2PFN(PTE_GET_FRAME(pte)));
frame = PTE_GET_FRAME(pte);
page_mapping_remove(as, area->base + i*PAGE_SIZE);
page_table_unlock(as, false);
 
frame_free(ADDR2PFN(frame));
} else {
page_table_unlock(as, false);
}
}
/*
/kernel/trunk/generic/src/mm/page.c
76,7 → 76,7
* Map virtual address 'page' to physical address 'frame'
* using 'flags'. Allocate and setup any missing page tables.
*
* The address space must be locked and interrupts must be disabled.
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be mapped.
97,7 → 97,7
* TLB shootdown should follow in order to make effects of
* this call visible.
*
* The address space must be locked and interrupts must be disabled.
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be demapped.
114,7 → 114,7
*
* Find mapping for virtual page.
*
* The address space must be locked and interrupts must be disabled.
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual page.
/kernel/trunk/arch/ia64/src/mm/tlb.c
423,6 → 423,7
pte_t *t;
va = istate->cr_ifa; /* faulting address */
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
if (t) {
/*
430,11 → 431,14
* Insert it into data translation cache.
*/
itc_pte_copy(t);
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to address space page fault handler.
*/
page_table_unlock(AS, true);
if (!as_page_fault(va)) {
panic("%s: va=%P, rid=%d\n", __FUNCTION__, istate->cr_ifa, rr.map.rid);
}
}
466,6 → 470,7
}
}
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
if (t) {
/*
473,10 → 478,12
* Insert it into data translation cache.
*/
dtc_pte_copy(t);
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to address space page fault handler.
*/
page_table_unlock(AS, true);
if (!as_page_fault(va)) {
panic("%s: va=%P, rid=%d, iip=%P\n", __FUNCTION__, va, rid, istate->cr_iip);
}
504,6 → 511,7
{
pte_t *t;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
514,6 → 522,7
t->d = true;
dtc_pte_copy(t);
}
page_table_unlock(AS, true);
}
 
/** Instruction access bit fault handler.
525,6 → 534,7
{
pte_t *t;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
535,6 → 545,7
t->a = true;
itc_pte_copy(t);
}
page_table_unlock(AS, true);
}
 
/** Data access bit fault handler.
546,6 → 557,7
{
pte_t *t;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
556,6 → 568,7
t->a = true;
dtc_pte_copy(t);
}
page_table_unlock(AS, true);
}
 
/** Page not present fault handler.
570,6 → 583,7
pte_t *t;
va = istate->cr_ifa; /* faulting address */
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
ASSERT(t);
582,7 → 596,9
itc_pte_copy(t);
else
dtc_pte_copy(t);
page_table_unlock(AS, true);
} else {
page_table_unlock(AS, true);
if (!as_page_fault(va)) {
panic("%s: va=%P, rid=%d\n", __FUNCTION__, va, rr.map.rid);
}
/kernel/trunk/arch/mips32/src/mm/tlb.c
87,14 → 87,19
void tlb_refill(istate_t *istate)
{
entry_lo_t lo;
entry_hi_t hi;
asid_t asid;
__address badvaddr;
pte_t *pte;
 
badvaddr = cp0_badvaddr_read();
 
spinlock_lock(&AS->lock);
asid = AS->asid;
spinlock_unlock(&AS->lock);
 
page_table_lock(AS, true);
 
pte = find_mapping_and_check(badvaddr);
if (!pte)
goto fail;
104,7 → 109,7
*/
pte->a = 1;
 
prepare_entry_hi(&hi, AS->asid, badvaddr);
prepare_entry_hi(&hi, asid, badvaddr);
prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn);
 
/*
122,11 → 127,11
cp0_pagemask_write(TLB_PAGE_MASK_16K);
tlbwr();
 
spinlock_unlock(&AS->lock);
page_table_unlock(AS, true);
return;
fail:
spinlock_unlock(&AS->lock);
page_table_unlock(AS, true);
tlb_refill_fail(istate);
}
 
154,9 → 159,9
cp0_entry_hi_write(hi.value);
tlbp();
index.value = cp0_index_read();
 
page_table_lock(AS, true);
spinlock_lock(&AS->lock);
/*
* Fail if the entry is not in TLB.
*/
191,11 → 196,11
cp0_pagemask_write(TLB_PAGE_MASK_16K);
tlbwi();
 
spinlock_unlock(&AS->lock);
page_table_unlock(AS, true);
return;
fail:
spinlock_unlock(&AS->lock);
page_table_unlock(AS, true);
tlb_invalid_fail(istate);
}
 
223,9 → 228,9
cp0_entry_hi_write(hi.value);
tlbp();
index.value = cp0_index_read();
 
page_table_lock(AS, true);
spinlock_lock(&AS->lock);
/*
* Fail if the entry is not in TLB.
*/
267,11 → 272,11
cp0_pagemask_write(TLB_PAGE_MASK_16K);
tlbwi();
 
spinlock_unlock(&AS->lock);
page_table_unlock(AS, true);
return;
fail:
spinlock_unlock(&AS->lock);
page_table_unlock(AS, true);
tlb_modified_fail(istate);
}
 
349,34 → 354,23
* Mapping not found in page tables.
* Resort to higher-level page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(badvaddr)) {
/*
* The higher-level page fault handler succeeded;
* the mapping ought to be in place.
*/
page_table_lock(AS, true);
pte = page_mapping_find(AS, badvaddr);
ASSERT(pte && pte->p);
return pte;
} else {
page_table_lock(AS, true);
printf("Page fault.\n");
return NULL;
}
}
 
/*
* Handler cannot succeed if badvaddr has no mapping.
*/
if (!pte) {
printf("No such mapping.\n");
return NULL;
}
 
/*
* Handler cannot succeed if the mapping is marked as invalid.
*/
if (!pte->p) {
printf("Invalid mapping.\n");
return NULL;
}
 
return pte;
}
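The unlock/relock around as_page_fault() above is deliberate: as_page_fault() takes AS->lock and then page_table_lock(AS, false) itself, so find_mapping_and_check() must not hold the page table lock across the call. Condensed from the code above:

	page_table_unlock(AS, true);	/* drop the lock before the generic handler */
	if (as_page_fault(badvaddr)) {	/* takes AS->lock, then page_table_lock(AS, false) */
		page_table_lock(AS, true);	/* reacquire; the mapping ought to be in place */
		pte = page_mapping_find(AS, badvaddr);
		ASSERT(pte && pte->p);
	}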
 
void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn)