/kernel/trunk/genarch/src/mm/as_ht.c |
---|
34,11 → 34,17 |
#include <typedefs.h> |
#include <memstr.h> |
#include <adt/hash_table.h> |
#include <synch/spinlock.h> |
static pte_t *ht_create(int flags); |
static void ht_lock(as_t *as, bool lock); |
static void ht_unlock(as_t *as, bool unlock); |
as_operations_t as_ht_operations = { |
.page_table_create = ht_create |
.page_table_create = ht_create, |
.page_table_lock = ht_lock, |
.page_table_unlock = ht_unlock, |
}; |
58,3 → 64,33 |
} |
return NULL; |
} |
/** Lock page table. |
 * |
 * Lock the address space and the page hash table. |
 * The page hash table lock is acquired after the address |
 * space lock, respecting the required lock ordering. |
 * Interrupts must be disabled. |
 * |
 * @param as Address space. |
 * @param lock If false, do not attempt to lock the address space. |
 */ |
void ht_lock(as_t *as, bool lock) |
{ |
if (lock) |
spinlock_lock(&as->lock); |
spinlock_lock(&page_ht_lock); |
} |
/** Unlock page table. |
 * |
 * Unlock the page hash table and the address space, in the |
 * reverse of the order in which ht_lock() acquires them. |
 * Interrupts must be disabled. |
 * |
 * @param as Address space. |
 * @param unlock If false, do not attempt to unlock the address space. |
 */ |
void ht_unlock(as_t *as, bool unlock) |
{ |
spinlock_unlock(&page_ht_lock); |
if (unlock) |
spinlock_unlock(&as->lock); |
} |
/kernel/trunk/genarch/src/mm/page_pt.c |
---|
52,7 → 52,7 |
* Map virtual address 'page' to physical address 'frame' |
* using 'flags'. |
* |
* The address space must be locked and interrupts must be disabled. |
* The page table must be locked and interrupts must be disabled. |
* |
 * @param as Address space to which page belongs. |
* @param page Virtual address of the page to be mapped. |
105,7 → 105,7 |
* |
* Empty page tables except PTL0 are freed. |
* |
* The address space must be locked and interrupts must be disabled. |
* The page table must be locked and interrupts must be disabled. |
* |
 * @param as Address space to which page belongs. |
* @param page Virtual address of the page to be demapped. |
225,7 → 225,7 |
* |
* Find mapping for virtual page. |
* |
* The address space must be locked and interrupts must be disabled. |
* The page table must be locked and interrupts must be disabled. |
* |
* @param as Address space to which page belongs. |
* @param page Virtual page. |
/kernel/trunk/genarch/src/mm/as_pt.c |
---|
39,8 → 39,13 |
static pte_t *ptl0_create(int flags); |
static void pt_lock(as_t *as, bool lock); |
static void pt_unlock(as_t *as, bool unlock); |
as_operations_t as_pt_operations = { |
.page_table_create = ptl0_create |
.page_table_create = ptl0_create, |
.page_table_lock = pt_lock, |
.page_table_unlock = pt_unlock |
}; |
/** Create PTL0. |
76,3 → 81,31 |
return (pte_t *) KA2PA((__address) dst_ptl0); |
} |
/** Lock page tables. |
 * |
 * Lock only the address space; the hierarchical page tables |
 * have no separate lock of their own — the address space |
 * lock is what protects them. |
 * Interrupts must be disabled. |
 * |
 * @param as Address space. |
 * @param lock If false, do not attempt to lock the address space. |
 */ |
void pt_lock(as_t *as, bool lock) |
{ |
if (lock) |
spinlock_lock(&as->lock); |
} |
/** Unlock page tables. |
 * |
 * Unlock only the address space; no separate page table |
 * lock is used for the hierarchical page tables. |
 * Interrupts must be disabled. |
 * |
 * @param as Address space. |
 * @param unlock If false, do not attempt to unlock the address space. |
 */ |
void pt_unlock(as_t *as, bool unlock) |
{ |
if (unlock) |
spinlock_unlock(&as->lock); |
} |
/kernel/trunk/genarch/src/mm/page_ht.c |
---|
52,7 → 52,9 |
static pte_t *ht_mapping_find(as_t *as, __address page); |
/** |
* This lock protects the page hash table. |
* This lock protects the page hash table. It must be acquired |
* after address space lock and after any address space area |
* locks. |
*/ |
SPINLOCK_INITIALIZE(page_ht_lock); |
155,7 → 157,7 |
* Map virtual address 'page' to physical address 'frame' |
* using 'flags'. |
* |
* The address space must be locked and interruptsmust be disabled. |
* The page table must be locked and interrupts must be disabled. |
* |
* @param as Address space to which page belongs. |
* @param page Virtual address of the page to be mapped. |
167,8 → 169,6 |
pte_t *t; |
__native key[2] = { (__address) as, page = ALIGN_DOWN(page, PAGE_SIZE) }; |
spinlock_lock(&page_ht_lock); |
if (!hash_table_find(&page_ht, key)) { |
t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC); |
ASSERT(t != NULL); |
186,8 → 186,6 |
hash_table_insert(&page_ht, key, &t->link); |
} |
spinlock_unlock(&page_ht_lock); |
} |
/** Remove mapping of page from page hash table. |
196,7 → 194,7 |
* TLB shootdown should follow in order to make effects of |
* this call visible. |
* |
* The address space must be locked and interrupts must be disabled. |
* The page table must be locked and interrupts must be disabled. |
* |
 * @param as Address space to which page belongs. |
* @param page Virtual address of the page to be demapped. |
205,15 → 203,11 |
{ |
__native key[2] = { (__address) as, page = ALIGN_DOWN(page, PAGE_SIZE) }; |
spinlock_lock(&page_ht_lock); |
/* |
* Note that removed PTE's will be freed |
* by remove_callback(). |
*/ |
hash_table_remove(&page_ht, key, 2); |
spinlock_unlock(&page_ht_lock); |
} |
221,7 → 215,7 |
* |
* Find mapping for virtual page. |
* |
* The address space must be locked and interrupts must be disabled. |
* The page table must be locked and interrupts must be disabled. |
* |
 * @param as Address space to which page belongs. |
* @param page Virtual page. |
234,12 → 228,9 |
pte_t *t = NULL; |
__native key[2] = { (__address) as, page = ALIGN_DOWN(page, PAGE_SIZE) }; |
spinlock_lock(&page_ht_lock); |
hlp = hash_table_find(&page_ht, key); |
if (hlp) |
t = hash_table_get_instance(hlp, pte_t, link); |
spinlock_unlock(&page_ht_lock); |
return t; |
} |