Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1767 → Rev 1780

/kernel/trunk/genarch/src/mm/page_pt.c
46,9 → 46,9
#include <arch/asm.h>
#include <memstr.h>
 
-static void pt_mapping_insert(as_t *as, __address page, __address frame, int flags);
-static void pt_mapping_remove(as_t *as, __address page);
-static pte_t *pt_mapping_find(as_t *as, __address page);
+static void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags);
+static void pt_mapping_remove(as_t *as, uintptr_t page);
+static pte_t *pt_mapping_find(as_t *as, uintptr_t page);
 
page_mapping_operations_t pt_mapping_operations = {
.mapping_insert = pt_mapping_insert,
68,16 → 68,16
* @param frame Physical address of memory frame to which the mapping is done.
* @param flags Flags to be used for mapping.
*/
-void pt_mapping_insert(as_t *as, __address page, __address frame, int flags)
+void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
pte_t *ptl0, *ptl1, *ptl2, *ptl3;
pte_t *newpt;
 
-ptl0 = (pte_t *) PA2KA((__address) as->page_table);
+ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);
 
if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = (pte_t *)frame_alloc(ONE_FRAME, FRAME_KA);
-memsetb((__address)newpt, PAGE_SIZE, 0);
+memsetb((uintptr_t)newpt, PAGE_SIZE, 0);
SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
}
86,7 → 86,7
 
if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = (pte_t *)frame_alloc(ONE_FRAME, FRAME_KA);
-memsetb((__address)newpt, PAGE_SIZE, 0);
+memsetb((uintptr_t)newpt, PAGE_SIZE, 0);
SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
}
95,7 → 95,7
 
if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = (pte_t *)frame_alloc(ONE_FRAME, FRAME_KA);
-memsetb((__address)newpt, PAGE_SIZE, 0);
+memsetb((uintptr_t)newpt, PAGE_SIZE, 0);
SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
}
119,7 → 119,7
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be demapped.
*/
-void pt_mapping_remove(as_t *as, __address page)
+void pt_mapping_remove(as_t *as, uintptr_t page)
{
pte_t *ptl0, *ptl1, *ptl2, *ptl3;
bool empty = true;
129,7 → 129,7
* First, remove the mapping, if it exists.
*/
 
-ptl0 = (pte_t *) PA2KA((__address) as->page_table);
+ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);
 
if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
return;
147,7 → 147,7
ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
 
/* Destroy the mapping. Setting to PAGE_NOT_PRESENT is not sufficient. */
-memsetb((__address) &ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);
+memsetb((uintptr_t) &ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);
 
/*
* Second, free all empty tables along the way from PTL3 down to PTL0.
165,13 → 165,13
* PTL3 is empty.
* Release the frame and remove PTL3 pointer from preceding table.
*/
-frame_free(KA2PA((__address) ptl3));
+frame_free(KA2PA((uintptr_t) ptl3));
if (PTL2_ENTRIES)
-memsetb((__address) &ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
+memsetb((uintptr_t) &ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
else if (PTL1_ENTRIES)
-memsetb((__address) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
+memsetb((uintptr_t) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
else
-memsetb((__address) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
+memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
} else {
/*
* PTL3 is not empty.
194,11 → 194,11
* PTL2 is empty.
* Release the frame and remove PTL2 pointer from preceding table.
*/
-frame_free(KA2PA((__address) ptl2));
+frame_free(KA2PA((uintptr_t) ptl2));
if (PTL1_ENTRIES)
-memsetb((__address) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
+memsetb((uintptr_t) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
else
-memsetb((__address) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
+memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
}
else {
/*
223,8 → 223,8
* PTL1 is empty.
* Release the frame and remove PTL1 pointer from preceding table.
*/
-frame_free(KA2PA((__address) ptl1));
-memsetb((__address) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
+frame_free(KA2PA((uintptr_t) ptl1));
+memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
}
}
 
241,11 → 241,11
*
* @return NULL if there is no such mapping; entry from PTL3 describing the mapping otherwise.
*/
-pte_t *pt_mapping_find(as_t *as, __address page)
+pte_t *pt_mapping_find(as_t *as, uintptr_t page)
{
pte_t *ptl0, *ptl1, *ptl2, *ptl3;
 
-ptl0 = (pte_t *) PA2KA((__address) as->page_table);
+ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);
 
if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
return NULL;
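
The pt_mapping_* routines changed above are not called directly; page_pt.c publishes them through pt_mapping_operations (see the .mapping_insert assignment in the first hunk) and the kernel's generic page-mapping front end dispatches through that table. The following standalone sketch illustrates the operations-table pattern with the new uintptr_t signatures; the as_t/pte_t stand-in types, the demo backend, and the exact front-end function name are assumptions for illustration, not taken from this diff.

#include <stdio.h>
#include <stdint.h>

typedef struct { void *page_table; } as_t;              /* stand-in type */
typedef struct { unsigned present; } pte_t;             /* stand-in type */

/* Shape of the operations table that pt_mapping_operations fills in. */
typedef struct {
	void (*mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame, int flags);
	void (*mapping_remove)(as_t *as, uintptr_t page);
	pte_t *(*mapping_find)(as_t *as, uintptr_t page);
} page_mapping_operations_t;

/* Trivial stand-in backend so the sketch runs outside the kernel. */
static void demo_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
	(void) as; (void) flags;
	printf("map %#lx -> %#lx\n", (unsigned long) page, (unsigned long) frame);
}

static page_mapping_operations_t demo_ops = { .mapping_insert = demo_insert };
static page_mapping_operations_t *page_mapping_operations = &demo_ops;

/* Generic front end: forwards to whichever backend was installed at boot. */
static void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
	page_mapping_operations->mapping_insert(as, page, frame, flags);
}

int main(void)
{
	as_t as = { 0 };
	page_mapping_insert(&as, 0x40001000, 0x00200000, 0);
	return 0;
}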
/kernel/trunk/genarch/src/mm/as_pt.c
76,9 → 76,9
dst_ptl0 = (pte_t *) frame_alloc(ONE_FRAME, FRAME_KA);
 
if (flags & FLAG_AS_KERNEL) {
-memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
+memsetb((uintptr_t) dst_ptl0, PAGE_SIZE, 0);
} else {
-__address src, dst;
+uintptr_t src, dst;
/*
* Copy the kernel address space portion to new PTL0.
86,18 → 86,18
ipl = interrupts_disable();
mutex_lock(&AS_KERNEL->lock);
-src_ptl0 = (pte_t *) PA2KA((__address) AS_KERNEL->page_table);
+src_ptl0 = (pte_t *) PA2KA((uintptr_t) AS_KERNEL->page_table);
 
-src = (__address) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
-dst = (__address) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
+src = (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
+dst = (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
 
-memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
-memcpy((void *) dst, (void *) src, PAGE_SIZE - (src - (__address) src_ptl0));
+memsetb((uintptr_t) dst_ptl0, PAGE_SIZE, 0);
+memcpy((void *) dst, (void *) src, PAGE_SIZE - (src - (uintptr_t) src_ptl0));
mutex_unlock(&AS_KERNEL->lock);
interrupts_restore(ipl);
}
 
-return (pte_t *) KA2PA((__address) dst_ptl0);
+return (pte_t *) KA2PA((uintptr_t) dst_ptl0);
}
 
/** Destroy page table.
108,7 → 108,7
*/
void ptl0_destroy(pte_t *page_table)
{
-frame_free((__address)page_table);
+frame_free((uintptr_t)page_table);
}
 
/** Lock page tables.
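
The ptl0_create() hunk above zeroes the new PTL0 and then copies only the kernel portion, sized as PAGE_SIZE - (src - (uintptr_t) src_ptl0). Below is a small standalone sketch of that arithmetic, using assumed amd64-like parameters (4 KiB tables, eight-byte entries, kernel half starting at entry 256); none of those figures appear in this diff.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE     4096u
#define PTE_SIZE      8u      /* assumed entry size */
#define KERNEL_INDEX  256u    /* assumed PTL0_INDEX(KERNEL_ADDRESS_SPACE_START) */

int main(void)
{
	uintptr_t src_ptl0 = 0x1000;                       /* arbitrary table base */
	uintptr_t src = src_ptl0 + KERNEL_INDEX * PTE_SIZE;
	size_t copied = PAGE_SIZE - (src - src_ptl0);      /* same formula as the diff */

	/* With the assumed numbers: 2048 bytes, i.e. entries 256..511. */
	printf("%zu bytes (%zu entries) copied\n", copied, copied / PTE_SIZE);
	return 0;
}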
/kernel/trunk/genarch/src/mm/page_ht.c
52,13 → 52,13
#include <adt/hash_table.h>
#include <align.h>
 
-static index_t hash(__native key[]);
-static bool compare(__native key[], count_t keys, link_t *item);
+static index_t hash(unative_t key[]);
+static bool compare(unative_t key[], count_t keys, link_t *item);
static void remove_callback(link_t *item);
 
-static void ht_mapping_insert(as_t *as, __address page, __address frame, int flags);
-static void ht_mapping_remove(as_t *as, __address page);
-static pte_t *ht_mapping_find(as_t *as, __address page);
+static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags);
+static void ht_mapping_remove(as_t *as, uintptr_t page);
+static pte_t *ht_mapping_find(as_t *as, uintptr_t page);
 
/**
* This lock protects the page hash table. It must be acquired
93,10 → 93,10
*
* @return Index into page hash table.
*/
-index_t hash(__native key[])
+index_t hash(unative_t key[])
{
as_t *as = (as_t *) key[KEY_AS];
-__address page = (__address) key[KEY_PAGE];
+uintptr_t page = (uintptr_t) key[KEY_PAGE];
index_t index;
/*
111,7 → 111,7
* similar addresses. Least significant bits compose the
* hash index.
*/
-index |= ((__native) as) & (PAGE_HT_ENTRIES-1);
+index |= ((unative_t) as) & (PAGE_HT_ENTRIES-1);
return index;
}
124,7 → 124,7
*
* @return true on match, false otherwise.
*/
-bool compare(__native key[], count_t keys, link_t *item)
+bool compare(unative_t key[], count_t keys, link_t *item)
{
pte_t *t;
 
137,9 → 137,9
t = hash_table_get_instance(item, pte_t, link);
 
if (keys == PAGE_HT_KEYS) {
-return (key[KEY_AS] == (__address) t->as) && (key[KEY_PAGE] == t->page);
+return (key[KEY_AS] == (uintptr_t) t->as) && (key[KEY_PAGE] == t->page);
} else {
-return (key[KEY_AS] == (__address) t->as);
+return (key[KEY_AS] == (uintptr_t) t->as);
}
}
 
173,10 → 173,10
* @param frame Physical address of memory frame to which the mapping is done.
* @param flags Flags to be used for mapping.
*/
-void ht_mapping_insert(as_t *as, __address page, __address frame, int flags)
+void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
pte_t *t;
-__native key[2] = { (__address) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
+unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
if (!hash_table_find(&page_ht, key)) {
t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);
208,9 → 208,9
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be demapped.
*/
-void ht_mapping_remove(as_t *as, __address page)
+void ht_mapping_remove(as_t *as, uintptr_t page)
{
-__native key[2] = { (__address) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
+unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
/*
* Note that removed PTEs will be freed
231,11 → 231,11
*
* @return NULL if there is no such mapping; requested mapping otherwise.
*/
-pte_t *ht_mapping_find(as_t *as, __address page)
+pte_t *ht_mapping_find(as_t *as, uintptr_t page)
{
link_t *hlp;
pte_t *t = NULL;
-__native key[2] = { (__address) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
+unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
hlp = hash_table_find(&page_ht, key);
if (hlp)
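
The page_ht.c changes retype the hash keys from __native to unative_t. The hash() comment explains that the least significant bits form the hash index and that the address-space pointer is OR-ed in so identical virtual addresses from different address spaces land in different buckets. The standalone sketch below illustrates that two-key hashing idea; the page-number term and the table size are assumptions (only the address-space masking line is visible in this hunk).

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_WIDTH       12u                    /* assumed 4 KiB pages */
#define PAGE_HT_ENTRIES  (1u << 13)             /* assumed table size */

typedef uintptr_t unative_t;                    /* stand-in for the kernel typedef */

static size_t hash(uintptr_t as, uintptr_t page)
{
	size_t index;

	/* Page-number bits spread neighbouring pages over different buckets
	 * (assumed term, not shown in the visible hunk). */
	index = (page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1);

	/* Address-space bits keep equal virtual addresses from distinct
	 * address spaces apart; this mirrors the line changed in the diff. */
	index |= ((unative_t) as) & (PAGE_HT_ENTRIES - 1);

	return index;
}

int main(void)
{
	printf("bucket: %zu\n", hash((uintptr_t) 0xdeadb000, (uintptr_t) 0x7f001000));
	return 0;
}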