/kernel/trunk/generic/src/mm/slab.c
---
549,7 → 549,7
 cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
 for (i=0; i < config.cpu_count; i++) {
-memsetb((__address)&cache->mag_cache[i],
+memsetb((uintptr_t)&cache->mag_cache[i],
 sizeof(cache->mag_cache[i]), 0);
 spinlock_initialize(&cache->mag_cache[i].lock,
 "slab_maglock_cpu");
569,11 → 569,11
 int pages;
 ipl_t ipl;
-memsetb((__address)cache, sizeof(*cache), 0);
+memsetb((uintptr_t)cache, sizeof(*cache), 0);
 cache->name = name;
-if (align < sizeof(__native))
-align = sizeof(__native);
+if (align < sizeof(unative_t))
+align = sizeof(unative_t);
 size = ALIGN_UP(size, align);
 cache->size = size;
820,7 → 820,7
 _slab_cache_create(&mag_cache,
 "slab_magazine",
 sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
-sizeof(__address),
+sizeof(uintptr_t),
 NULL, NULL,
 SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
 /* Initialize slab_cache cache */
827,7 → 827,7
 _slab_cache_create(&slab_cache_cache,
 "slab_cache",
 sizeof(slab_cache_cache),
-sizeof(__address),
+sizeof(uintptr_t),
 NULL, NULL,
 SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
 /* Initialize external slab cache */
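
Note: the slab hunks show the whole pattern of this changeset: the project-private __address and __native typedefs give way to the C99 names uintptr_t and unative_t. A minimal userspace sketch of why the cast sites compile unchanged; memsetb here is a stand-in using the argument order visible above (address, size, value), and the struct is made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's memsetb(): it takes an integer address,
 * which is why the call sites above cast pointers with (uintptr_t). */
static void memsetb(uintptr_t address, size_t size, uint8_t value)
{
	memset((void *) address, value, size);
}

int main(void)
{
	struct { void *lock; unsigned current; } mag_cache;  /* made up */
	size_t align = sizeof(char);

	/* C99 guarantees uintptr_t round-trips any object pointer,
	 * which __address previously had to promise per architecture. */
	memsetb((uintptr_t) &mag_cache, sizeof(mag_cache), 0);

	/* Clamping the minimum alignment to the machine word mirrors the
	 * 569,11 hunk; sizeof(uintptr_t) stands in for sizeof(unative_t). */
	if (align < sizeof(uintptr_t))
		align = sizeof(uintptr_t);
	printf("align = %zu, lock = %p\n", align, mag_cache.lock);
	return 0;
}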
/kernel/trunk/generic/src/mm/tlb.c
---
78,7 → 78,7
 * @param page Virtual page address, if required by type.
 * @param count Number of pages, if required by type.
 */
-void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, __address page, count_t count)
+void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, uintptr_t page, count_t count)
 {
 int i;
141,7 → 141,7
 {
 tlb_invalidate_type_t type;
 asid_t asid;
-__address page;
+uintptr_t page;
 count_t count;
 int i;
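
For orientation, tlb_shootdown_start() queues one invalidation request for the remote CPUs, and the second hunk processes such a message. A compilable sketch of the message the signature implies; only the four field names come from the hunks, while the typedef widths and enumerators are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative typedefs; the kernel defines asid_t, count_t and the
 * invalidate types in its own headers. */
typedef uint32_t asid_t;
typedef unsigned long count_t;
typedef enum { TLB_INVL_ALL, TLB_INVL_ASID, TLB_INVL_PAGES }
    tlb_invalidate_type_t;

/* One queued shootdown request, mirroring the locals declared in the
 * 141,7 hunk (type, asid, page, count). */
typedef struct {
	tlb_invalidate_type_t type;
	asid_t asid;
	uintptr_t page;    /* virtual page address, if required by type */
	count_t count;     /* number of pages, if required by type */
} tlb_shootdown_msg_t;

int main(void)
{
	tlb_shootdown_msg_t msg = {
		.type = TLB_INVL_PAGES, .asid = 7,
		.page = 0x80001000UL, .count = 4
	};
	printf("invalidate %lu page(s) at %#lx in asid %u\n",
	    msg.count, (unsigned long) msg.page, (unsigned) msg.asid);
	return 0;
}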
/kernel/trunk/generic/src/mm/backend_anon.c
---
51,8 → 51,8
 #include <align.h>
 #include <arch.h>
-static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
-static void anon_frame_free(as_area_t *area, __address page, __address frame);
+static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
+static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
 static void anon_share(as_area_t *area);
 mem_backend_t anon_backend = {
71,9 → 71,9
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
-int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
+int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
 {
-__address frame;
+uintptr_t frame;
 if (!as_area_check_access(area, access))
 return AS_PF_FAULT;
88,7 → 88,7
 * mapping, a new frame is allocated and the mapping is created.
 */
 mutex_lock(&area->sh_info->lock);
-frame = (__address) btree_search(&area->sh_info->pagemap,
+frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
 ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
 if (!frame) {
 bool allocate = true;
105,7 → 105,7
 }
 }
 if (allocate) {
-frame = (__address) frame_alloc(ONE_FRAME, 0);
+frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
 memsetb(PA2KA(frame), FRAME_SIZE, 0);
 /*
132,7 → 132,7
 * do not forget to distinguish between
 * the different causes
 */
-frame = (__address)frame_alloc(ONE_FRAME, 0);
+frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
 memsetb(PA2KA(frame), FRAME_SIZE, 0);
 }
156,7 → 156,7
 * @param page Ignored.
 * @param frame Frame to be released.
 */
-void anon_frame_free(as_area_t *area, __address page, __address frame)
+void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
 {
 frame_free(frame);
 }
184,7 → 184,7
 node = list_get_instance(cur, btree_node_t, leaf_link);
 for (i = 0; i < node->keys; i++) {
-__address base = node->key[i];
+uintptr_t base = node->key[i];
 count_t count = (count_t) node->value[i];
 int j;
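
The (uintptr_t) btree_search(...) cast is why the address type must be exactly pointer-sized: the shared pagemap stores physical frame addresses in the B+tree's void * value slots and converts them back on lookup. A toy round-trip of that idea, with the tree reduced to a single slot:

#include <stdint.h>
#include <stdio.h>

/* Toy one-slot "pagemap": the kernel's B+tree stores void * values,
 * so a frame address travels through it as a pointer and is cast
 * back with (uintptr_t) at the call site, the exact cast this hunk
 * updates. The single slot is an illustrative simplification. */
static void *slot;

static void pagemap_insert(uintptr_t frame) { slot = (void *) frame; }
static void *pagemap_search(void)           { return slot; }

int main(void)
{
	uintptr_t frame;

	pagemap_insert(0x1f000);              /* first fault shares a frame */
	frame = (uintptr_t) pagemap_search(); /* a later fault finds it */
	if (!frame)
		printf("no mapping yet: allocate and zero a new frame\n");
	else
		printf("reuse shared frame %#lx\n", (unsigned long) frame);
	return 0;
}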
/kernel/trunk/generic/src/mm/as.c
---
97,8 → 97,8
 as_t *AS_KERNEL = NULL;
 static int area_flags_to_page_flags(int aflags);
-static as_area_t *find_area_and_lock(as_t *as, __address va);
-static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
+static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
+static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
 static void sh_info_remove_reference(share_info_t *sh_info);
 /** Initialize address space subsystem. */
199,7 → 199,7
 *
 * @return Address space area on success or NULL on failure.
 */
-as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
+as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
 mem_backend_t *backend, mem_backend_data_t *backend_data)
 {
 ipl_t ipl;
238,7 → 238,7
 if (backend_data)
 a->backend_data = *backend_data;
 else
-memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);
+memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
 btree_create(&a->used_space);
259,7 → 259,7
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
-int as_area_resize(as_t *as, __address address, size_t size, int flags)
+int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
 {
 as_area_t *area;
 ipl_t ipl;
312,7 → 312,7
 if (pages < area->pages) {
 bool cond;
-__address start_free = area->base + pages*PAGE_SIZE;
+uintptr_t start_free = area->base + pages*PAGE_SIZE;
 /*
 * Shrinking the area.
337,7 → 337,7
 ASSERT(!list_empty(&area->used_space.leaf_head));
 node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
 if ((cond = (bool) node->keys)) {
-__address b = node->key[node->keys - 1];
+uintptr_t b = node->key[node->keys - 1];
 count_t c = (count_t) node->value[node->keys - 1];
 int i = 0;
418,10 → 418,10
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
-int as_area_destroy(as_t *as, __address address)
+int as_area_destroy(as_t *as, uintptr_t address)
 {
 as_area_t *area;
-__address base;
+uintptr_t base;
 link_t *cur;
 ipl_t ipl;
451,7 → 451,7
 node = list_get_instance(cur, btree_node_t, leaf_link);
 for (i = 0; i < node->keys; i++) {
-__address b = node->key[i];
+uintptr_t b = node->key[i];
 count_t j;
 pte_t *pte;
518,8 → 518,8
 * address space area. ENOTSUP is returned if an attempt
 * to share non-anonymous address space area is detected.
 */
-int as_area_share(as_t *src_as, __address src_base, size_t acc_size,
-as_t *dst_as, __address dst_base, int dst_flags_mask)
+int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
+as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
 {
 ipl_t ipl;
 int src_flags;
665,7 → 665,7
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 * fault was caused by copy_to_uspace() or copy_from_uspace().
 */
-int as_page_fault(__address page, pf_access_t access, istate_t *istate)
+int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
 {
 pte_t *pte;
 as_area_t *area;
744,10 → 744,10
 page_fault:
 if (THREAD->in_copy_from_uspace) {
 THREAD->in_copy_from_uspace = false;
-istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
+istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
 } else if (THREAD->in_copy_to_uspace) {
 THREAD->in_copy_to_uspace = false;
-istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
+istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
 } else {
 return AS_PF_FAULT;
 }
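
The two istate_set_retaddr() hunks above store the address of a failover routine into the saved register state, so a faulting userspace copy resumes there instead of killing the thread. A standalone sketch of the mechanism; istate_t is reduced to the one field the call touches, and the real one is architecture-specific:

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the saved register state. */
typedef struct { uintptr_t retaddr; } istate_t;

static void istate_set_retaddr(istate_t *istate, uintptr_t retaddr)
{
	istate->retaddr = retaddr;
}

static void failover(void) { printf("copy aborted, EFAULT path\n"); }

int main(void)
{
	istate_t istate = { 0 };

	/* Plain C99 only guarantees uintptr_t for object pointers, not
	 * function pointers; kernels target ABIs where code addresses
	 * fit the same word, which is what this cast relies on. */
	istate_set_retaddr(&istate, (uintptr_t) &failover);

	((void (*)(void)) istate.retaddr)();  /* "resume" at the retaddr */
	return 0;
}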
942,7 → 942,7
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
-as_area_t *find_area_and_lock(as_t *as, __address va)
+as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
 {
 as_area_t *a;
 btree_node_t *leaf, *lnode;
998,7 → 998,7
 *
 * @return True if there is no conflict, false otherwise.
 */
-bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
+bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
 {
 as_area_t *a;
 btree_node_t *leaf, *node;
1071,7 → 1071,7
 }
 /** Return size of the address space area with given base. */
-size_t as_get_size(__address base)
+size_t as_get_size(uintptr_t base)
 {
 ipl_t ipl;
 as_area_t *src_area;
1099,7 → 1099,7
 *
 * @return 0 on failure and 1 on success.
 */
-int used_space_insert(as_area_t *a, __address page, count_t count)
+int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
 {
 btree_node_t *leaf, *node;
 count_t pages;
1123,7 → 1123,7
 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
 if (node) {
-__address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
+uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
 count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
 /*
1166,7 → 1166,7
 return 1;
 }
 } else if (page < leaf->key[0]) {
-__address right_pg = leaf->key[0];
+uintptr_t right_pg = leaf->key[0];
 count_t right_cnt = (count_t) leaf->value[0];
 /*
1197,7 → 1197,7
 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
 if (node) {
-__address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
+uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
 /*
1240,7 → 1240,7
 return 1;
 }
 } else if (page >= leaf->key[leaf->keys - 1]) {
-__address left_pg = leaf->key[leaf->keys - 1];
+uintptr_t left_pg = leaf->key[leaf->keys - 1];
 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
 /*
1272,7 → 1272,7
 */
 for (i = 1; i < leaf->keys; i++) {
 if (page < leaf->key[i]) {
-__address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
+uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
 count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
 /*
1326,7 → 1326,7
 *
 * @return 0 on failure and 1 on success.
 */
-int used_space_remove(as_area_t *a, __address page, count_t count)
+int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
 {
 btree_node_t *leaf, *node;
 count_t pages;
1363,7 → 1363,7
 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
 if (node && page < leaf->key[0]) {
-__address left_pg = node->key[node->keys - 1];
+uintptr_t left_pg = node->key[node->keys - 1];
 count_t left_cnt = (count_t) node->value[node->keys - 1];
 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1396,7 → 1396,7
 }
 if (page > leaf->key[leaf->keys - 1]) {
-__address left_pg = leaf->key[leaf->keys - 1];
+uintptr_t left_pg = leaf->key[leaf->keys - 1];
 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1432,7 → 1432,7
 */
 for (i = 1; i < leaf->keys - 1; i++) {
 if (page < leaf->key[i]) {
-__address left_pg = leaf->key[i - 1];
+uintptr_t left_pg = leaf->key[i - 1];
 count_t left_cnt = (count_t) leaf->value[i - 1];
 /*
1496,7 → 1496,7
 node = list_get_instance(cur, btree_node_t, leaf_link);
 for (i = 0; i < node->keys; i++)
-frame_free((__address) node->value[i]);
+frame_free((uintptr_t) node->value[i]);
 }
 }
1513,24 → 1513,24
 */
 /** Wrapper for as_area_create(). */
-__native sys_as_area_create(__address address, size_t size, int flags)
+unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
 {
 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
-return (__native) address;
+return (unative_t) address;
 else
-return (__native) -1;
+return (unative_t) -1;
 }
 /** Wrapper for as_area_resize. */
-__native sys_as_area_resize(__address address, size_t size, int flags)
+unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
 {
-return (__native) as_area_resize(AS, address, size, 0);
+return (unative_t) as_area_resize(AS, address, size, 0);
 }
 /** Wrapper for as_area_destroy. */
-__native sys_as_area_destroy(__address address)
+unative_t sys_as_area_destroy(uintptr_t address)
 {
-return (__native) as_area_destroy(AS, address);
+return (unative_t) as_area_destroy(AS, address);
 }
 /** @}
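
The syscall wrappers show why the return type had to become unative_t: the kernel hands back either a valid area base or the all-ones word (unative_t) -1 in a single machine register, and userspace tests that register against -1. A compilable sketch with a toy as_area_create(); the unative_t width here is an assumption (pointer-sized):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's native machine word. */
typedef uintptr_t unative_t;

/* Toy as_area_create(): succeed for any non-zero base. */
static int as_area_create(uintptr_t address) { return address != 0; }

/* Mirrors sys_as_area_create() above: area base on success,
 * (unative_t) -1 on failure. */
static unative_t sys_as_area_create(uintptr_t address)
{
	if (as_area_create(address))
		return (unative_t) address;
	else
		return (unative_t) -1;
}

int main(void)
{
	printf("%#lx\n", (unsigned long) sys_as_area_create(0x40000000));
	printf("%#lx\n", (unsigned long) sys_as_area_create(0));
	return 0;
}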
/kernel/trunk/generic/src/mm/buddy.c
---
66,7 → 66,7
 * @return New buddy system.
 */
 void buddy_system_create(buddy_system_t *b,
-__u8 max_order,
+uint8_t max_order,
 buddy_system_operations_t *op,
 void *data)
 {
101,8 → 101,8
 *
 * @return True if block can be allocated
 */
-bool buddy_system_can_alloc(buddy_system_t *b, __u8 i) {
-__u8 k;
+bool buddy_system_can_alloc(buddy_system_t *b, uint8_t i) {
+uint8_t k;
 /*
 * If requested block is greater then maximal block
130,7 → 130,7
 link_t *buddy_system_alloc_block(buddy_system_t *b, link_t *block)
 {
 link_t *left,*right, *tmp;
-__u8 order;
+uint8_t order;
 left = b->op->find_block(b, block, BUDDY_SYSTEM_INNER_BLOCK);
 ASSERT(left);
167,7 → 167,7
 *
 * @return Block of data represented by link_t.
 */
-link_t *buddy_system_alloc(buddy_system_t *b, __u8 i)
+link_t *buddy_system_alloc(buddy_system_t *b, uint8_t i)
 {
 link_t *res, *hlp;
230,7 → 230,7
 void buddy_system_free(buddy_system_t *b, link_t *block)
 {
 link_t *buddy, *hlp;
-__u8 i;
+uint8_t i;
 /*
 * Determine block's order.
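
buddy_system_can_alloc() is the only function above whose body peeks through (the "requested block is greater than maximal block" comment). A sketch of the same check under an assumed per-order free-list representation: an order-i request is satisfiable if any order k >= i holds a free block, because a larger block can be split down to 2^i frames:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 8

/* Free-block counts per order; an illustrative stand-in for the
 * buddy system's per-order free lists. */
static unsigned free_blocks[MAX_ORDER + 1];

static bool can_alloc(uint8_t i)
{
	uint8_t k;

	if (i > MAX_ORDER)  /* requested block greater than maximal block */
		return false;
	for (k = i; k <= MAX_ORDER; k++)
		if (free_blocks[k])
			return true;
	return false;
}

int main(void)
{
	free_blocks[5] = 1;                     /* one free 2^5-frame block */
	printf("order 3: %d\n", can_alloc(3));  /* 1: split the order-5 block */
	printf("order 6: %d\n", can_alloc(6));  /* 0: nothing big enough */
	return 0;
}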
/kernel/trunk/generic/src/mm/backend_phys.c
---
46,7 → 46,7
 #include <arch.h>
 #include <align.h>
-static int phys_page_fault(as_area_t *area, __address addr, pf_access_t access);
+static int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
 static void phys_share(as_area_t *area);
 mem_backend_t phys_backend = {
65,9 → 65,9
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
-int phys_page_fault(as_area_t *area, __address addr, pf_access_t access)
+int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
 {
-__address base = area->backend_data.base;
+uintptr_t base = area->backend_data.base;
 if (!as_area_check_access(area, access))
 return AS_PF_FAULT;
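
The phys backend maps an area onto one contiguous physical region starting at backend_data.base. A hedged sketch of the frame arithmetic such a fault handler performs; the formula is inferred from the base/addr names above, not copied from the kernel:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define ALIGN_DOWN(a, s) ((a) & ~((uintptr_t) (s) - 1))

/* Assumed mapping rule: the physical region mirrors the virtual area
 * one-to-one, so the frame sits at the same offset from phys_base as
 * the faulting page sits from the area base. */
static uintptr_t phys_frame(uintptr_t area_base, uintptr_t phys_base,
    uintptr_t addr)
{
	return phys_base + (ALIGN_DOWN(addr, PAGE_SIZE) - area_base);
}

int main(void)
{
	uintptr_t area_base = 0x40000000;  /* virtual base of the area */
	uintptr_t phys_base = 0xfd000000;  /* e.g. a framebuffer aperture */
	uintptr_t fault     = 0x40002abc;

	printf("map page %#lx -> frame %#lx\n",
	    (unsigned long) ALIGN_DOWN(fault, PAGE_SIZE),
	    (unsigned long) phys_frame(area_base, phys_base, fault));
	return 0;
}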
/kernel/trunk/generic/src/mm/frame.c
---
69,7 → 69,7
 typedef struct {
 count_t refcount; /**< tracking of shared frames */
-__u8 buddy_order; /**< buddy system block order */
+uint8_t buddy_order; /**< buddy system block order */
 link_t buddy_link; /**< link to the next free block inside one order */
 void *parent; /**< If allocated by slab, this points there */
 } frame_t;
217,7 → 217,7
 }
 /** @return True if zone can allocate specified order */
-static int zone_can_alloc(zone_t *z, __u8 order)
+static int zone_can_alloc(zone_t *z, uint8_t order)
 {
 return buddy_system_can_alloc(z->buddy_system, order);
 }
230,7 → 230,7
 * @param order Size (2^order) of free space we are trying to find
 * @param pzone Pointer to preferred zone or NULL, on return contains zone number
 */
-static zone_t * find_free_zone_lock(__u8 order, int *pzone)
+static zone_t * find_free_zone_lock(uint8_t order, int *pzone)
 {
 int i;
 zone_t *z;
271,7 → 271,7
 * @param order - Order of parent must be different then this parameter!!
 */
 static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child,
-__u8 order)
+uint8_t order)
 {
 frame_t * frame;
 zone_t * zone;
380,7 → 380,7
 * @param block Buddy system block
 * @param order Order to set
 */
-static void zone_buddy_set_order(buddy_system_t *b, link_t * block, __u8 order) {
+static void zone_buddy_set_order(buddy_system_t *b, link_t * block, uint8_t order) {
 frame_t * frame;
 frame = list_get_instance(block, frame_t, buddy_link);
 frame->buddy_order = order;
393,7 → 393,7
 *
 * @return Order of block
 */
-static __u8 zone_buddy_get_order(buddy_system_t *b, link_t * block) {
+static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t * block) {
 frame_t * frame;
 frame = list_get_instance(block, frame_t, buddy_link);
 return frame->buddy_order;
450,7 → 450,7
 * @return Frame index in zone
 *
 */
-static pfn_t zone_frame_alloc(zone_t *zone, __u8 order)
+static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order)
 {
 pfn_t v;
 link_t *tmp;
483,7 → 483,7
 static void zone_frame_free(zone_t *zone, index_t frame_idx)
 {
 frame_t *frame;
-__u8 order;
+uint8_t order;
 frame = &zone->frames[frame_idx];
537,7 → 537,7
 static void _zone_merge(zone_t *z, zone_t *z1, zone_t *z2)
 {
-__u8 max_order;
+uint8_t max_order;
 int i, z2idx;
 pfn_t frame_idx;
 frame_t *frame;
624,7 → 624,7
 count_t cframes;
 int i;
-pfn = ADDR2PFN((__address)KA2PA(oldzone));
+pfn = ADDR2PFN((uintptr_t)KA2PA(oldzone));
 cframes = SIZE2FRAMES(zone_conf_size(oldzone->count));
 if (pfn < newzone->base || pfn >= newzone->base + newzone->count)
653,7 → 653,7
 static void zone_reduce_region(zone_t *zone, pfn_t frame_idx, count_t count)
 {
 count_t i;
-__u8 order;
+uint8_t order;
 frame_t *frame;
 ASSERT(frame_idx+count < zone->count);
689,7 → 689,7
 ipl_t ipl;
 zone_t *zone1, *zone2, *newzone;
 int cframes;
-__u8 order;
+uint8_t order;
 int i;
 pfn_t pfn;
779,7 → 779,7
 static void zone_construct(pfn_t start, count_t count, zone_t *z, int flags)
 {
 int i;
-__u8 max_order;
+uint8_t max_order;
 spinlock_initialize(&z->lock, "zone_lock");
 z->base = start;
817,7 → 817,7
 * @param count Size of zone in frames
 * @return Size of zone configuration info (in bytes)
 */
-__address zone_conf_size(count_t count)
+uintptr_t zone_conf_size(count_t count)
 {
 int size = sizeof(zone_t) + count*sizeof(frame_t);
 int max_order;
845,7 → 845,7
 int zone_create(pfn_t start, count_t count, pfn_t confframe, int flags)
 {
 zone_t *z;
-__address addr;
+uintptr_t addr;
 count_t confcount;
 int i;
 int znum;
931,7 → 931,7
 * @return Physical address of the allocated frame.
 *
 */
-void * frame_alloc_generic(__u8 order, int flags, int *pzone)
+void * frame_alloc_generic(uint8_t order, int flags, int *pzone)
 {
 ipl_t ipl;
 int freed;
990,7 → 990,7
 *
 * @param Frame Physical Address of of the frame to be freed.
 */
-void frame_free(__address frame)
+void frame_free(uintptr_t frame)
 {
 ipl_t ipl;
 zone_t *zone;
1099,7 → 1099,7
 for (i = 0; i < zones.count; i++) {
 zone = zones.info[i];
 spinlock_lock(&zone->lock);
-printf("%d: %.*p \t%10zd\t%10zd\n", i, sizeof(__address) * 2, PFN2ADDR(zone->base), zone->free_count, zone->busy_count);
+printf("%d: %.*p \t%10zd\t%10zd\n", i, sizeof(uintptr_t) * 2, PFN2ADDR(zone->base), zone->free_count, zone->busy_count);
 spinlock_unlock(&zone->lock);
 }
 spinlock_unlock(&zones.lock);
1131,7 → 1131,7
 spinlock_lock(&zone->lock);
 printf("Memory zone information\n");
-printf("Zone base address: %#.*p\n", sizeof(__address) * 2, PFN2ADDR(zone->base));
+printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2, PFN2ADDR(zone->base));
 printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
 printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
 printf("Available space: %zd (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
/kernel/trunk/generic/src/mm/page.c
---
69,7 → 69,7
 * @param s Address of the structure.
 * @param size Size of the structure.
 */
-void map_structure(__address s, size_t size)
+void map_structure(uintptr_t s, size_t size)
 {
 int i, cnt, length;
93,7 → 93,7
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 */
-void page_mapping_insert(as_t *as, __address page, __address frame, int flags)
+void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
 {
 ASSERT(page_mapping_operations);
 ASSERT(page_mapping_operations->mapping_insert);
112,7 → 112,7
 * @param as Address space to wich page belongs.
 * @param page Virtual address of the page to be demapped.
 */
-void page_mapping_remove(as_t *as, __address page)
+void page_mapping_remove(as_t *as, uintptr_t page)
 {
 ASSERT(page_mapping_operations);
 ASSERT(page_mapping_operations->mapping_remove);
131,7 → 131,7
 *
 * @return NULL if there is no such mapping; requested mapping otherwise.
 */
-pte_t *page_mapping_find(as_t *as, __address page)
+pte_t *page_mapping_find(as_t *as, uintptr_t page)
 {
 ASSERT(page_mapping_operations);
 ASSERT(page_mapping_operations->mapping_find);
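
All three wrappers above dispatch through the global page_mapping_operations table after asserting it is populated, which is how the architecture plugs in its paging code. A self-contained skeleton of that pattern; the struct is cut down to two operations and drops the as_t * argument the kernel versions take:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Sketch of the operations table; the kernel's definition also
 * carries mapping_find and the address-space argument. */
typedef struct {
	void (*mapping_insert)(uintptr_t page, uintptr_t frame, int flags);
	void (*mapping_remove)(uintptr_t page);
} page_mapping_operations_t;

static void demo_insert(uintptr_t page, uintptr_t frame, int flags)
{
	printf("map %#lx -> %#lx (flags %#x)\n",
	    (unsigned long) page, (unsigned long) frame, flags);
}

static void demo_remove(uintptr_t page)
{
	printf("unmap %#lx\n", (unsigned long) page);
}

static page_mapping_operations_t demo_ops = { demo_insert, demo_remove };
static page_mapping_operations_t *page_mapping_operations = &demo_ops;

int main(void)
{
	assert(page_mapping_operations);
	assert(page_mapping_operations->mapping_insert);
	page_mapping_operations->mapping_insert(0x1000, 0x2000, 0);
	page_mapping_operations->mapping_remove(0x1000);
	return 0;
}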
/kernel/trunk/generic/src/mm/backend_elf.c
---
50,8 → 50,8
 #include <macros.h>
 #include <arch.h>
-static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
-static void elf_frame_free(as_area_t *area, __address page, __address frame);
+static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
+static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
 static void elf_share(as_area_t *area);
 mem_backend_t elf_backend = {
70,12 → 70,12
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
-int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
+int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
 {
 elf_header_t *elf = area->backend_data.elf;
 elf_segment_header_t *entry = area->backend_data.segment;
 btree_node_t *leaf;
-__address base, frame;
+uintptr_t base, frame;
 index_t i;
 if (!as_area_check_access(area, access))
83,7 → 83,7
 ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
 i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
-base = (__address) (((void *) elf) + entry->p_offset);
+base = (uintptr_t) (((void *) elf) + entry->p_offset);
 ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
 if (area->sh_info) {
94,7 → 94,7
 */
 mutex_lock(&area->sh_info->lock);
-frame = (__address) btree_search(&area->sh_info->pagemap,
+frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
 ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
 if (!frame) {
 int i;
134,7 → 134,7
 * as COW.
 */
 if (entry->p_flags & PF_W) {
-frame = (__address)frame_alloc(ONE_FRAME, 0);
+frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
 memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
 if (area->sh_info) {
153,7 → 153,7
 * To resolve the situation, a frame must be allocated
 * and cleared.
 */
-frame = (__address)frame_alloc(ONE_FRAME, 0);
+frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
 memsetb(PA2KA(frame), FRAME_SIZE, 0);
 if (area->sh_info) {
170,7 → 170,7
 * the upper part is anonymous memory.
 */
 size = entry->p_filesz - (i<<PAGE_WIDTH);
-frame = (__address)frame_alloc(ONE_FRAME, 0);
+frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
 memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
 memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
201,16 → 201,16
 * @param frame Frame to be released.
 *
 */
-void elf_frame_free(as_area_t *area, __address page, __address frame)
+void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
 {
 elf_header_t *elf = area->backend_data.elf;
 elf_segment_header_t *entry = area->backend_data.segment;
-__address base;
+uintptr_t base;
 index_t i;
 ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
 i = (page - entry->p_vaddr) >> PAGE_WIDTH;
-base = (__address) (((void *) elf) + entry->p_offset);
+base = (uintptr_t) (((void *) elf) + entry->p_offset);
 ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
 if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
245,7 → 245,7
 elf_segment_header_t *entry = area->backend_data.segment;
 link_t *cur;
 btree_node_t *leaf, *node;
-__address start_anon = entry->p_vaddr + entry->p_filesz;
+uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;
 /*
 * Find the node in which to start linear search.
269,7 → 269,7
 node = list_get_instance(cur, btree_node_t, leaf_link);
 for (i = 0; i < node->keys; i++) {
-__address base = node->key[i];
+uintptr_t base = node->key[i];
 count_t count = (count_t) node->value[i];
 int j;
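
The elf backend hunks touch all three fault cases for a page of a loaded segment: wholly file-backed (copy or map the image frame), wholly past p_filesz (allocate and zero, anonymous memory), and the page straddling the boundary (copy size bytes and zero the rest, the memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0) hunk). A standalone walk through that case split; the p_vaddr/p_filesz names match the hunks, the numbers are made up:

#include <stdint.h>
#include <stdio.h>

#define PAGE_WIDTH 12
#define PAGE_SIZE  (1 << PAGE_WIDTH)

int main(void)
{
	uintptr_t p_vaddr  = 0x8048000;  /* segment start (page-aligned) */
	uintptr_t p_filesz = 0x2300;     /* bytes present in the image */
	uintptr_t addr;

	for (addr = p_vaddr; addr < p_vaddr + 0x4000; addr += PAGE_SIZE) {
		/* Page index within the segment, as in the 83,7 hunk. */
		uintptr_t i = (addr - p_vaddr) >> PAGE_WIDTH;
		uintptr_t off = i << PAGE_WIDTH;

		if (off + PAGE_SIZE <= p_filesz)
			printf("page %lu: copy whole page from image\n",
			    (unsigned long) i);
		else if (off >= p_filesz)
			printf("page %lu: allocate and zero (anonymous)\n",
			    (unsigned long) i);
		else
			printf("page %lu: copy %lu bytes, zero the rest\n",
			    (unsigned long) i,
			    (unsigned long) (p_filesz - off));
	}
	return 0;
}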