Subversion Repositories HelenOS

Compare Revisions

Rev 3423 → Rev 3424

/branches/tracing/kernel/generic/src/mm/slab.c
167,7 → 167,7
* Allocate frames for slab space and initialize
*
*/
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
{
void *data;
slab_t *slab;
179,7 → 179,7
if (!data) {
return NULL;
}
if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
slab = slab_alloc(slab_extern_cache, flags);
if (!slab) {
frame_free(KA2PA(data));
200,7 → 200,7
slab->cache = cache;
 
for (i = 0; i < cache->objects; i++)
*((int *) (slab->start + i*cache->size)) = i+1;
*((int *) (slab->start + i*cache->size)) = i + 1;
 
atomic_inc(&cache->allocated_slabs);
return slab;
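
The loop above threads an implicit free list through the slab's objects: the first word of free object i stores the index i + 1 of the next free object, so the slab needs no separate free-list nodes. A minimal sketch of both directions of that list, matching the slab_obj_create() and slab_obj_destroy() hunks below (simplified, not the verbatim kernel code):

    /* Pop on allocation: take the head object, then advance the head
     * to the index stored in the object's first word. */
    void *obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *) obj);

    /* Push on free: store the old head index into the object and make
     * the object the new head. */
    *((int *) obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start) / cache->size;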
239,8 → 239,7
*
* @return Number of freed pages
*/
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
slab_t *slab)
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
{
int freed = 0;
 
256,7 → 255,7
ASSERT(slab->available < cache->objects);
 
*((int *)obj) = slab->nextavail;
slab->nextavail = (obj - slab->start)/cache->size;
slab->nextavail = (obj - slab->start) / cache->size;
slab->available++;
 
/* Move it to correct list */
281,7 → 280,7
*
* @return Object address or null
*/
static void * slab_obj_create(slab_cache_t *cache, int flags)
static void *slab_obj_create(slab_cache_t *cache, int flags)
{
slab_t *slab;
void *obj;
301,7 → 300,8
return NULL;
spinlock_lock(&cache->slablock);
} else {
slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
slab = list_get_instance(cache->partial_slabs.next, slab_t,
link);
list_remove(&slab->link);
}
obj = slab->start + slab->nextavail * cache->size;
332,8 → 332,7
*
* @param first If true, return first, else last mag
*/
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
int first)
static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
{
slab_magazine_t *mag = NULL;
link_t *cur;
368,8 → 367,7
*
* @return Number of freed pages
*/
static count_t magazine_destroy(slab_cache_t *cache,
slab_magazine_t *mag)
static count_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
{
unsigned int i;
count_t frames = 0;
389,7 → 387,7
*
* Assume cpu_magazine lock is held
*/
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
{
slab_magazine_t *cmag, *lastmag, *newmag;
 
423,7 → 421,7
*
* @return Pointer to object or NULL if not available
*/
static void * magazine_obj_get(slab_cache_t *cache)
static void *magazine_obj_get(slab_cache_t *cache)
{
slab_magazine_t *mag;
void *obj;
458,7 → 456,7
* allocate new, exchange last & current
*
*/
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
{
slab_magazine_t *cmag,*lastmag,*newmag;
 
530,7 → 528,8
static unsigned int comp_objects(slab_cache_t *cache)
{
if (cache->flags & SLAB_CACHE_SLINSIDE)
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
cache->size;
else
return (PAGE_SIZE << cache->order) / cache->size;
}
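
As a worked example of comp_objects() (illustrative numbers, not values from this file): with PAGE_SIZE = 4096, cache->order = 0, and cache->size = 256, a cache with external slab control data fits 4096 / 256 = 16 objects per slab, while a SLAB_CACHE_SLINSIDE cache first loses sizeof(slab_t) bytes to the embedded header, so if sizeof(slab_t) were 64 it would fit (4096 - 64) / 256 = 15 objects.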
557,28 → 556,25
ASSERT(_slab_initialized >= 2);
 
cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
0);
for (i = 0; i < config.cpu_count; i++) {
memsetb((uintptr_t)&cache->mag_cache[i],
sizeof(cache->mag_cache[i]), 0);
spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
spinlock_initialize(&cache->mag_cache[i].lock,
"slab_maglock_cpu");
}
}
 
/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
char *name,
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
int (*destructor)(void *obj),
int flags)
_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
int flags)
{
int pages;
ipl_t ipl;
 
memsetb((uintptr_t)cache, sizeof(*cache), 0);
memsetb(cache, sizeof(*cache), 0);
cache->name = name;
 
if (align < sizeof(unative_t))
596,7 → 592,7
list_initialize(&cache->magazines);
spinlock_initialize(&cache->slablock, "slab_lock");
spinlock_initialize(&cache->maglock, "slab_maglock");
if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
make_magcache(cache);
 
/* Compute slab sizes, object counts in slabs etc. */
609,7 → 605,7
if (pages == 1)
cache->order = 0;
else
cache->order = fnzb(pages-1)+1;
cache->order = fnzb(pages - 1) + 1;
 
while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
cache->order += 1;
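
Assuming fnzb() returns the index of the most significant set bit, the expression fnzb(pages - 1) + 1 above computes ceil(log2(pages)) for pages > 1: e.g. pages = 5 gives fnzb(4) = 2 and thus cache->order = 3, i.e. a slab of 2^3 = 8 pages, the smallest power-of-two page count that holds the requested objects.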
630,18 → 626,16
}
 
/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
int (*destructor)(void *obj),
int flags)
slab_cache_t *
slab_cache_create(char *name, size_t size, size_t align,
int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
int flags)
{
slab_cache_t *cache;
 
cache = slab_alloc(&slab_cache_cache, 0);
_slab_cache_create(cache, name, size, align, constructor, destructor,
flags);
flags);
return cache;
}
 
665,7 → 659,7
* endless loop
*/
magcount = atomic_get(&cache->magazine_counter);
while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
frames += magazine_destroy(cache,mag);
if (!(flags & SLAB_RECLAIM_ALL) && frames)
break;
718,8 → 712,8
_slab_reclaim(cache, SLAB_RECLAIM_ALL);
 
/* All slabs must be empty */
if (!list_empty(&cache->full_slabs) \
|| !list_empty(&cache->partial_slabs))
if (!list_empty(&cache->full_slabs) ||
!list_empty(&cache->partial_slabs))
panic("Destroying cache that is not empty.");
 
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
727,9 → 721,8
slab_free(&slab_cache_cache, cache);
}
 
/** Allocate new object from cache - if no flags given, always returns
memory */
void * slab_alloc(slab_cache_t *cache, int flags)
/** Allocate new object from cache - if no flags given, always returns memory */
void *slab_alloc(slab_cache_t *cache, int flags)
{
ipl_t ipl;
void *result = NULL;
758,9 → 751,8
 
ipl = interrupts_disable();
 
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
|| magazine_obj_put(cache, obj)) {
 
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
magazine_obj_put(cache, obj)) {
slab_obj_destroy(cache, obj, slab);
 
}
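
The control flow above is the magazine layer's fallback path: an object being freed first goes to the per-CPU magazine via magazine_obj_put(), and only if the cache opts out (SLAB_CACHE_NOMAGAZINE) or no magazine space can be obtained (nonzero return) does it fall through to slab_obj_destroy(), which returns the object to its home slab.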
787,7 → 779,8
* memory allocation from interrupts can deadlock.
*/
 
for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
for (cur = slab_cache_list.next; cur != &slab_cache_list;
cur = cur->next) {
cache = list_get_instance(cur, slab_cache_t, link);
frames += _slab_reclaim(cache, flags);
}
807,13 → 800,21
ipl = interrupts_disable();
spinlock_lock(&slab_cache_lock);
printf("slab name size pages obj/pg slabs cached allocated ctl\n");
printf("---------------- -------- ------ ------ ------ ------ --------- ---\n");
printf("slab name size pages obj/pg slabs cached allocated"
" ctl\n");
printf("---------------- -------- ------ ------ ------ ------ ---------"
" ---\n");
for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
for (cur = slab_cache_list.next; cur != &slab_cache_list;
cur = cur->next) {
cache = list_get_instance(cur, slab_cache_t, link);
printf("%-16s %8zd %6zd %6zd %6zd %6zd %9zd %-3s\n", cache->name, cache->size, (1 << cache->order), cache->objects, atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs), atomic_get(&cache->allocated_objs), cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
cache->name, cache->size, (1 << cache->order),
cache->objects, atomic_get(&cache->allocated_slabs),
atomic_get(&cache->cached_objs),
atomic_get(&cache->allocated_objs),
cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
}
spinlock_unlock(&slab_cache_lock);
interrupts_restore(ipl);
824,32 → 825,24
int i, size;
 
/* Initialize magazine cache */
_slab_cache_create(&mag_cache,
"slab_magazine",
sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
sizeof(uintptr_t),
NULL, NULL,
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
_slab_cache_create(&mag_cache, "slab_magazine",
sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
SLAB_CACHE_SLINSIDE);
/* Initialize slab_cache cache */
_slab_cache_create(&slab_cache_cache,
"slab_cache",
sizeof(slab_cache_cache),
sizeof(uintptr_t),
NULL, NULL,
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
_slab_cache_create(&slab_cache_cache, "slab_cache",
sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
/* Initialize external slab cache */
slab_extern_cache = slab_cache_create("slab_extern",
sizeof(slab_t),
0, NULL, NULL,
SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
 
/* Initialize structures for malloc */
for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
i++, size <<= 1) {
malloc_caches[i] = slab_cache_create(malloc_names[i],
size, 0,
NULL,NULL, SLAB_CACHE_MAGDEFERRED);
for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
i++, size <<= 1) {
malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}
#ifdef CONFIG_DEBUG
_slab_initialized = 1;
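
The "Initialize structures for malloc" loop above creates one slab cache per power-of-two size class, from 1 << SLAB_MIN_MALLOC_W up to 1 << SLAB_MAX_MALLOC_W bytes. Under assumed bounds (the actual constants live in a header not shown here; 4 and 18 are illustrative only), the loop would create 15 caches of 16 B, 32 B, ..., 256 KiB, and malloc() below rounds each request up to the nearest class and allocates from that cache.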
874,9 → 867,11
 
spinlock_lock(&slab_cache_lock);
for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
for (cur = slab_cache_list.next; cur != &slab_cache_list;
cur = cur->next){
s = list_get_instance(cur, slab_cache_t, link);
if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
SLAB_CACHE_MAGDEFERRED)
continue;
make_magcache(s);
s->flags &= ~SLAB_CACHE_MAGDEFERRED;
887,7 → 882,7
 
/**************************************/
/* kalloc/kfree functions */
void * malloc(unsigned int size, int flags)
void *malloc(unsigned int size, int flags)
{
ASSERT(_slab_initialized);
ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));
900,7 → 895,7
return slab_alloc(malloc_caches[idx], flags);
}
 
void * realloc(void *ptr, unsigned int size, int flags)
void *realloc(void *ptr, unsigned int size, int flags)
{
ASSERT(_slab_initialized);
ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
/branches/tracing/kernel/generic/src/mm/backend_anon.c
113,7 → 113,7
}
if (allocate) {
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
/*
144,7 → 144,7
* the different causes
*/
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
}
/branches/tracing/kernel/generic/src/mm/as.c
324,8 → 324,7
if (backend_data)
a->backend_data = *backend_data;
else
memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
0);
memsetb(&a->backend_data, sizeof(a->backend_data), 0);
 
btree_create(&a->used_space);
453,10 → 452,8
cond = false; /* we are almost done */
i = (start_free - b) >> PAGE_WIDTH;
if (!used_space_remove(area, start_free,
c - i))
panic("Could not remove used "
"space.\n");
if (!used_space_remove(area, start_free, c - i))
panic("Could not remove used space.\n");
} else {
/*
* The interval of used space can be
463,8 → 460,7
* completely removed.
*/
if (!used_space_remove(area, b, c))
panic("Could not remove used "
"space.\n");
panic("Could not remove used space.\n");
}
for (; i < c; i++) {
1728,7 → 1724,7
}
}
 
panic("Inconsistency detected while adding %d pages of used space at "
panic("Inconsistency detected while adding %" PRIc " pages of used space at "
"%p.\n", count, page);
}
 
1907,7 → 1903,7
}
 
error:
panic("Inconsistency detected while removing %d pages of used space "
panic("Inconsistency detected while removing %" PRIc " pages of used space "
"from %p.\n", count, page);
}
 
2000,9 → 1996,9
as_area_t *area = node->value[i];
mutex_lock(&area->lock);
printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
printf("as_area: %p, base=%p, pages=%" PRIc " (%p - %p)\n",
area, area->base, area->pages, area->base,
area->base + area->pages*PAGE_SIZE);
area->base + FRAMES2SIZE(area->pages));
mutex_unlock(&area->lock);
}
}
/branches/tracing/kernel/generic/src/mm/buddy.c
44,6 → 44,7
#include <arch/types.h>
#include <debug.h>
#include <print.h>
#include <macros.h>
 
/** Return size needed for the buddy configuration data */
size_t buddy_conf_size(int max_order)
289,13 → 290,13
void buddy_system_structure_print(buddy_system_t *b, size_t elem_size) {
index_t i;
count_t cnt, elem_count = 0, block_count = 0;
link_t * cur;
link_t *cur;
 
printf("Order\tBlocks\tSize \tBlock size\tElems per block\n");
printf("-----\t------\t--------\t----------\t---------------\n");
for (i=0;i <= b->max_order; i++) {
for (i = 0;i <= b->max_order; i++) {
cnt = 0;
if (!list_empty(&b->order[i])) {
for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next)
302,7 → 303,8
cnt++;
}
printf("#%zd\t%5zd\t%7zdK\t%8zdK\t%6zd\t", i, cnt, (cnt * (1 << i) * elem_size) >> 10, ((1 << i) * elem_size) >> 10, 1 << i);
printf("#%" PRIi "\t%5" PRIc "\t%7" PRIc "K\t%8" PRIi "K\t%6u\t",
i, cnt, SIZE2KB(cnt * (1 << i) * elem_size), SIZE2KB((1 << i) * elem_size), 1 << i);
if (!list_empty(&b->order[i])) {
for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next) {
b->op->print_id(b, cur);
315,8 → 317,7
elem_count += (1 << i) * cnt;
}
printf("-----\t------\t--------\t----------\t---------------\n");
printf("Buddy system contains %zd free elements (%zd blocks)\n" , elem_count, block_count);
 
printf("Buddy system contains %" PRIc " free elements (%" PRIc " blocks)\n" , elem_count, block_count);
}
 
/** @}
/branches/tracing/kernel/generic/src/mm/frame.c
318,7 → 318,7
frame = list_get_instance(block, frame_t, buddy_link);
zone = (zone_t *) b->data;
index = frame_index(zone, frame);
printf("%zd", index);
printf("%" PRIi, index);
}
 
/** Buddy system find_buddy implementation
844,7 → 844,7
*/
uintptr_t zone_conf_size(count_t count)
{
int size = sizeof(zone_t) + count*sizeof(frame_t);
int size = sizeof(zone_t) + count * sizeof(frame_t);
int max_order;
 
max_order = fnzb(count);
1159,26 → 1159,31
 
ipl = interrupts_disable();
spinlock_lock(&zones.lock);
 
#ifdef __32_BITS__
printf("# base address free frames busy frames\n");
printf("-- ------------ ------------ ------------\n");
#endif
 
#ifdef __64_BITS__
printf("# base address free frames busy frames\n");
printf("-- -------------------- ------------ ------------\n");
#endif
if (sizeof(void *) == 4) {
printf("# base address free frames busy frames\n");
printf("-- ------------ ------------ ------------\n");
} else {
printf("# base address free frames busy frames\n");
printf("-- -------------------- ------------ ------------\n");
}
for (i = 0; i < zones.count; i++) {
zone = zones.info[i];
spinlock_lock(&zone->lock);
 
#ifdef __32_BITS__
printf("%-2u %10p %12" PRIc " %12" PRIc "\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
#endif
 
#ifdef __64_BITS__
printf("%-2u %18p %12" PRIc " %12" PRIc "\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
#endif
if (sizeof(void *) == 4)
printf("%-2d %#10zx %12zd %12zd\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
else
printf("%-2d %#18zx %12zd %12zd\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
spinlock_unlock(&zone->lock);
}
1211,13 → 1216,12
spinlock_lock(&zone->lock);
printf("Memory zone information\n");
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2,
PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zd KB)\n", zone->count,
printf("Zone base address: %p\n", PFN2ADDR(zone->base));
printf("Zone size: %" PRIc " frames (%" PRIs " KB)\n", zone->count,
SIZE2KB(FRAMES2SIZE(zone->count)));
printf("Allocated space: %zd frames (%zd KB)\n", zone->busy_count,
printf("Allocated space: %" PRIc " frames (%" PRIs " KB)\n", zone->busy_count,
SIZE2KB(FRAMES2SIZE(zone->busy_count)));
printf("Available space: %zd frames (%zd KB)\n", zone->free_count,
printf("Available space: %" PRIc " frames (%" PRIs " KB)\n", zone->free_count,
SIZE2KB(FRAMES2SIZE(zone->free_count)));
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
spinlock_unlock(&zone->lock);
/branches/tracing/kernel/generic/src/mm/page.c
40,11 → 40,28
* They, however, define the single interface.
*/
 
/*
* Note on memory prefetching and updating memory mappings, also described in:
* AMD x86-64 Architecture Programmer's Manual, Volume 2, System Programming,
* 7.2.1 Special Coherency Considerations.
*
* The processor which modifies a page table mapping can access prefetched data
* from the old mapping. In order to prevent this, we place a memory barrier
* after a mapping is updated.
*
* We assume that the other processors are either not using the mapping yet
* (i.e. during the bootstrap) or are executing the TLB shootdown code. While
* we don't care much about the former case, the processors in the latter case
* will do an implicit serialization by virtue of running the TLB shootdown
* interrupt handler.
*/
 
#include <mm/page.h>
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <arch/barrier.h>
#include <arch/types.h>
#include <arch/asm.h>
#include <memstr.h>
65,8 → 82,8
* considering possible crossings
* of page boundaries.
*
* @param s Address of the structure.
* @param size Size of the structure.
* @param s Address of the structure.
* @param size Size of the structure.
*/
void map_structure(uintptr_t s, size_t size)
{
76,8 → 93,11
cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);
 
for (i = 0; i < cnt; i++)
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE,
s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);
 
/* Repel prefetched accesses to the old mapping. */
memory_barrier();
}
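
A usage sketch with hypothetical numbers, assuming length accounts for the in-page offset of s (its computation is elided from this hunk): a 16-byte structure starting at 0x80000ff8, 8 bytes before a page boundary with PAGE_SIZE = 4096, yields length = 0xff8 + 16 = 4104, so cnt = 4104 / 4096 + (4104 % 4096 > 0) = 2 and both touched pages receive the identity mapping.

    /* Hypothetical call: the structure straddles one page boundary,
     * so two page_mapping_insert() calls are made. */
    map_structure(0x80000ff8, 16);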
 
/** Insert mapping of page to frame.
87,10 → 107,11
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be mapped.
* @param frame Physical address of memory frame to which the mapping is done.
* @param flags Flags to be used for mapping.
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be mapped.
* @param frame Physical address of memory frame to which the mapping is
* done.
* @param flags Flags to be used for mapping.
*/
void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
98,6 → 119,9
ASSERT(page_mapping_operations->mapping_insert);
page_mapping_operations->mapping_insert(as, page, frame, flags);
/* Repel prefetched accesses to the old mapping. */
memory_barrier();
}
 
/** Remove mapping of page.
108,8 → 132,8
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be demapped.
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be demapped.
*/
void page_mapping_remove(as_t *as, uintptr_t page)
{
117,6 → 141,9
ASSERT(page_mapping_operations->mapping_remove);
page_mapping_operations->mapping_remove(as, page);
 
/* Repel prefetched accesses to the old mapping. */
memory_barrier();
}
 
/** Find mapping for virtual page
125,10 → 152,11
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual page.
* @param as Address space to which page belongs.
* @param page Virtual page.
*
* @return NULL if there is no such mapping; requested mapping otherwise.
* @return NULL if there is no such mapping; requested mapping
* otherwise.
*/
pte_t *page_mapping_find(as_t *as, uintptr_t page)
{
/branches/tracing/kernel/generic/src/mm/backend_elf.c
48,6 → 48,7
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <arch/barrier.h>
 
#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
67,12 → 68,13
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e.
* read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
* on success (i.e. serviced).
*/
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
150,6 → 152,10
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memcpy((void *) PA2KA(frame),
(void *) (base + i * FRAME_SIZE), FRAME_SIZE);
if (entry->p_flags & PF_X) {
smc_coherence_block((void *) PA2KA(frame),
FRAME_SIZE);
}
dirty = true;
} else {
frame = KA2PA(base + i * FRAME_SIZE);
162,7 → 168,7
* and cleared.
*/
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
} else {
size_t pad_lo, pad_hi;
187,8 → 193,13
memcpy((void *) (PA2KA(frame) + pad_lo),
(void *) (base + i * FRAME_SIZE + pad_lo),
FRAME_SIZE - pad_lo - pad_hi);
memsetb(PA2KA(frame), pad_lo, 0);
memsetb(PA2KA(frame) + FRAME_SIZE - pad_hi, pad_hi, 0);
if (entry->p_flags & PF_X) {
smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
FRAME_SIZE - pad_lo - pad_hi);
}
memsetb((void *) PA2KA(frame), pad_lo, 0);
memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
0);
dirty = true;
}
 
212,9 → 223,10
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
* @param frame Frame to be released.
* @param area Pointer to the address space area.
* @param page Page that is mapped to frame. Must be aligned to
* PAGE_SIZE.
* @param frame Frame to be released.
*
*/
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
257,7 → 269,7
*
* The address space and address space area must be locked prior to the call.
*
* @param area Address space area.
* @param area Address space area.
*/
void elf_share(as_area_t *area)
{