Subversion Repositories HelenOS

Compare Revisions

Rev 2291 → Rev 2292

/branches/rcu/kernel/generic/src/main/main.c
246,8 → 246,6
task_init();
thread_init();
futex_init();
 
 
klog_init();
if (init.cnt > 0) {
257,6 → 255,7
init.tasks[i].size);
} else
printf("No init binaries found\n");
ipc_init();
 
/*
274,7 → 273,6
panic("can't create kinit thread\n");
thread_ready(t);
 
//tasklets disabled for debugging purposes
tasklet_run_tasklet_thread(k);
/*
/branches/rcu/kernel/generic/src/time/clock.c
104,8 → 104,6
* physmem_map() the clock_parea.
*/
sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
sysinfo_set_item_val("clock.fcolor", NULL, (unative_t)
PAGE_COLOR(clock_parea.vbase));
sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}
 
/branches/rcu/kernel/generic/src/ddi/ddi.c
99,8 → 99,7
* @return 0 on success, EPERM if the caller lacks capabilities to use this
* syscall, ENOENT if there is no task matching the specified ID or the
* physical address space is not enabled for mapping and ENOMEM if there
* was a problem in creating address space area. ENOTSUP is returned when
* an attempt to create an illegal address alias is detected.
* was a problem in creating address space area.
*/
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
{
139,20 → 138,8
interrupts_restore(ipl);
return ENOENT;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) {
/*
* Refuse to create an illegal address alias.
*/
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
return ENOTSUP;
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
spinlock_unlock(&parea_lock);
 
spinlock_lock(&TASK->lock);
if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE,
/branches/rcu/kernel/generic/src/ddi/irq.c
138,7 → 138,6
{
link_initialize(&irq->link);
spinlock_initialize(&irq->lock, "irq.lock");
irq->preack = false;
irq->inr = -1;
irq->devno = -1;
irq->trigger = (irq_trigger_t) 0;
/branches/rcu/kernel/generic/src/console/klog.c
90,8 → 90,6
ddi_parea_register(&klog_parea);
 
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr);
sysinfo_set_item_val("klog.fcolor", NULL, (unative_t)
PAGE_COLOR((uintptr_t) klog));
sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER);
sysinfo_set_item_val("klog.devno", NULL, devno);
sysinfo_set_item_val("klog.inr", NULL, KLOG_VIRT_INR);
/branches/rcu/kernel/generic/src/proc/scheduler.c
61,7 → 61,6
#include <cpu.h>
#include <print.h>
#include <debug.h>
#include <proc/tasklet.h>
 
static void before_task_runs(void);
static void before_thread_runs(void);
227,11 → 226,7
* Take the first thread from the queue.
*/
t = list_get_instance(r->rq_head.next, thread_t, rq_link);
if (verbose)
printf("cpu%d removing, rq_head %x, t: %x, next: %x, link: %x \n",CPU->id, r->rq_head, t, r->rq_head.next, t->rq_link);
list_remove(&t->rq_link);
if (verbose)
printf("cpu%d removed, rq_head %x, t: %x, next: %x, link: %x \n",CPU->id, r->rq_head, t, r->rq_head.next, t->rq_link);
 
spinlock_unlock(&r->lock);
 
443,8 → 438,7
THREAD->call_me = NULL;
THREAD->call_me_with = NULL;
}
if (verbose)
printf("cpu%d, Sleeping unlocking \n", CPU->id);
 
spinlock_unlock(&THREAD->lock);
 
break;
460,17 → 454,12
 
THREAD = NULL;
}
if (verbose)
printf("cpu%d looking for next thread\n", CPU->id);
 
THREAD = find_best_thread();
if (verbose)
printf("cpu%d t locking THREAD:%x \n", CPU->id, THREAD);
spinlock_lock(&THREAD->lock);
priority = THREAD->priority;
spinlock_unlock(&THREAD->lock);
if (verbose)
printf("cpu%d t unlocked after priority THREAD:%x \n", CPU->id, THREAD);
 
relink_rq(priority);
 
/branches/rcu/kernel/generic/src/proc/thread.c
238,7 → 238,6
cpu = CPU;
if (t->flags & THREAD_FLAG_WIRED) {
ASSERT(t->cpu != NULL);
cpu = t->cpu;
}
t->state = Ready;
/branches/rcu/kernel/generic/src/lib/rd.c
90,8 → 90,6
sysinfo_set_item_val("rd.size", NULL, dsize);
sysinfo_set_item_val("rd.address.physical", NULL, (unative_t)
KA2PA((void *) header + hsize));
sysinfo_set_item_val("rd.address.color", NULL, (unative_t)
PAGE_COLOR((uintptr_t) header + hsize));
 
return RE_OK;
}
/branches/rcu/kernel/generic/src/mm/tlb.c
78,7 → 78,8
* @param page Virtual page address, if required by type.
* @param count Number of pages, if required by type.
*/
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, uintptr_t page, count_t count)
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,
uintptr_t page, count_t count)
{
int i;
 
107,11 → 108,11
/*
* Enqueue the message.
*/
cpu->tlb_messages[cpu->tlb_messages_count].type = type;
cpu->tlb_messages[cpu->tlb_messages_count].asid = asid;
cpu->tlb_messages[cpu->tlb_messages_count].page = page;
cpu->tlb_messages[cpu->tlb_messages_count].count = count;
cpu->tlb_messages_count++;
index_t idx = cpu->tlb_messages_count++;
cpu->tlb_messages[idx].type = type;
cpu->tlb_messages[idx].asid = asid;
cpu->tlb_messages[idx].page = page;
cpu->tlb_messages[idx].count = count;
}
spinlock_unlock(&cpu->lock);
}
/branches/rcu/kernel/generic/src/mm/backend_anon.c
72,11 → 72,13
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
*/
int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
uintptr_t frame;
bool dirty = false;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
86,7 → 88,8
/*
* The area is shared, chances are that the mapping can be found
* in the pagemap of the address space area share info structure.
* in the pagemap of the address space area share info
* structure.
* In the case that the pagemap does not contain the respective
* mapping, a new frame is allocated and the mapping is created.
*/
102,7 → 105,8
* Just a small workaround.
*/
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
if (leaf->key[i] ==
ALIGN_DOWN(addr, PAGE_SIZE)) {
allocate = false;
break;
}
110,11 → 114,15
if (allocate) {
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
/*
* Insert the address of the newly allocated frame to the pagemap.
* Insert the address of the newly allocated
* frame to the pagemap.
*/
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
}
frame_reference_add(ADDR2PFN(frame));
137,12 → 145,13
*/
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
}
/*
* Map 'page' to 'frame'.
* Note that TLB shootdown is not attempted as only new information is being
* inserted into page tables.
* Note that TLB shootdown is not attempted as only new information is
* being inserted into page tables.
*/
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
162,9 → 171,6
void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
 
/** Share the anonymous address space area.
184,7 → 190,8
* Copy used portions of the area to sh_info's page map.
*/
mutex_lock(&area->sh_info->lock);
for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
for (cur = area->used_space.leaf_head.next;
cur != &area->used_space.leaf_head; cur = cur->next) {
btree_node_t *node;
int i;
198,12 → 205,17
pte_t *pte;
page_table_lock(area->as, false);
pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
pte = page_mapping_find(area->as,
base + j * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap,
(base + j * PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
page_table_unlock(area->as, false);
frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
 
pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
frame_reference_add(pfn);
}
}
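
The backend_anon.c hunks above drop the CONFIG_VIRT_IDX_DCACHE page-color handling and instead track, in a local dirty flag, whether anon_page_fault() wrote a freshly allocated frame. The code that consumes the flag lies outside the visible hunks, so the fragment below is only a sketch of the resulting pattern; the cache-maintenance step at the end is an assumption, not something shown in this revision:

	uintptr_t frame;
	bool dirty = false;	/* true once the kernel itself writes the frame */

	/* ... shared/private pagemap lookup as in the hunks above ... */

	frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
	memsetb(PA2KA(frame), FRAME_SIZE, 0);	/* kernel zeroes the new frame */
	dirty = true;

	/* Only new information is inserted, so no TLB shootdown is attempted. */
	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
	/*
	 * Assumed (not visible in these hunks): 'dirty' later decides whether
	 * D-cache maintenance is required for the frame just written.
	 */
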
/branches/rcu/kernel/generic/src/mm/as.c
95,10 → 95,13
#endif
 
/**
* This lock protects inactive_as_with_asid_head list. It must be acquired
* before as_t mutex.
* This lock serializes access to the ASID subsystem.
* It protects:
* - inactive_as_with_asid_head list
* - as->asid for each as of the as_t type
* - asids_allocated counter
*/
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
SPINLOCK_INITIALIZE(asidlock);
 
/**
* This list contains address spaces that are not active on any
205,14 → 208,15
* Since there is no reference to this area,
* it is safe not to lock its mutex.
*/
 
ipl = interrupts_disable();
spinlock_lock(&inactive_as_with_asid_lock);
spinlock_lock(&asidlock);
if (as->asid != ASID_INVALID && as != AS_KERNEL) {
if (as != AS && as->cpu_refcount == 0)
list_remove(&as->inactive_as_with_asid_link);
asid_put(as->asid);
}
spinlock_unlock(&inactive_as_with_asid_lock);
spinlock_unlock(&asidlock);
 
/*
* Destroy address space areas of the address space.
613,8 → 617,7
* such address space area, EPERM if there was a problem in accepting the area
* or ENOMEM if there was a problem in allocating destination address space
* area. ENOTSUP is returned if the address space area backend does not support
* sharing or if the kernel detects an attempt to create an illegal address
* alias.
* sharing.
*/
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
667,21 → 670,7
return EPERM;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (!(dst_flags_mask & AS_AREA_EXEC)) {
if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
/*
* Refuse to create an illegal address alias.
*/
mutex_unlock(&src_area->lock);
mutex_unlock(&src_as->lock);
interrupts_restore(ipl);
return ENOTSUP;
}
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
/*
* Now we are committed to sharing the area.
* First, prepare the area for sharing.
* Then it will be safe to unlock it.
875,7 → 864,8
/** Switch address spaces.
*
* Note that this function cannot sleep as it is essentially a part of
* scheduling. Sleeping here would lead to deadlock on wakeup.
* scheduling. Sleeping here would lead to deadlock on wakeup. Another
* thing which is forbidden in this context is locking the address space.
*
* @param old Old address space or NULL.
* @param new New address space.
882,17 → 872,12
*/
void as_switch(as_t *old_as, as_t *new_as)
{
ipl_t ipl;
bool needs_asid = false;
spinlock_lock(&asidlock);
ipl = interrupts_disable();
spinlock_lock(&inactive_as_with_asid_lock);
 
/*
* First, take care of the old address space.
*/
if (old_as) {
mutex_lock_active(&old_as->lock);
ASSERT(old_as->cpu_refcount);
if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
/*
905,7 → 890,6
list_append(&old_as->inactive_as_with_asid_link,
&inactive_as_with_asid_head);
}
mutex_unlock(&old_as->lock);
 
/*
* Perform architecture-specific tasks when the address space
917,43 → 901,24
/*
* Second, prepare the new address space.
*/
mutex_lock_active(&new_as->lock);
if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
if (new_as->asid != ASID_INVALID) {
if (new_as->asid != ASID_INVALID)
list_remove(&new_as->inactive_as_with_asid_link);
} else {
/*
* Defer call to asid_get() until new_as->lock is released.
*/
needs_asid = true;
else
new_as->asid = asid_get();
}
}
#ifdef AS_PAGE_TABLE
SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif
mutex_unlock(&new_as->lock);
 
if (needs_asid) {
/*
* Allocation of new ASID was deferred
* until now in order to avoid deadlock.
*/
asid_t asid;
asid = asid_get();
mutex_lock_active(&new_as->lock);
new_as->asid = asid;
mutex_unlock(&new_as->lock);
}
spinlock_unlock(&inactive_as_with_asid_lock);
interrupts_restore(ipl);
/*
* Perform architecture-specific steps.
* (e.g. write ASID to hardware register etc.)
*/
as_install_arch(new_as);
spinlock_unlock(&asidlock);
AS = new_as;
}
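
Read with the removed lines taken out, the reworked as_switch() collapses to the shape below. This is a sketch assembled only from the hunks shown above (code falling outside the visible hunks is elided); it illustrates how the single asidlock now covers the entire switch, so no per-address-space mutex is taken, the explicit interrupts_disable()/interrupts_restore() pair disappears, and asid_get() no longer has to be deferred:

void as_switch(as_t *old_as, as_t *new_as)
{
	spinlock_lock(&asidlock);

	/* First, take care of the old address space. */
	if (old_as) {
		ASSERT(old_as->cpu_refcount);
		if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
			/* The old AS keeps its ASID; remember it as inactive. */
			list_append(&old_as->inactive_as_with_asid_link,
			    &inactive_as_with_asid_head);
		}
		/* ... architecture-specific teardown elided ... */
	}

	/* Second, prepare the new address space. */
	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
		if (new_as->asid != ASID_INVALID)
			list_remove(&new_as->inactive_as_with_asid_link);
		else
			new_as->asid = asid_get();	/* safe: asidlock is held */
	}
#ifdef AS_PAGE_TABLE
	SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif

	/*
	 * Perform architecture-specific steps
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new_as);

	spinlock_unlock(&asidlock);
	AS = new_as;
}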
 
/branches/rcu/kernel/generic/src/mm/backend_phys.c
32,7 → 32,8
 
/**
* @file
* @brief Backend for address space areas backed by continuous physical memory.
* @brief Backend for address space areas backed by continuous physical
* memory.
*/
 
#include <debug.h>
62,7 → 63,8
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
*/
int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
72,7 → 74,8
return AS_PF_FAULT;
 
ASSERT(addr - area->base < area->backend_data.frames * FRAME_SIZE);
page_mapping_insert(AS, addr, base + (addr - area->base), as_area_get_flags(area));
page_mapping_insert(AS, addr, base + (addr - area->base),
as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
 
/branches/rcu/kernel/generic/src/mm/frame.c
70,16 → 70,19
typedef struct {
count_t refcount; /**< tracking of shared frames */
uint8_t buddy_order; /**< buddy system block order */
link_t buddy_link; /**< link to the next free block inside one order */
link_t buddy_link; /**< link to the next free block inside one
order */
void *parent; /**< If allocated by slab, this points there */
} frame_t;
 
typedef struct {
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */
pfn_t base; /**< frame_no of the first frame in the frames array */
pfn_t base; /**< frame_no of the first frame in the frames
array */
count_t count; /**< Size of zone */
 
frame_t *frames; /**< array of frame_t structures in this zone */
frame_t *frames; /**< array of frame_t structures in this
zone */
count_t free_count; /**< number of free frame_t structures */
count_t busy_count; /**< number of busy frame_t structures */
157,8 → 160,8
for (i = 0; i < zones.count; i++) {
/* Check for overflow */
z = zones.info[i];
if (overlaps(newzone->base,newzone->count,
z->base, z->count)) {
if (overlaps(newzone->base,newzone->count, z->base,
z->count)) {
printf("Zones overlap!\n");
return -1;
}
202,7 → 205,8
z = zones.info[i];
spinlock_lock(&z->lock);
if (z->base <= frame && z->base + z->count > frame) {
spinlock_unlock(&zones.lock); /* Unlock the global lock */
/* Unlock the global lock */
spinlock_unlock(&zones.lock);
if (pzone)
*pzone = i;
return z;
229,7 → 233,8
* Assume interrupts are disabled.
*
* @param order Size (2^order) of free space we are trying to find
* @param pzone Pointer to preferred zone or NULL, on return contains zone number
* @param pzone Pointer to preferred zone or NULL, on return contains zone
* number
*/
static zone_t * find_free_zone_and_lock(uint8_t order, unsigned int *pzone)
{
319,7 → 324,8
 
frame = list_get_instance(block, frame_t, buddy_link);
zone = (zone_t *) b->data;
ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), frame->buddy_order));
ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame),
frame->buddy_order));
is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame);
is_right = IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame);
382,7 → 388,8
* @param block Buddy system block
* @param order Order to set
*/
static void zone_buddy_set_order(buddy_system_t *b, link_t * block, uint8_t order) {
static void zone_buddy_set_order(buddy_system_t *b, link_t *block,
uint8_t order) {
frame_t * frame;
frame = list_get_instance(block, frame_t, buddy_link);
frame->buddy_order = order;
560,10 → 567,10
 
z->buddy_system = (buddy_system_t *)&z[1];
buddy_system_create(z->buddy_system, max_order,
&zone_buddy_system_operations,
(void *) z);
&zone_buddy_system_operations, (void *) z);
 
z->frames = (frame_t *)((uint8_t *) z->buddy_system + buddy_conf_size(max_order));
z->frames = (frame_t *)((uint8_t *) z->buddy_system +
buddy_conf_size(max_order));
for (i = 0; i < z->count; i++) {
/* This marks all frames busy */
frame_initialize(&z->frames[i]);
710,7 → 717,8
spinlock_lock(&zone1->lock);
spinlock_lock(&zone2->lock);
 
cframes = SIZE2FRAMES(zone_conf_size(zone2->base+zone2->count-zone1->base));
cframes = SIZE2FRAMES(zone_conf_size(zone2->base + zone2->count -
zone1->base));
if (cframes == 1)
order = 0;
else
803,7 → 811,8
/* Allocate frames _after_ the conframe */
/* Check sizes */
z->frames = (frame_t *)((uint8_t *) z->buddy_system + buddy_conf_size(max_order));
z->frames = (frame_t *)((uint8_t *) z->buddy_system +
buddy_conf_size(max_order));
for (i = 0; i < count; i++) {
frame_initialize(&z->frames[i]);
}
865,16 → 874,20
if (confframe >= start && confframe < start+count) {
for (;confframe < start + count; confframe++) {
addr = PFN2ADDR(confframe);
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.base), config.kernel_size))
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(config.base), config.kernel_size))
continue;
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.stack_base), config.stack_size))
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(config.stack_base), config.stack_size))
continue;
bool overlap = false;
count_t i;
for (i = 0; i < init.cnt; i++)
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(init.tasks[i].addr), init.tasks[i].size)) {
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(init.tasks[i].addr),
init.tasks[i].size)) {
overlap = true;
break;
}
1073,15 → 1086,21
/* Tell the architecture to create some memory */
frame_arch_init();
if (config.cpu_active == 1) {
frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), SIZE2FRAMES(config.kernel_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)), SIZE2FRAMES(config.stack_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
SIZE2FRAMES(config.kernel_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
SIZE2FRAMES(config.stack_size));
count_t i;
for (i = 0; i < init.cnt; i++)
frame_mark_unavailable(ADDR2PFN(KA2PA(init.tasks[i].addr)), SIZE2FRAMES(init.tasks[i].size));
for (i = 0; i < init.cnt; i++) {
pfn_t pfn = ADDR2PFN(KA2PA(init.tasks[i].addr));
frame_mark_unavailable(pfn,
SIZE2FRAMES(init.tasks[i].size));
}
 
if (ballocs.size)
frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)), SIZE2FRAMES(ballocs.size));
frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
SIZE2FRAMES(ballocs.size));
 
/* Black list first frame, as allocating NULL would
* fail in some places */
1106,7 → 1125,8
for (i = 0; i < zones.count; i++) {
zone = zones.info[i];
spinlock_lock(&zone->lock);
printf("%-2d %12p %12zd %12zd\n", i, PFN2ADDR(zone->base), zone->free_count, zone->busy_count);
printf("%-2d %12p %12zd %12zd\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
spinlock_unlock(&zone->lock);
}
spinlock_unlock(&zones.lock);
1138,10 → 1158,14
spinlock_lock(&zone->lock);
printf("Memory zone information\n");
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2, PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
printf("Available space: %zd frames (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2,
PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zdK)\n", zone->count,
((zone->count) * FRAME_SIZE) >> 10);
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count,
(zone->busy_count * FRAME_SIZE) >> 10);
printf("Available space: %zd frames (%zdK)\n", zone->free_count,
(zone->free_count * FRAME_SIZE) >> 10);
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
spinlock_unlock(&zone->lock);
1152,3 → 1176,4
 
/** @}
*/
 
/branches/rcu/kernel/generic/src/mm/backend_elf.c
71,7 → 71,8
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
*/
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
80,11 → 81,13
btree_node_t *leaf;
uintptr_t base, frame;
index_t i;
bool dirty = false;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
 
ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
ASSERT((addr >= entry->p_vaddr) &&
(addr < entry->p_vaddr + entry->p_memsz));
i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
base = (uintptr_t) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
107,7 → 110,8
*/
 
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
if (leaf->key[i] ==
ALIGN_DOWN(addr, PAGE_SIZE)) {
found = true;
break;
}
115,8 → 119,10
}
if (frame || found) {
frame_reference_add(ADDR2PFN(frame));
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
page_mapping_insert(AS, addr, frame,
as_area_get_flags(area));
if (!used_space_insert(area,
ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
mutex_unlock(&area->sh_info->lock);
return AS_PF_OK;
124,10 → 130,12
}
/*
* The area is either not shared or the pagemap does not contain the mapping.
* The area is either not shared or the pagemap does not contain the
* mapping.
*/
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE <
entry->p_vaddr + entry->p_filesz) {
/*
* Initialized portion of the segment. The memory is backed
* directly by the content of the ELF image. Pages are
138,11 → 146,14
*/
if (entry->p_flags & PF_W) {
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
memcpy((void *) PA2KA(frame),
(void *) (base + i * FRAME_SIZE), FRAME_SIZE);
dirty = true;
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
149,7 → 160,8
} else {
frame = KA2PA(base + i*FRAME_SIZE);
}
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >=
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
/*
* This is the uninitialized portion of the segment.
* It is not physically present in the ELF image.
158,10 → 170,12
*/
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
175,11 → 189,14
size = entry->p_filesz - (i<<PAGE_WIDTH);
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE),
size);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
211,31 → 228,28
uintptr_t base;
index_t i;
ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
ASSERT((page >= entry->p_vaddr) &&
(page < entry->p_vaddr + entry->p_memsz));
i = (page - entry->p_vaddr) >> PAGE_WIDTH;
base = (uintptr_t) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
if (page + PAGE_SIZE <
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
if (entry->p_flags & PF_W) {
/*
* Free the frame with the copy of writable segment data.
* Free the frame with the copy of writable segment
* data.
*/
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
} else {
/*
* The frame is either anonymous memory or the mixed case (i.e. lower
* part is backed by the ELF image and the upper is anonymous).
* In any case, a frame needs to be freed.
* The frame is either anonymous memory or the mixed case (i.e.
* lower part is backed by the ELF image and the upper is
* anonymous). In any case, a frame needs to be freed.
*/
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
}
 
260,10 → 274,12
* Find the node in which to start linear search.
*/
if (area->flags & AS_AREA_WRITE) {
node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
node = list_get_instance(area->used_space.leaf_head.next,
btree_node_t, leaf_link);
} else {
(void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap, leaf);
node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
leaf);
if (!node)
node = leaf;
}
272,7 → 288,8
* Copy used anonymous portions of the area to sh_info's page map.
*/
mutex_lock(&area->sh_info->lock);
for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; cur = cur->next) {
for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
cur = cur->next) {
int i;
node = list_get_instance(cur, btree_node_t, leaf_link);
294,19 → 311,26
pte_t *pte;
/*
* Skip read-only pages that are backed by the ELF image.
* Skip read-only pages that are backed by the
* ELF image.
*/
if (!(area->flags & AS_AREA_WRITE))
if (base + (j + 1)*PAGE_SIZE <= start_anon)
if (base + (j + 1) * PAGE_SIZE <=
start_anon)
continue;
page_table_lock(area->as, false);
pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
pte = page_mapping_find(area->as,
base + j * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap,
(base + j * PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
page_table_unlock(area->as, false);
frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
 
pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
frame_reference_add(pfn);
}
}