Subversion Repositories: HelenOS

Compare Revisions

Rev 2086 → Rev 2087

/trunk/kernel/generic/src/synch/rwlock.c
220,12 → 220,14
*/
case ESYNCH_OK_BLOCKED:
/*
* We were woken with rwl->readers_in already incremented.
* Note that this arrangement avoids race condition between
* two concurrent readers. (Race is avoided if 'exclusive' is
* locked at the same time as 'readers_in' is incremented.
* Same time means both events happen atomically when
* rwl->lock is held.)
* We were woken with rwl->readers_in already
* incremented.
*
* Note that this arrangement avoids race condition
* between two concurrent readers. (Race is avoided if
* 'exclusive' is locked at the same time as
* 'readers_in' is incremented. Same time means both
* events happen atomically when rwl->lock is held.)
*/
interrupts_restore(ipl);
break;
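
Aside: the comment above captures the key invariant — because the waker increments rwl->readers_in while still holding rwl->lock, there is no window in which 'exclusive' is reader-owned but the reader count is stale. A minimal standalone sketch of that invariant (simplified types, illustrative names, not the actual HelenOS rwlock):

/* Sketch: the waker accounts for the reader before the reader runs. */
typedef struct {
	int lock;        /* stand-in for the spinlock guarding the rwlock */
	int exclusive;   /* stand-in for the 'exclusive' semaphore */
	int readers_in;  /* readers currently inside the critical section */
} rwlock_sketch_t;

static void wake_one_reader(rwlock_sketch_t *rwl)
{
	/* spinlock_lock(&rwl->lock); */
	rwl->readers_in++;   /* incremented on the sleeping reader's behalf */
	/* ... hand 'exclusive' to the reader and wake it up ... */
	/* spinlock_unlock(&rwl->lock); */
}
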
323,7 → 325,8
spinlock_lock(&rwl->exclusive.sem.wq.lock);
 
if (!list_empty(&rwl->exclusive.sem.wq.head))
t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
wq_link);
do {
if (t) {
spinlock_lock(&t->lock);
343,7 → 346,8
if (type == RWLOCK_READER) {
/*
* Waking up a reader.
* We are responsible for incrementing rwl->readers_in for it.
* We are responsible for incrementing rwl->readers_in
* for it.
*/
rwl->readers_in++;
}
360,7 → 364,8
t = NULL;
if (!list_empty(&rwl->exclusive.sem.wq.head)) {
t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
t = list_get_instance(rwl->exclusive.sem.wq.head.next,
thread_t, wq_link);
if (t) {
spinlock_lock(&t->lock);
if (t->rwlock_holder_type != RWLOCK_READER)
/trunk/kernel/generic/src/synch/spinlock.c
107,8 → 107,9
continue;
#endif
if (i++ > DEADLOCK_THRESHOLD) {
printf("cpu%d: looping on spinlock %.*p:%s, caller=%.*p",
CPU->id, sizeof(uintptr_t) * 2, sl, sl->name, sizeof(uintptr_t) * 2, CALLER);
printf("cpu%d: looping on spinlock %.*p:%s, "
"caller=%.*p", CPU->id, sizeof(uintptr_t) * 2, sl,
sl->name, sizeof(uintptr_t) * 2, CALLER);
symbol = get_symtab_entry(CALLER);
if (symbol)
printf("(%s)", symbol);
/trunk/kernel/generic/src/synch/waitq.c
186,7 → 186,7
*
* The sleep can be interrupted only if the
* SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
*
* If usec is greater than zero, regardless of the value of the
* SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
* timeout, interruption or wakeup comes.
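
Aside: taken together, the documented rules give the caller a small decision table. A hedged caller-side sketch of waitq_sleep_timeout() semantics (wq is assumed to be an initialized wait queue; ESYNCH_INTERRUPTED is assumed from synch.h, the other constants appear in this revision):

int rc = waitq_sleep_timeout(&wq, 10000 /* usec */,
    SYNCH_FLAGS_INTERRUPTIBLE);
switch (rc) {
case ESYNCH_OK_ATOMIC:		/* satisfied without blocking */
case ESYNCH_OK_BLOCKED:		/* slept and was woken up */
	break;
case ESYNCH_TIMEOUT:		/* 10 ms elapsed with no wakeup */
	break;
case ESYNCH_INTERRUPTED:	/* only possible with the flag set */
	break;
}
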
352,7 → 352,7
}
THREAD->timeout_pending = true;
timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
waitq_timeouted_sleep, THREAD);
waitq_timeouted_sleep, THREAD);
}
 
list_append(&THREAD->wq_link, &wq->head);
/trunk/kernel/generic/src/synch/futex.c
102,11 → 102,12
/** Sleep in futex wait queue.
*
* @param uaddr Userspace address of the futex counter.
* @param usec If non-zero, number of microseconds this thread is willing to sleep.
* @param usec If non-zero, number of microseconds this thread is willing to
* sleep.
* @param flags Select mode of operation.
*
* @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See synch.h.
* If there is no physical mapping for uaddr, ENOENT is returned.
* @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See
* synch.h. If there is no physical mapping for uaddr, ENOENT is returned.
*/
unative_t sys_futex_sleep_timeout(uintptr_t uaddr, uint32_t usec, int flags)
{
134,7 → 135,8
 
futex = futex_find(paddr);
return (unative_t) waitq_sleep_timeout(&futex->wq, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
return (unative_t) waitq_sleep_timeout(&futex->wq, usec, flags |
SYNCH_FLAGS_INTERRUPTIBLE);
}
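
Aside: a counter-based futex like this is normally entered from userspace only on contention. A hedged sketch of the userspace fast path in plain C11 atomics — sys_futex_sleep() here is a hypothetical stand-in for invoking the syscall above, not the HelenOS libc API:

#include <stdatomic.h>
#include <stdint.h>

extern int sys_futex_sleep(uintptr_t uaddr, uint32_t usec, int flags);

/* Decrement the counter; enter the kernel only if it went negative. */
static int futex_down_sketch(atomic_int *val)
{
	if (atomic_fetch_sub(val, 1) <= 0)
		return sys_futex_sleep((uintptr_t) val, 0, 0);
	return 0;	/* fast path: no syscall needed */
}
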
 
/** Wake up one thread waiting in the futex wait queue.
242,7 → 244,8
* current task's B+tree of known futexes.
*/
futex->refcount++;
btree_insert(&TASK->futexes, paddr, futex, leaf);
btree_insert(&TASK->futexes, paddr, futex,
leaf);
}
mutex_unlock(&TASK->futexes_lock);
271,7 → 274,8
 
/** Compute hash index into futex hash table.
*
* @param key Address where the key (i.e. physical address of futex counter) is stored.
* @param key Address where the key (i.e. physical address of futex counter) is
* stored.
*
* @return Index into futex hash table.
*/
282,7 → 286,8
 
/** Compare futex hash table item with a key.
*
* @param key Address where the key (i.e. physical address of futex counter) is stored.
* @param key Address where the key (i.e. physical address of futex counter) is
* stored.
*
* @return True if the item matches the key. False otherwise.
*/
316,7 → 321,8
rwlock_write_lock(&futex_ht_lock);
mutex_lock(&TASK->futexes_lock);
 
for (cur = TASK->futexes.leaf_head.next; cur != &TASK->futexes.leaf_head; cur = cur->next) {
for (cur = TASK->futexes.leaf_head.next;
cur != &TASK->futexes.leaf_head; cur = cur->next) {
btree_node_t *node;
int i;
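
Aside: the loop being re-wrapped here is the B+tree leaf walk used throughout this revision — leaf_head is a sentinel in a circular list, and iteration stops when the cursor comes back around to it. A self-contained sketch of the idiom (the cast mimics what list_get_instance() does):

#include <stddef.h>
#include <stdio.h>

typedef struct link {
	struct link *prev, *next;
} link_t;

typedef struct {
	link_t leaf_link;	/* linkage embedded inside the node */
	int keys;
} node_sketch_t;

static void walk_leaves(link_t *leaf_head)
{
	link_t *cur;

	for (cur = leaf_head->next; cur != leaf_head; cur = cur->next) {
		/* container-of step, as list_get_instance(cur,
		 * node_sketch_t, leaf_link) would do */
		node_sketch_t *node = (node_sketch_t *)
		    ((char *) cur - offsetof(node_sketch_t, leaf_link));
		printf("leaf with %d keys\n", node->keys);
	}
}
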
/trunk/kernel/generic/src/main/kinit.c
98,7 → 98,8
* not mess together with kcpulb threads.
* Just a beautification.
*/
if ((t = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp", true))) {
if ((t = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED,
"kmp", true))) {
spinlock_lock(&t->lock);
t->cpu = &cpus[0];
spinlock_unlock(&t->lock);
123,7 → 124,8
*/
for (i = 0; i < config.cpu_count; i++) {
 
if ((t = thread_create(kcpulb, NULL, TASK, THREAD_FLAG_WIRED, "kcpulb", true))) {
if ((t = thread_create(kcpulb, NULL, TASK,
THREAD_FLAG_WIRED, "kcpulb", true))) {
spinlock_lock(&t->lock);
t->cpu = &cpus[i];
spinlock_unlock(&t->lock);
143,7 → 145,8
/*
* Create kernel console.
*/
if ((t = thread_create(kconsole, "kconsole", TASK, 0, "kconsole", false)))
t = thread_create(kconsole, "kconsole", TASK, 0, "kconsole", false);
if (t)
thread_ready(t);
else
panic("thread_create/kconsole\n");
161,17 → 164,20
continue;
}
 
task_t *utask = task_run_program((void *) init.tasks[i].addr, "uspace");
task_t *utask = task_run_program((void *) init.tasks[i].addr,
"uspace");
if (utask) {
/*
* Set capabilities to init userspace tasks.
*/
cap_set(utask, CAP_CAP | CAP_MEM_MANAGER | CAP_IO_MANAGER | CAP_PREEMPT_CONTROL | CAP_IRQ_REG);
cap_set(utask, CAP_CAP | CAP_MEM_MANAGER |
CAP_IO_MANAGER | CAP_PREEMPT_CONTROL | CAP_IRQ_REG);
if (!ipc_phone_0)
ipc_phone_0 = &utask->answerbox;
} else {
int rd = init_rd((rd_header *) init.tasks[i].addr, init.tasks[i].size);
int rd = init_rd((rd_header *) init.tasks[i].addr,
init.tasks[i].size);
if (rd != RE_OK)
printf("Init binary %zd not used.\n", i);
/trunk/kernel/generic/src/main/main.c
146,7 → 146,7
config.memory_size = get_memory_size();
config.kernel_size = ALIGN_UP(hardcoded_ktext_size +
hardcoded_kdata_size, PAGE_SIZE);
hardcoded_kdata_size, PAGE_SIZE);
config.stack_size = CONFIG_STACK_SIZE;
/* Initially the stack is placed just after the kernel */
156,17 → 156,17
count_t i;
for (i = 0; i < init.cnt; i++) {
if (PA_overlaps(config.stack_base, config.stack_size,
init.tasks[i].addr, init.tasks[i].size))
init.tasks[i].addr, init.tasks[i].size))
config.stack_base = ALIGN_UP(init.tasks[i].addr +
init.tasks[i].size, config.stack_size);
init.tasks[i].size, config.stack_size);
}
 
/* Avoid placing stack on top of boot allocations. */
if (ballocs.size) {
if (PA_overlaps(config.stack_base, config.stack_size,
ballocs.base, ballocs.size))
ballocs.base, ballocs.size))
config.stack_base = ALIGN_UP(ballocs.base +
ballocs.size, PAGE_SIZE);
ballocs.size, PAGE_SIZE);
}
if (config.stack_base < stack_safe)
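
Aside: the relocation above bumps the stack base past any overlapping init image, rounded up to the stack's own alignment. A worked standalone example with made-up addresses (ALIGN_UP and the overlap test are re-implemented here for illustration; ALIGN_UP assumes a power-of-two size):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(a, sz)	(((a) + (sz) - 1) & ~((uintptr_t) (sz) - 1))

static int pa_overlaps(uintptr_t b1, uintptr_t s1, uintptr_t b2, uintptr_t s2)
{
	return (b1 < b2 + s2) && (b2 < b1 + s1);
}

int main(void)
{
	uintptr_t stack_base = 0x100000, stack_size = 0x4000;
	uintptr_t task_addr = 0x102000, task_size = 0x3000;

	if (pa_overlaps(stack_base, stack_size, task_addr, task_size))
		stack_base = ALIGN_UP(task_addr + task_size, stack_size);

	/* prints 0x108000: first stack_size-aligned address past the task */
	printf("stack_base=%#lx\n", (unsigned long) stack_base);
	return 0;
}
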
174,7 → 174,7
context_save(&ctx);
context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,
THREAD_STACK_SIZE);
THREAD_STACK_SIZE);
context_restore(&ctx);
/* not reached */
}
222,11 → 222,11
 
version_print();
printf("kernel: %.*p hardcoded_ktext_size=%zdK, "
"hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2,
config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >>
10);
"hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2,
config.base, hardcoded_ktext_size >> 10,
hardcoded_kdata_size >> 10);
printf("stack: %.*p size=%zdK\n", sizeof(uintptr_t) * 2,
config.stack_base, config.stack_size >> 10);
config.stack_base, config.stack_size >> 10);
 
arch_pre_smp_init();
smp_init();
249,8 → 249,8
if (init.cnt > 0) {
for (i = 0; i < init.cnt; i++)
printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i,
sizeof(uintptr_t) * 2, init.tasks[i].addr, i,
init.tasks[i].size);
sizeof(uintptr_t) * 2, init.tasks[i].addr, i,
init.tasks[i].size);
} else
printf("No init binaries found\n");
323,7 → 323,7
* switch to this cpu's private stack prior to waking kmp up.
*/
context_set(&CPU->saved_context, FADDR(main_ap_separated_stack),
(uintptr_t) CPU->stack, CPU_STACK_SIZE);
(uintptr_t) CPU->stack, CPU_STACK_SIZE);
context_restore(&CPU->saved_context);
/* not reached */
}
/trunk/kernel/generic/src/main/uinit.c
48,7 → 48,8
 
/** Thread used to bring up userspace thread.
*
* @param arg Pointer to structure containing userspace entry and stack addresses.
* @param arg Pointer to structure containing userspace entry and stack
* addresses.
*/
void uinit(void *arg)
{
/trunk/kernel/generic/src/proc/scheduler.c
360,7 → 360,7
*/
context_save(&CPU->saved_context);
context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
(uintptr_t) CPU->stack, CPU_STACK_SIZE);
(uintptr_t) CPU->stack, CPU_STACK_SIZE);
context_restore(&CPU->saved_context);
/* not reached */
}
500,8 → 500,8
 
#ifdef SCHEDULER_VERBOSE
printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
atomic_get(&CPU->nrdy));
CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
atomic_get(&CPU->nrdy));
#endif
 
/*
635,9 → 635,9
spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
"avg=%nd\n", CPU->id, t->tid, CPU->id,
atomic_get(&CPU->nrdy),
atomic_get(&nrdy) / config.cpu_active);
"avg=%nd\n", CPU->id, t->tid, CPU->id,
atomic_get(&CPU->nrdy),
atomic_get(&nrdy) / config.cpu_active);
#endif
t->flags |= THREAD_FLAG_STOLEN;
t->state = Entering;
703,8 → 703,8
 
spinlock_lock(&cpus[cpu].lock);
printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
cpus[cpu].needs_relink);
cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
cpus[cpu].needs_relink);
for (i = 0; i < RQ_COUNT; i++) {
r = &cpus[cpu].rq[i];
718,7 → 718,7
cur = cur->next) {
t = list_get_instance(cur, thread_t, rq_link);
printf("%d(%s) ", t->tid,
thread_states[t->state]);
thread_states[t->state]);
}
printf("\n");
spinlock_unlock(&r->lock);
/trunk/kernel/generic/src/proc/task.c
65,9 → 65,11
 
/** B+tree of active tasks.
*
* The task is guaranteed to exist after it was found in the tasks_btree as long as:
* The task is guaranteed to exist after it was found in the tasks_btree as
* long as:
* @li the tasks_lock is held,
* @li the task's lock is held, provided it was acquired before releasing tasks_lock, or
* @li the task's lock is held, provided it was acquired before releasing
* tasks_lock, or
* @li the task's refcount is greater than 0
*
*/
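
Aside: a hedged sketch of the lookup discipline this comment describes — the task must be pinned (its lock taken, or its refcount raised) while tasks_lock is still held, or it may be destroyed underneath the caller. The helper below is illustrative, not the kernel API:

/* Pin a task found in tasks_btree before releasing tasks_lock. */
task_t *task_find_and_lock_sketch(task_id_t id)
{
	task_t *ta;

	spinlock_lock(&tasks_lock);
	ta = task_find_by_id(id);	/* searches tasks_btree */
	if (ta)
		spinlock_lock(&ta->lock);	/* pin while tasks_lock held */
	spinlock_unlock(&tasks_lock);
	return ta;	/* still locked, hence guaranteed to exist */
}
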
125,7 → 127,8
ipc_answerbox_init(&ta->answerbox);
for (i = 0; i < IPC_MAX_PHONES; i++)
ipc_phone_init(&ta->phones[i]);
if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context, ta->context)))
if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
ta->context)))
ipc_phone_connect(&ta->phones[0], ipc_phone_0);
atomic_set(&ta->active_calls, 0);
 
202,7 → 205,8
}
kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
kernel_uarg->uspace_entry = (void *) ((elf_header_t *) program_addr)->e_entry;
kernel_uarg->uspace_entry =
(void *) ((elf_header_t *) program_addr)->e_entry;
kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
kernel_uarg->uspace_thread_function = NULL;
kernel_uarg->uspace_thread_arg = NULL;
214,14 → 218,15
/*
* Create the data as_area.
*/
a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
USTACK_ADDRESS, AS_AREA_ATTR_NONE, &anon_backend, NULL);
a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
AS_AREA_ATTR_NONE, &anon_backend, NULL);
 
/*
* Create the main thread.
*/
t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE, "uinit", false);
t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
"uinit", false);
ASSERT(t1);
/*
238,7 → 243,8
 
/** Syscall for reading task ID from userspace.
*
* @param uspace_task_id Userspace address of 8-byte buffer where to store current task ID.
* @param uspace_task_id Userspace address of 8-byte buffer where to store
* current task ID.
*
* @return 0 on success or an error code from @ref errno.h.
*/
248,7 → 254,8
* No need to acquire lock on TASK because taskid
* remains constant for the lifespan of the task.
*/
return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid, sizeof(TASK->taskid));
return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
sizeof(TASK->taskid));
}
 
/** Find task structure corresponding to task ID.
288,8 → 295,10
spinlock_lock(&thr->lock);
/* Process only counted threads */
if (!thr->uncounted) {
if (thr == THREAD) /* Update accounting of current thread */
thread_update_accounting();
if (thr == THREAD) {
/* Update accounting of current thread */
thread_update_accounting();
}
ret += thr->cycles;
}
spinlock_unlock(&thr->lock);
376,10 → 385,12
ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
printf("taskid name ctx address as cycles threads calls callee\n");
printf("------ ---------- --- ---------- ---------- ---------- ------- ------ ------>\n");
printf("taskid name ctx address as cycles threads "
"calls callee\n");
printf("------ ---------- --- ---------- ---------- ---------- ------- " "------ ------>\n");
 
for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head; cur = cur->next) {
for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
cur = cur->next) {
btree_node_t *node;
int i;
396,10 → 407,14
char suffix;
order(task_get_accounting(t), &cycles, &suffix);
printf("%-6lld %-10s %-3ld %#10zx %#10zx %9llu%c %7zd %6zd", t->taskid, t->name, t->context, t, t->as, cycles, suffix, t->refcount, atomic_get(&t->active_calls));
printf("%-6lld %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
"%6zd", t->taskid, t->name, t->context, t, t->as,
cycles, suffix, t->refcount,
atomic_get(&t->active_calls));
for (j = 0; j < IPC_MAX_PHONES; j++) {
if (t->phones[j].callee)
printf(" %zd:%#zx", j, t->phones[j].callee);
printf(" %zd:%#zx", j,
t->phones[j].callee);
}
printf("\n");
465,10 → 480,11
}
if (t != THREAD) {
ASSERT(t != main_thread); /* uinit is joined and detached in ktaskgc */
ASSERT(t != main_thread); /* uinit is joined and detached
* in ktaskgc */
thread_join(t);
thread_detach(t);
goto loop; /* go for another thread */
goto loop; /* go for another thread */
}
/*
497,22 → 513,26
* Userspace threads cannot detach themselves,
* therefore the thread pointer is guaranteed to be valid.
*/
if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) { /* sleep uninterruptibly here! */
if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
ESYNCH_TIMEOUT) { /* sleep uninterruptibly here! */
ipl_t ipl;
link_t *cur;
thread_t *thr = NULL;
/*
* The join timed out. Try to do some garbage collection of Undead threads.
* The join timed out. Try to do some garbage collection of
* Undead threads.
*/
more_gc:
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
for (cur = TASK->th_head.next; cur != &TASK->th_head;
cur = cur->next) {
thr = list_get_instance(cur, thread_t, th_link);
spinlock_lock(&thr->lock);
if (thr != t && thr->state == Undead && thr->join_type == None) {
if (thr != t && thr->state == Undead &&
thr->join_type == None) {
thr->join_type = TaskGC;
spinlock_unlock(&thr->lock);
break;
/trunk/kernel/generic/src/proc/thread.c
204,11 → 204,11
THREAD = NULL;
atomic_set(&nrdy,0);
thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
thr_constructor, thr_destructor, 0);
thr_constructor, thr_destructor, 0);
 
#ifdef ARCH_HAS_FPU
fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
FPU_CONTEXT_ALIGN, NULL, NULL, 0);
FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif
 
btree_create(&threads_btree);
328,7 → 328,7
/* Not needed, but good for debugging */
memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
0);
0);
ipl = interrupts_disable();
spinlock_lock(&tidlock);
338,7 → 338,7
context_save(&t->saved_context);
context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
THREAD_STACK_SIZE);
THREAD_STACK_SIZE);
the_initialize((the_t *) t->kstack);
404,7 → 404,7
*/
spinlock_lock(&threads_lock);
btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t,
NULL);
NULL);
spinlock_unlock(&threads_lock);
interrupts_restore(ipl);
560,10 → 560,13
ipl = interrupts_disable();
spinlock_lock(&threads_lock);
printf("tid name address state task ctx code stack cycles cpu kstack waitqueue\n");
printf("------ ---------- ---------- -------- ---------- --- ---------- ---------- ---------- ---- ---------- ----------\n");
printf("tid name address state task ctx code "
" stack cycles cpu kstack waitqueue\n");
printf("------ ---------- ---------- -------- ---------- --- --------"
"-- ---------- ---------- ---- ---------- ----------\n");
 
for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
for (cur = threads_btree.leaf_head.next;
cur != &threads_btree.leaf_head; cur = cur->next) {
btree_node_t *node;
int i;
 
577,7 → 580,10
char suffix;
order(t->cycles, &cycles, &suffix);
printf("%-6zd %-10s %#10zx %-8s %#10zx %-3ld %#10zx %#10zx %9llu%c ", t->tid, t->name, t, thread_states[t->state], t->task, t->task->context, t->thread_code, t->kstack, cycles, suffix);
printf("%-6zd %-10s %#10zx %-8s %#10zx %-3ld %#10zx "
"%#10zx %9llu%c ", t->tid, t->name, t,
thread_states[t->state], t->task, t->task->context,
t->thread_code, t->kstack, cycles, suffix);
if (t->cpu)
printf("%-4zd", t->cpu->id);
585,7 → 591,8
printf("none");
if (t->state == Sleeping)
printf(" %#10zx %#10zx", t->kstack, t->sleep_queue);
printf(" %#10zx %#10zx", t->kstack,
t->sleep_queue);
printf("\n");
}
608,7 → 615,8
{
btree_node_t *leaf;
return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t), &leaf) != NULL;
return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t),
&leaf) != NULL;
}
 
 
647,7 → 655,9
return (unative_t) rc;
}
 
if ((t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf, false))) {
t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf,
false);
if (t) {
tid = t->tid;
thread_ready(t);
return (unative_t) tid;
670,3 → 680,4
 
/** @}
*/
 
/trunk/kernel/generic/src/mm/as.c
93,7 → 93,10
*/
static slab_cache_t *as_slab;
 
/** This lock protects the inactive_as_with_asid_head list. It must be acquired before the as_t mutex. */
/**
* This lock protects the inactive_as_with_asid_head list. It must be
* acquired before the as_t mutex.
*/
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
 
/**
107,7 → 110,8
 
static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);
 
static int as_constructor(void *obj, int flags)
136,7 → 140,7
as_arch_init();
as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
AS_KERNEL = as_create(FLAG_AS_KERNEL);
if (!AS_KERNEL)
171,8 → 175,8
 
/** Destroy address space.
*
* When there are no tasks referencing this address space (i.e. its refcount is zero),
* the address space can be destroyed.
* When there are no tasks referencing this address space (i.e. its refcount is
* zero), the address space can be destroyed.
*/
void as_destroy(as_t *as)
{
203,7 → 207,8
btree_node_t *node;
 
ASSERT(!list_empty(&as->as_area_btree.leaf_head));
node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
node = list_get_instance(as->as_area_btree.leaf_head.next,
btree_node_t, leaf_link);
 
if ((cond = node->keys)) {
as_area_destroy(as, node->key[0]);
272,7 → 277,8
if (backend_data)
a->backend_data = *backend_data;
else
memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
0);
 
btree_create(&a->used_space);
287,7 → 293,8
/** Find address space area and change it.
*
* @param as Address space.
* @param address Virtual address belonging to the area to be changed. Must be page-aligned.
* @param address Virtual address belonging to the area to be changed. Must be
* page-aligned.
* @param size New size of the virtual memory block starting at address.
* @param flags Flags influencing the remap operation. Currently unused.
*
356,7 → 363,8
/*
* Start TLB shootdown sequence.
*/
tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
pages * PAGE_SIZE, area->pages - pages);
 
/*
* Remove frames belonging to used space starting from
369,37 → 377,47
btree_node_t *node;
ASSERT(!list_empty(&area->used_space.leaf_head));
node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
node =
list_get_instance(area->used_space.leaf_head.prev,
btree_node_t, leaf_link);
if ((cond = (bool) node->keys)) {
uintptr_t b = node->key[node->keys - 1];
count_t c = (count_t) node->value[node->keys - 1];
count_t c =
(count_t) node->value[node->keys - 1];
int i = 0;
if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
if (overlaps(b, c * PAGE_SIZE, area->base,
pages*PAGE_SIZE)) {
if (b + c*PAGE_SIZE <= start_free) {
if (b + c * PAGE_SIZE <= start_free) {
/*
* The whole interval fits completely
* in the resized address space area.
* The whole interval fits
* completely in the resized
* address space area.
*/
break;
}
/*
* Part of the interval corresponding to b and c
* overlaps with the resized address space area.
* Part of the interval corresponding
* to b and c overlaps with the resized
* address space area.
*/
cond = false; /* we are almost done */
i = (start_free - b) >> PAGE_WIDTH;
if (!used_space_remove(area, start_free, c - i))
panic("Could not remove used space.\n");
if (!used_space_remove(area, start_free,
c - i))
panic("Could not remove used "
"space.\n");
} else {
/*
* The interval of used space can be completely removed.
* The interval of used space can be
* completely removed.
*/
if (!used_space_remove(area, b, c))
panic("Could not remove used space.\n");
panic("Could not remove used "
"space.\n");
}
for (; i < c; i++) {
406,13 → 424,18
pte_t *pte;
page_table_lock(as, false);
pte = page_mapping_find(as, b + i*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
if (area->backend && area->backend->frame_free) {
pte = page_mapping_find(as, b +
i * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
PTE_PRESENT(pte));
if (area->backend &&
area->backend->frame_free) {
area->backend->frame_free(area,
b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
b + i * PAGE_SIZE,
PTE_GET_FRAME(pte));
}
page_mapping_remove(as, b + i*PAGE_SIZE);
page_mapping_remove(as, b +
i * PAGE_SIZE);
page_table_unlock(as, false);
}
}
421,19 → 444,22
/*
* Finish TLB shootdown sequence.
*/
tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
area->pages - pages);
tlb_shootdown_finalize();
/*
* Invalidate software translation caches (e.g. TSB on sparc64).
*/
as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages);
as_invalidate_translation_cache(as, area->base +
pages * PAGE_SIZE, area->pages - pages);
} else {
/*
* Growing the area.
* Check for overlaps with other address space areas.
*/
if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
area)) {
mutex_unlock(&area->lock);
mutex_unlock(&as->lock);
interrupts_restore(ipl);
484,7 → 510,8
/*
* Visit only the pages mapped by used_space B+tree.
*/
for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
for (cur = area->used_space.leaf_head.next;
cur != &area->used_space.leaf_head; cur = cur->next) {
btree_node_t *node;
int i;
496,13 → 523,15
for (j = 0; j < (count_t) node->value[i]; j++) {
page_table_lock(as, false);
pte = page_mapping_find(as, b + j*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
if (area->backend && area->backend->frame_free) {
area->backend->frame_free(area,
b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
pte = page_mapping_find(as, b + j * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
PTE_PRESENT(pte));
if (area->backend &&
area->backend->frame_free) {
area->backend->frame_free(area, b +
j * PAGE_SIZE, PTE_GET_FRAME(pte));
}
page_mapping_remove(as, b + j*PAGE_SIZE);
page_mapping_remove(as, b + j * PAGE_SIZE);
page_table_unlock(as, false);
}
}
515,7 → 544,8
tlb_shootdown_finalize();
/*
* Invalidate potential software translation caches (e.g. TSB on sparc64).
* Invalidate potential software translation caches (e.g. TSB on
* sparc64).
*/
as_invalidate_translation_cache(as, area->base, area->pages);
605,7 → 635,8
if (src_flags & AS_AREA_CACHEABLE)
dst_flags_mask |= AS_AREA_CACHEABLE;
 
if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
if (src_size != acc_size ||
(src_flags & dst_flags_mask) != dst_flags_mask) {
mutex_unlock(&src_area->lock);
mutex_unlock(&src_as->lock);
interrupts_restore(ipl);
658,7 → 689,7
* to support sharing in less privileged mode.
*/
dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
if (!dst_area) {
/*
* Destination address space area could not be created.
776,8 → 807,8
if ((pte = page_mapping_find(AS, page))) {
if (PTE_PRESENT(pte)) {
if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
(access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
(access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
(access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
(access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
page_table_unlock(AS, false);
mutex_unlock(&area->lock);
mutex_unlock(&AS->lock);
804,10 → 835,12
page_fault:
if (THREAD->in_copy_from_uspace) {
THREAD->in_copy_from_uspace = false;
istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
istate_set_retaddr(istate,
(uintptr_t) &memcpy_from_uspace_failover_address);
} else if (THREAD->in_copy_to_uspace) {
THREAD->in_copy_to_uspace = false;
istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
istate_set_retaddr(istate,
(uintptr_t) &memcpy_to_uspace_failover_address);
} else {
return AS_PF_FAULT;
}
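
Aside: the failover addresses used above implement fault recovery for user-memory copies — the page-fault handler cannot return an error into the middle of memcpy, so it rewrites the interrupted return address instead, and the copy routine resumes at its failure label. A standalone analogy using setjmp/longjmp for the same control transfer:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf failover;	/* analogue of the failover address */

static void fault_handler_sketch(void)
{
	longjmp(failover, 1);	/* analogue of istate_set_retaddr() */
}

static int copy_from_uspace_sketch(void)
{
	if (setjmp(failover))
		return -1;	/* resumed at the failure label */
	/* ... copy loop that may fault on an unmapped page ... */
	fault_handler_sketch();	/* simulate a fault mid-copy */
	return 0;
}

int main(void)
{
	printf("%d\n", copy_from_uspace_sketch());	/* prints -1 */
	return 0;
}
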
845,7 → 878,8
* ASID.
*/
ASSERT(old->asid != ASID_INVALID);
list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
list_append(&old->inactive_as_with_asid_link,
&inactive_as_with_asid_head);
}
mutex_unlock(&old->lock);
 
861,10 → 895,14
*/
mutex_lock_active(&new->lock);
if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
if (new->asid != ASID_INVALID)
if (new->asid != ASID_INVALID) {
list_remove(&new->inactive_as_with_asid_link);
else
needs_asid = true; /* defer call to asid_get() until new->lock is released */
} else {
/*
* Defer call to asid_get() until new->lock is released.
*/
needs_asid = true;
}
}
SET_PTL0_ADDRESS(new->page_table);
mutex_unlock(&new->lock);
1006,7 → 1044,8
* @param as Address space.
* @param va Virtual address.
*
* @return Locked address space area containing va on success or NULL on failure.
* @return Locked address space area containing va on success or NULL on
* failure.
*/
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
1041,7 → 1080,8
* Second, locate the left neighbour and test its last record.
* Because of its position in the B+tree, it must have base < va.
*/
if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
if (lnode) {
a = (as_area_t *) lnode->value[lnode->keys - 1];
mutex_lock(&a->lock);
if (va < a->base + a->pages * PAGE_SIZE) {
1064,7 → 1104,8
*
* @return True if there is no conflict, false otherwise.
*/
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
as_area_t *avoid_area)
{
as_area_t *a;
btree_node_t *leaf, *node;
1099,7 → 1140,8
}
mutex_unlock(&a->lock);
}
if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
if (node) {
a = (as_area_t *) node->value[0];
mutex_lock(&a->lock);
if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1130,7 → 1172,8
*/
if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
return !overlaps(va, size,
KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
KERNEL_ADDRESS_SPACE_START,
KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
}
 
return true;
1189,8 → 1232,10
 
node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
if (node) {
uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
uintptr_t left_pg = node->key[node->keys - 1];
uintptr_t right_pg = leaf->key[0];
count_t left_cnt = (count_t) node->value[node->keys - 1];
count_t right_cnt = (count_t) leaf->value[0];
/*
* Examine the possibility that the interval fits
1200,25 → 1245,35
if (page >= right_pg) {
/* Do nothing. */
} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
} else if (overlaps(page, count * PAGE_SIZE, left_pg,
left_cnt * PAGE_SIZE)) {
/* The interval intersects with the left interval. */
return 0;
} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
} else if (overlaps(page, count * PAGE_SIZE, right_pg,
right_cnt * PAGE_SIZE)) {
/* The interval intersects with the right interval. */
return 0;
} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
/* The interval can be added by merging the two already present intervals. */
} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
(page + count * PAGE_SIZE == right_pg)) {
/*
* The interval can be added by merging the two already
* present intervals.
*/
node->value[node->keys - 1] += count + right_cnt;
btree_remove(&a->used_space, right_pg, leaf);
return 1;
} else if (page == left_pg + left_cnt*PAGE_SIZE) {
/* The interval can be added by simply growing the left interval. */
} else if (page == left_pg + left_cnt * PAGE_SIZE) {
/*
* The interval can be added by simply growing the left
* interval.
*/
node->value[node->keys - 1] += count;
return 1;
} else if (page + count*PAGE_SIZE == right_pg) {
} else if (page + count * PAGE_SIZE == right_pg) {
/*
* The interval can be added by simply moving the base of the right
* interval down and increasing its size accordingly.
* The interval can be added by simply moving the base
* of the right interval down and increasing its size
* accordingly.
*/
leaf->value[0] += count;
leaf->key[0] = page;
1228,7 → 1283,8
* The interval is between both neighbouring intervals,
* but cannot be merged with any of them.
*/
btree_insert(&a->used_space, page, (void *) count, leaf);
btree_insert(&a->used_space, page, (void *) count,
leaf);
return 1;
}
} else if (page < leaf->key[0]) {
1236,17 → 1292,19
count_t right_cnt = (count_t) leaf->value[0];
/*
* Investigate the border case in which the left neighbour does not
* exist but the interval fits from the left.
* Investigate the border case in which the left neighbour does
* not exist but the interval fits from the left.
*/
if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
if (overlaps(page, count * PAGE_SIZE, right_pg,
right_cnt * PAGE_SIZE)) {
/* The interval intersects with the right interval. */
return 0;
} else if (page + count*PAGE_SIZE == right_pg) {
} else if (page + count * PAGE_SIZE == right_pg) {
/*
* The interval can be added by moving the base of the right interval down
* and increasing its size accordingly.
* The interval can be added by moving the base of the
* right interval down and increasing its size
* accordingly.
*/
leaf->key[0] = page;
leaf->value[0] += count;
1256,7 → 1314,8
* The interval doesn't adjoin with the right interval.
* It must be added individually.
*/
btree_insert(&a->used_space, page, (void *) count, leaf);
btree_insert(&a->used_space, page, (void *) count,
leaf);
return 1;
}
}
1263,8 → 1322,10
 
node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
if (node) {
uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
uintptr_t left_pg = leaf->key[leaf->keys - 1];
uintptr_t right_pg = node->key[0];
count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
count_t right_cnt = (count_t) node->value[0];
/*
* Examine the possibility that the interval fits
1274,25 → 1335,35
 
if (page < left_pg) {
/* Do nothing. */
} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
} else if (overlaps(page, count * PAGE_SIZE, left_pg,
left_cnt * PAGE_SIZE)) {
/* The interval intersects with the left interval. */
return 0;
} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
} else if (overlaps(page, count * PAGE_SIZE, right_pg,
right_cnt * PAGE_SIZE)) {
/* The interval intersects with the right interval. */
return 0;
} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
/* The interval can be added by merging the two already present intervals. */
} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
(page + count * PAGE_SIZE == right_pg)) {
/*
* The interval can be added by merging the two already
* present intervals.
*/
leaf->value[leaf->keys - 1] += count + right_cnt;
btree_remove(&a->used_space, right_pg, node);
return 1;
} else if (page == left_pg + left_cnt*PAGE_SIZE) {
/* The interval can be added by simply growing the left interval. */
} else if (page == left_pg + left_cnt * PAGE_SIZE) {
/*
* The interval can be added by simply growing the left
* interval.
*/
leaf->value[leaf->keys - 1] += count;
return 1;
} else if (page + count*PAGE_SIZE == right_pg) {
} else if (page + count * PAGE_SIZE == right_pg) {
/*
* The interval can be added by simply moving the base of the right
* interval down and increasing its size accordingly.
* The interval can be added by simply moving the base
* of the right interval down and increasing its size
* accordingly.
*/
node->value[0] += count;
node->key[0] = page;
1302,7 → 1373,8
* The interval is between both neighbouring intervals,
* but cannot be merged with any of them.
*/
btree_insert(&a->used_space, page, (void *) count, leaf);
btree_insert(&a->used_space, page, (void *) count,
leaf);
return 1;
}
} else if (page >= leaf->key[leaf->keys - 1]) {
1310,15 → 1382,19
count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
/*
* Investigate the border case in which the right neighbour does not
* exist but the interval fits from the right.
* Investigate the border case in which the right neighbour
* does not exist but the interval fits from the right.
*/
if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
if (overlaps(page, count * PAGE_SIZE, left_pg,
left_cnt * PAGE_SIZE)) {
/* The interval intersects with the left interval. */
return 0;
} else if (left_pg + left_cnt*PAGE_SIZE == page) {
/* The interval can be added by growing the left interval. */
} else if (left_pg + left_cnt * PAGE_SIZE == page) {
/*
* The interval can be added by growing the left
* interval.
*/
leaf->value[leaf->keys - 1] += count;
return 1;
} else {
1326,44 → 1402,63
* The interval doesn't adjoin with the left interval.
* It must be added individually.
*/
btree_insert(&a->used_space, page, (void *) count, leaf);
btree_insert(&a->used_space, page, (void *) count,
leaf);
return 1;
}
}
/*
* Note that if the algorithm made it thus far, the interval can fit only
* between two other intervals of the leaf. The two border cases were already
* resolved.
* Note that if the algorithm made it thus far, the interval can fit
* only between two other intervals of the leaf. The two border cases
* were already resolved.
*/
for (i = 1; i < leaf->keys; i++) {
if (page < leaf->key[i]) {
uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
uintptr_t left_pg = leaf->key[i - 1];
uintptr_t right_pg = leaf->key[i];
count_t left_cnt = (count_t) leaf->value[i - 1];
count_t right_cnt = (count_t) leaf->value[i];
 
/*
* The interval fits between left_pg and right_pg.
*/
 
if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
/* The interval intersects with the left interval. */
if (overlaps(page, count * PAGE_SIZE, left_pg,
left_cnt * PAGE_SIZE)) {
/*
* The interval intersects with the left
* interval.
*/
return 0;
} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
/* The interval intersects with the right interval. */
} else if (overlaps(page, count * PAGE_SIZE, right_pg,
right_cnt * PAGE_SIZE)) {
/*
* The interval intersects with the right
* interval.
*/
return 0;
} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
/* The interval can be added by merging the two already present intervals. */
} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
(page + count * PAGE_SIZE == right_pg)) {
/*
* The interval can be added by merging the two
* already present intervals.
*/
leaf->value[i - 1] += count + right_cnt;
btree_remove(&a->used_space, right_pg, leaf);
return 1;
} else if (page == left_pg + left_cnt*PAGE_SIZE) {
/* The interval can be added by simply growing the left interval. */
} else if (page == left_pg + left_cnt * PAGE_SIZE) {
/*
* The interval can be added by simply growing
* the left interval.
*/
leaf->value[i - 1] += count;
return 1;
} else if (page + count*PAGE_SIZE == right_pg) {
} else if (page + count * PAGE_SIZE == right_pg) {
/*
* The interval can be added by simply moving the base of the right
* interval down and increasing its size accordingly.
* The interval can be added by simply moving
* the base of the right interval down and
* increasing its size accordingly.
*/
leaf->value[i] += count;
leaf->key[i] = page;
1370,16 → 1465,19
return 1;
} else {
/*
* The interval is between both neigbouring intervals,
* but cannot be merged with any of them.
* The interval is between both neighbouring
* intervals, but cannot be merged with any of
* them.
*/
btree_insert(&a->used_space, page, (void *) count, leaf);
btree_insert(&a->used_space, page,
(void *) count, leaf);
return 1;
}
}
}
 
panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
panic("Inconsistency detected while adding %d pages of used space at "
"%p.\n", count, page);
}
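
Aside: all of the branches above reduce to one classification — the new interval either overlaps a neighbour (error), bridges both neighbours, adjoins one of them, or stands alone. A compact standalone restatement over abstract page runs (PAGE_SIZE scaling dropped; a sketch of the cases, not the B+tree code itself):

#include <stdio.h>

typedef struct {
	unsigned page, count;	/* run covers [page, page + count) */
} run_t;

/* Classify insertion of [page, page + count) between runs l and r. */
static const char *classify(run_t l, run_t r, unsigned page, unsigned count)
{
	if (page < l.page + l.count || page + count > r.page)
		return "overlap: refuse (the kernel returns 0)";
	if (page == l.page + l.count && page + count == r.page)
		return "bridge: merge left and right into one run";
	if (page == l.page + l.count)
		return "adjoin left: grow the left run";
	if (page + count == r.page)
		return "adjoin right: move the right run's base down";
	return "isolated: insert a new run";
}

int main(void)
{
	run_t l = { 0, 4 }, r = { 10, 2 };

	printf("%s\n", classify(l, r, 4, 6));	/* bridge */
	printf("%s\n", classify(l, r, 4, 2));	/* adjoin left */
	printf("%s\n", classify(l, r, 8, 2));	/* adjoin right */
	printf("%s\n", classify(l, r, 5, 2));	/* isolated */
	printf("%s\n", classify(l, r, 3, 2));	/* overlap */
	return 0;
}
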
 
/** Mark portion of address space area as unused.
1418,7 → 1516,7
*/
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] == page) {
leaf->key[i] += count*PAGE_SIZE;
leaf->key[i] += count * PAGE_SIZE;
leaf->value[i] -= count;
return 1;
}
1432,27 → 1530,34
uintptr_t left_pg = node->key[node->keys - 1];
count_t left_cnt = (count_t) node->value[node->keys - 1];
 
if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
count * PAGE_SIZE)) {
if (page + count * PAGE_SIZE ==
left_pg + left_cnt * PAGE_SIZE) {
/*
* The interval is contained in the rightmost interval
* of the left neighbour and can be removed by
* updating the size of the bigger interval.
* The interval is contained in the rightmost
* interval of the left neighbour and can be
* removed by updating the size of the bigger
* interval.
*/
node->value[node->keys - 1] -= count;
return 1;
} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
} else if (page + count * PAGE_SIZE <
left_pg + left_cnt*PAGE_SIZE) {
count_t new_cnt;
/*
* The interval is contained in the rightmost interval
* of the left neighbour but its removal requires
* both updating the size of the original interval and
* also inserting a new interval.
* The interval is contained in the rightmost
* interval of the left neighbour but its
* removal requires both updating the size of
* the original interval and also inserting a
* new interval.
*/
new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
(page + count*PAGE_SIZE)) >> PAGE_WIDTH;
node->value[node->keys - 1] -= count + new_cnt;
btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
btree_insert(&a->used_space, page +
count * PAGE_SIZE, (void *) new_cnt, leaf);
return 1;
}
}
1465,27 → 1570,33
uintptr_t left_pg = leaf->key[leaf->keys - 1];
count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
 
if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
count * PAGE_SIZE)) {
if (page + count * PAGE_SIZE ==
left_pg + left_cnt * PAGE_SIZE) {
/*
* The interval is contained in the rightmost interval
* of the leaf and can be removed by updating the size
* of the bigger interval.
* The interval is contained in the rightmost
* interval of the leaf and can be removed by
* updating the size of the bigger interval.
*/
leaf->value[leaf->keys - 1] -= count;
return 1;
} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
} else if (page + count * PAGE_SIZE < left_pg +
left_cnt * PAGE_SIZE) {
count_t new_cnt;
/*
* The interval is contained in the rightmost interval
* of the leaf but its removal requires both updating
* the size of the original interval and
* also inserting a new interval.
* The interval is contained in the rightmost
* interval of the leaf but its removal
* requires both updating the size of the
* original interval and also inserting a new
* interval.
*/
new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
(page + count * PAGE_SIZE)) >> PAGE_WIDTH;
leaf->value[leaf->keys - 1] -= count + new_cnt;
btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
btree_insert(&a->used_space, page +
count * PAGE_SIZE, (void *) new_cnt, leaf);
return 1;
}
}
1502,29 → 1613,40
count_t left_cnt = (count_t) leaf->value[i - 1];
 
/*
* Now the interval is between intervals corresponding to (i - 1) and i.
* Now the interval is between intervals corresponding
* to (i - 1) and i.
*/
if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
count * PAGE_SIZE)) {
if (page + count * PAGE_SIZE ==
left_pg + left_cnt*PAGE_SIZE) {
/*
* The interval is contained in the interval (i - 1)
* of the leaf and can be removed by updating the size
* of the bigger interval.
* The interval is contained in the
* interval (i - 1) of the leaf and can
* be removed by updating the size of
* the bigger interval.
*/
leaf->value[i - 1] -= count;
return 1;
} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
} else if (page + count * PAGE_SIZE <
left_pg + left_cnt * PAGE_SIZE) {
count_t new_cnt;
/*
* The interval is contained in the interval (i - 1)
* of the leaf but its removal requires both updating
* the size of the original interval and
* The interval is contained in the
* interval (i - 1) of the leaf but its
* removal requires both updating the
* size of the original interval and
* also inserting a new interval.
*/
new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
new_cnt = ((left_pg +
left_cnt * PAGE_SIZE) -
(page + count * PAGE_SIZE)) >>
PAGE_WIDTH;
leaf->value[i - 1] -= count + new_cnt;
btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
btree_insert(&a->used_space, page +
count * PAGE_SIZE, (void *) new_cnt,
leaf);
return 1;
}
}
1533,7 → 1655,8
}
 
error:
panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
panic("Inconsistency detected while removing %d pages of used space "
"from %p.\n", count, page);
}
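
Aside: the new_cnt computation above handles the case where the removed range ends strictly inside an existing run — the run keeps its head, loses count + new_cnt pages, and the surviving tail is re-inserted as a new run. A worked standalone example:

#include <stdio.h>

#define PAGE_WIDTH	12
#define PAGE_SIZE	(1 << PAGE_WIDTH)

int main(void)
{
	/* run of 8 pages at 0x10000; remove 2 pages starting at 0x12000 */
	unsigned long left_pg = 0x10000, left_cnt = 8;
	unsigned long page = 0x12000, count = 2;

	unsigned long new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
	    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;

	/* head keeps left_cnt - (count + new_cnt) = 2 pages;
	 * tail of new_cnt = 4 pages is re-inserted at 0x14000 */
	printf("tail: base=%#lx pages=%lu\n",
	    page + count * PAGE_SIZE, new_cnt);
	return 0;
}
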
 
/** Remove reference to address space area share info.
1556,7 → 1679,8
* Now carefully walk the pagemap B+tree and free/remove the
* reference from all frames found there.
*/
for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
for (cur = sh_info->pagemap.leaf_head.next;
cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
btree_node_t *node;
int i;
1581,7 → 1705,8
/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
AS_AREA_ATTR_NONE, &anon_backend, NULL))
return (unative_t) address;
else
return (unative_t) -1;
1612,9 → 1737,12
/* print out info about address space areas */
link_t *cur;
for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
for (cur = as->as_area_btree.leaf_head.next;
cur != &as->as_area_btree.leaf_head; cur = cur->next) {
btree_node_t *node;
node = list_get_instance(cur, btree_node_t, leaf_link);
int i;
for (i = 0; i < node->keys; i++) {
as_area_t *area = node->value[i];
1621,7 → 1749,8
mutex_lock(&area->lock);
printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE);
area, area->base, area->pages, area->base,
area->base + area->pages*PAGE_SIZE);
mutex_unlock(&area->lock);
}
}
/trunk/kernel/arch/amd64/src/mm/memory_init.c
43,7 → 43,7
 
size_t get_memory_size(void)
{
return e801memorysize*1024;
return e801memorysize * 1024;
}
 
void memory_print_map(void)
50,26 → 50,26
{
uint8_t i;
for (i=0;i<e820counter;i++) {
for (i = 0; i < e820counter; i++) {
printf("E820 base: %#llx size: %#llx type: ", e820table[i].base_address, e820table[i].size);
switch (e820table[i].type) {
case MEMMAP_MEMORY_AVAILABLE:
printf("available memory\n");
break;
case MEMMAP_MEMORY_RESERVED:
printf("reserved memory\n");
break;
case MEMMAP_MEMORY_ACPI:
printf("ACPI table\n");
break;
case MEMMAP_MEMORY_NVS:
printf("NVS\n");
break;
case MEMMAP_MEMORY_UNUSABLE:
printf("unusable memory\n");
break;
default:
printf("undefined memory type\n");
case MEMMAP_MEMORY_AVAILABLE:
printf("available memory\n");
break;
case MEMMAP_MEMORY_RESERVED:
printf("reserved memory\n");
break;
case MEMMAP_MEMORY_ACPI:
printf("ACPI table\n");
break;
case MEMMAP_MEMORY_NVS:
printf("NVS\n");
break;
case MEMMAP_MEMORY_UNUSABLE:
printf("unusable memory\n");
break;
default:
printf("undefined memory type\n");
}
}
 
/trunk/kernel/arch/ia32/src/mm/frame.c
65,7 → 65,7
FRAME_SIZE));
size = SIZE2FRAMES(ALIGN_DOWN(e820table[i].size,
FRAME_SIZE));
if (minconf < start || minconf >= start+size)
if (minconf < start || minconf >= start + size)
conf = start;
else
conf = minconf;
/trunk/kernel/arch/ia32/src/mm/memory_init.c
43,7 → 43,7
 
size_t get_memory_size(void)
{
return e801memorysize*1024;
return e801memorysize * 1024;
}
 
void memory_print_map(void)
50,26 → 50,26
{
uint8_t i;
for (i=0;i<e820counter;i++) {
for (i = 0; i < e820counter; i++) {
printf("E820 base: %#.16llx size: %#.16llx type: ", e820table[i].base_address, e820table[i].size);
switch (e820table[i].type) {
case MEMMAP_MEMORY_AVAILABLE:
printf("available memory\n");
break;
case MEMMAP_MEMORY_RESERVED:
printf("reserved memory\n");
break;
case MEMMAP_MEMORY_ACPI:
printf("ACPI table\n");
break;
case MEMMAP_MEMORY_NVS:
printf("NVS\n");
break;
case MEMMAP_MEMORY_UNUSABLE:
printf("unusable memory\n");
break;
default:
printf("undefined memory type\n");
case MEMMAP_MEMORY_AVAILABLE:
printf("available memory\n");
break;
case MEMMAP_MEMORY_RESERVED:
printf("reserved memory\n");
break;
case MEMMAP_MEMORY_ACPI:
printf("ACPI table\n");
break;
case MEMMAP_MEMORY_NVS:
printf("NVS\n");
break;
case MEMMAP_MEMORY_UNUSABLE:
printf("unusable memory\n");
break;
default:
printf("undefined memory type\n");
}
}