Subversion Repositories: HelenOS

Compare Revisions

Rev 3033 → Rev 3034

/branches/tracing/kernel/generic/src/ipc/ipc.c
494,18 → 494,13
 
 static void ipc_kbox_cleanup()
 {
-	ipl_t ipl;
 	bool have_kb_thread;

 	/* Only hold kb_cleanup_lock while setting kb_finished - this is enough */
-	ipl = interrupts_disable();
-	spinlock_lock(&TASK->kb_cleanup_lock);
-
+	mutex_lock(&TASK->kb_cleanup_lock);
 	TASK->kb_finished = true;
-
-	spinlock_unlock(&TASK->kb_cleanup_lock);
-	interrupts_restore(ipl);
+	mutex_unlock(&TASK->kb_cleanup_lock);

 	have_kb_thread = (TASK->kb_thread != NULL);

 	/* From now on nobody will try to connect phones or attach kbox threads */
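This hunk swaps an interrupt-disabling spinlock for a sleeping mutex around the kb_finished flag: cleanup only needs the lock while flipping the flag, because connectors re-check kb_finished under the same lock before touching the kbox. For readers outside the HelenOS tree, a minimal userspace sketch of that pattern using POSIX threads instead of the kernel's mutex type (the task_t here is a stand-in, not the HelenOS definition):

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t kb_cleanup_lock;  /* sleeping lock, as after rev 3034 */
        bool kb_finished;                 /* true once kbox cleanup has begun */
    } task_t;

    /* Holding the lock only while setting the flag suffices: any thread
     * that wants to connect to the kbox must take the same lock and test
     * kb_finished first, so it either sees the flag or completes its
     * connection before cleanup proceeds. */
    static void kbox_mark_finished(task_t *task)
    {
        pthread_mutex_lock(&task->kb_cleanup_lock);
        task->kb_finished = true;
        pthread_mutex_unlock(&task->kb_cleanup_lock);
    }

A mutex also removes the need to disable interrupts around the critical section, which is why the ipl bookkeeping disappears from the function.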
784,12 → 779,7
 	task_t *ta;
 	thread_t *kb_thread;
 	ipl_t ipl;
-	bool had_kb_thread;

-	newphid = phone_alloc();
-	if (newphid < 0)
-		return ELIMIT;
-
 	ipl = interrupts_disable();
 	spinlock_lock(&tasks_lock);
 
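The declarations lose had_kb_thread (the new code tests ta->kb_thread directly while holding the mutex) and the early phone_alloc() call, which moves below the task-validity checks in the next hunk. A hedged sketch of why the reordering helps, with checks_pass() and resource_alloc() as hypothetical stand-ins for the task lookup/kb_finished checks and phone_alloc(): allocating after validation leaves the failure paths with nothing to undo.

    #include <stdbool.h>

    bool checks_pass(void);    /* hypothetical: task lookup + kb_finished test */
    int resource_alloc(void);  /* hypothetical: cf. phone_alloc() */

    int connect_late_alloc(void)
    {
        if (!checks_pass())
            return -1;          /* cf. ENOENT/EINVAL: nothing to release */

        int handle = resource_alloc();  /* cf. the relocated phone_alloc() */
        if (handle < 0)
            return -1;          /* cf. ELIMIT */
        return handle;          /* cf. returning newphid */
    }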
800,83 → 790,52
 		return ENOENT;
 	}

-	spinlock_lock(&ta->kb_cleanup_lock);
+	atomic_inc(&ta->refcount);

+	spinlock_unlock(&tasks_lock);
+	interrupts_restore(ipl);
+
+	mutex_lock(&ta->kb_cleanup_lock);
+
+	if (atomic_predec(&ta->refcount) == 0) {
+		mutex_unlock(&ta->kb_cleanup_lock);
+		task_destroy(ta);
+		return ENOENT;
+	}
+
 	if (ta->kb_finished != false) {
-		spinlock_unlock(&ta->kb_cleanup_lock);
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+		mutex_unlock(&ta->kb_cleanup_lock);
 		return EINVAL;
 	}

-	spinlock_unlock(&tasks_lock);
+	newphid = phone_alloc();
+	if (newphid < 0) {
+		mutex_unlock(&ta->kb_cleanup_lock);
+		return ELIMIT;
+	}

-	/*
-	 * Only ta->kb_cleanup_lock left. Since we checked the value
-	 * of ta->kb_finished, this suffices to ensure the task's existence.
-	 * (And that it didn't start kbox cleanup yet.) It also ensures
-	 * mutual exclusion with other threads running this function.
-	 */
-
 	/* Connect the newly allocated phone to the kbox */
 	ipc_phone_connect(&TASK->phones[newphid], &ta->kernel_box);

-	had_kb_thread = (ta->kb_thread != NULL);
-
-	/*
-	 * Release all locks. This is an optimisation that makes
-	 * unnecessary thread creation very unlikely.
-	 */
-	spinlock_unlock(&ta->kb_cleanup_lock);
-	interrupts_restore(ipl);
+	if (ta->kb_thread != NULL) {
+		mutex_unlock(&ta->kb_cleanup_lock);
+		return newphid;
+	}

 	/* Create a kbox thread */
-
-	kb_thread = thread_create(kbox_thread_proc,
-	    NULL, ta, THREAD_FLAG_NOATTACH, "kbox", false);
-	if (!kb_thread)
-		return ENOMEM;
-
-	/*
-	 * It might happen that someone else has attached a kbox thread
-	 * in the meantime. Also, the task might have gone or shut down.
-	 * Let's try from the beginning.
-	 */
-
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
-
-	ta = task_find_by_id(taskid);
-	if (ta == NULL) {
-		spinlock_unlock(&tasks_lock);
-		return ENOENT;
-	}
-
-	spinlock_lock(&ta->kb_cleanup_lock);
-	spinlock_unlock(&tasks_lock);
-
-	if (ta->kb_finished != false || ta->kb_thread != NULL) {
-		spinlock_unlock(&ta->kb_cleanup_lock);
-		interrupts_restore(ipl);
-
-		/*
-		 * Release the allocated thread struct. This won't
-		 * happen too often, only if two CPUs raced for
-		 * connecting to the kbox.
-		 */
-		thread_unattached_free(kb_thread);
-		return EINVAL;
-	}
+	kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0, "kbox", false);
+	if (!kb_thread) {
+		mutex_unlock(&ta->kb_cleanup_lock);
+		return ENOMEM;
+	}

 	/* Attach thread */
 	ta->kb_thread = kb_thread;
 	thread_attach(kb_thread, ta);

 	/* FIXME: we could join the kbox thread */
 	thread_detach(kb_thread);
 	thread_ready(kb_thread);

-	spinlock_unlock(&ta->kb_cleanup_lock);
-	interrupts_restore(ipl);
+	mutex_unlock(&ta->kb_cleanup_lock);

 	return newphid;
 }
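The key change in this hunk is how the task is kept alive across a blocking lock acquisition. The old code could hold the tasks_lock spinlock and ta->kb_cleanup_lock simultaneously, but a mutex may sleep, and sleeping while holding a spinlock is not allowed. The new code therefore pins the task with atomic_inc(&ta->refcount) while tasks_lock still guarantees its existence, releases the spinlock, blocks on the mutex, and then drops the reference: if atomic_predec() returns zero, the task died while we slept and the caller must destroy it. Holding the mutex across the phone and thread setup also eliminates the old unlock/create/relock retry dance and the thread_unattached_free() path. A minimal userspace sketch of the pinning idiom in C11 atomics and POSIX threads (tasks_lock is modelled as a mutex here, though in the kernel it is an interrupt-disabling spinlock; task_find_by_id() and task_destroy() are stand-ins for the kernel functions):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct task {
        atomic_int refcount;
        pthread_mutex_t kb_cleanup_lock;
        bool kb_finished;
    } task_t;

    extern pthread_mutex_t tasks_lock;       /* stand-in for the kernel lock */
    task_t *task_find_by_id(unsigned long id);
    void task_destroy(task_t *task);

    /* Pin-then-sleep: take a reference while the lookup lock guarantees the
     * task is alive, drop the non-sleeping lock, then block on the mutex.
     * After waking, drop the reference; holding the last one means the task
     * exited in the meantime. */
    task_t *task_pin_and_lock(unsigned long id)
    {
        pthread_mutex_lock(&tasks_lock);
        task_t *ta = task_find_by_id(id);
        if (ta == NULL) {
            pthread_mutex_unlock(&tasks_lock);
            return NULL;                      /* cf. ENOENT */
        }
        atomic_fetch_add(&ta->refcount, 1);   /* cf. atomic_inc() */
        pthread_mutex_unlock(&tasks_lock);

        pthread_mutex_lock(&ta->kb_cleanup_lock);
        /* atomic_fetch_sub returns the old value, so old == 1 means the
         * count is now zero: cf. atomic_predec(&ta->refcount) == 0. */
        if (atomic_fetch_sub(&ta->refcount, 1) == 1) {
            pthread_mutex_unlock(&ta->kb_cleanup_lock);
            task_destroy(ta);
            return NULL;                      /* cf. ENOENT */
        }
        return ta;    /* caller holds kb_cleanup_lock */
    }

A caller would then do all of its connection work under kb_cleanup_lock and release it at the end, which is exactly the shape sys_ipc_connect_kbox() takes after this revision.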