Subversion Repositories HelenOS

Compare Revisions

Rev 3033 → Rev 3034

/branches/tracing/kernel/generic/include/proc/task.h
103,7 → 103,7
 	/** Thread used to service kernel answerbox */
 	struct thread *kb_thread;
 	/** Kbox thread creation vs. begin of cleanup mutual exclusion */
-	SPINLOCK_DECLARE(kb_cleanup_lock);
+	mutex_t kb_cleanup_lock;
 	/** True if cleanup of kbox has already started */
 	bool kb_finished;
 	/** Used for waiting on kbox thread shutdown */
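
The substance of this revision: kb_cleanup_lock changes from a spinlock to a kernel mutex. Judging from the ipc.c hunks below, the lock must now be held across operations that can block, such as phone_alloc() and thread_create(), which a spinlock taken with interrupts disabled cannot tolerate. A minimal sketch of the two usage patterns, assembled from this diff (do_blocking_work() is a hypothetical placeholder):

	/* Rev 3033 pattern: spinlock, interrupts off, no sleeping inside */
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&ta->kb_cleanup_lock);
	/* ... must not block here ... */
	spinlock_unlock(&ta->kb_cleanup_lock);
	interrupts_restore(ipl);

	/* Rev 3034 pattern: mutex, the holder may sleep */
	mutex_lock(&ta->kb_cleanup_lock);
	do_blocking_work();	/* hypothetical; e.g. thread_create() below */
	mutex_unlock(&ta->kb_cleanup_lock);
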
/branches/tracing/kernel/generic/include/proc/thread.h
249,7 → 249,6
 	    void *call_me_with);
 extern void thread_print_list(void);
 extern void thread_destroy(thread_t *t);
-extern void thread_unattached_free(thread_t *t);
 extern void thread_update_accounting(void);
 extern bool thread_exists(thread_t *t);
 
/branches/tracing/kernel/generic/src/proc/task.c
177,7 → 177,7
 	/* Init kbox stuff */
 	ipc_answerbox_init(&ta->kernel_box, ta);
 	ta->kb_thread = NULL;
-	spinlock_initialize(&ta->kb_cleanup_lock, "task_kb_cleanup_lock");
+	mutex_initialize(&ta->kb_cleanup_lock);
 	ta->kb_finished = false;
 	waitq_initialize(&ta->kb_thread_shutdown_wq);
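
For orientation, the kbox-related state of task_t after this revision, assembled from the task.h hunk above and the initializers here. This is an abridged sketch, and waitq_t as the type of kb_thread_shutdown_wq is an assumption inferred from waitq_initialize():

	/* kbox-related task_t fields (abridged sketch, not the full struct) */
	struct thread *kb_thread;	/* thread servicing the kernel answerbox */
	mutex_t kb_cleanup_lock;	/* creation vs. cleanup mutual exclusion */
	bool kb_finished;		/* true once kbox cleanup has started */
	waitq_t kb_thread_shutdown_wq;	/* assumed type; kbox thread shutdown wait */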
 
/branches/tracing/kernel/generic/src/proc/thread.c
358,16 → 358,6
 	return t;
 }
 
-/** Destroy thread structure of an unattached thread.
- *
- * Thread t must only have been created and never attached.
- */
-void thread_unattached_free(thread_t *t)
-{
-	slab_free(thread_slab, t);
-}
-
-
 /** Destroy thread memory structure
  *
  * Detach thread from all queues, cpus etc. and destroy it.
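
thread_unattached_free() existed only for the retry path of ipc_connect_kbox() (removed below), where a thread created with THREAD_FLAG_NOATTACH could turn out to be unneeded and had to be returned to the slab cache. Since kb_cleanup_lock is now a mutex held across both creation and attachment, a kbox thread can no longer be left unattached. A condensed sketch of the new sequence (see the full ipc.c hunk below):

	mutex_lock(&ta->kb_cleanup_lock);
	/* ... checks: task alive, cleanup not started, no kbox thread yet ... */
	kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0, "kbox", false);
	if (kb_thread) {
		ta->kb_thread = kb_thread;
		thread_attach(kb_thread, ta);	/* attached before the lock drops */
	}
	mutex_unlock(&ta->kb_cleanup_lock);
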
/branches/tracing/kernel/generic/src/ipc/ipc.c
494,18 → 494,13
 
 static void ipc_kbox_cleanup()
 {
-	ipl_t ipl;
 	bool have_kb_thread;
 
 	/* Only hold kb_cleanup_lock while setting kb_finished - this is enough */
-	ipl = interrupts_disable();
-	spinlock_lock(&TASK->kb_cleanup_lock);
-
-	TASK->kb_finished = true;
-
-	spinlock_unlock(&TASK->kb_cleanup_lock);
-	interrupts_restore(ipl);
+	mutex_lock(&TASK->kb_cleanup_lock);
+	TASK->kb_finished = true;
+	mutex_unlock(&TASK->kb_cleanup_lock);
 
 	have_kb_thread = (TASK->kb_thread != NULL);
 
 	/* From now on nobody will try to connect phones or attach kbox threads */
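
The comment's claim that holding kb_cleanup_lock only while setting kb_finished "is enough" rests on the connect side taking the same mutex before it checks the flag; once kb_finished is observed as true, a connect attempt fails with EINVAL. A condensed sketch of the handshake, with both halves taken from this diff:

	/* Cleanup side (ipc_kbox_cleanup): */
	mutex_lock(&TASK->kb_cleanup_lock);
	TASK->kb_finished = true;	/* no new kbox connections past this point */
	mutex_unlock(&TASK->kb_cleanup_lock);

	/* Connect side (ipc_connect_kbox, below): */
	mutex_lock(&ta->kb_cleanup_lock);
	if (ta->kb_finished != false) {	/* cleanup already started, refuse */
		mutex_unlock(&ta->kb_cleanup_lock);
		return EINVAL;
	}
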
784,12 → 779,7
 	task_t *ta;
 	thread_t *kb_thread;
 	ipl_t ipl;
-	bool had_kb_thread;
 
-	newphid = phone_alloc();
-	if (newphid < 0)
-		return ELIMIT;
-
 	ipl = interrupts_disable();
 	spinlock_lock(&tasks_lock);
 
800,83 → 790,52
 		return ENOENT;
 	}
 
-	spinlock_lock(&ta->kb_cleanup_lock);
+	atomic_inc(&ta->refcount);
+
+	spinlock_unlock(&tasks_lock);
+	interrupts_restore(ipl);
+
+	mutex_lock(&ta->kb_cleanup_lock);
+
+	if (atomic_predec(&ta->refcount) == 0) {
+		mutex_unlock(&ta->kb_cleanup_lock);
+		task_destroy(ta);
+		return ENOENT;
+	}
 
 	if (ta->kb_finished != false) {
-		spinlock_unlock(&ta->kb_cleanup_lock);
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+		mutex_unlock(&ta->kb_cleanup_lock);
 		return EINVAL;
 	}
 
-	spinlock_unlock(&tasks_lock);
-
-	/*
-	 * Only ta->kb_cleanup_lock left. Since we checked the value
-	 * of ta->kb_finished, this suffices to ensure the task's existence.
-	 * (And that it didn't start kbox cleanup yet). It also ensures
-	 * mutual exclusion with other threads running this function.
-	 */
+	newphid = phone_alloc();
+	if (newphid < 0) {
+		mutex_unlock(&ta->kb_cleanup_lock);
+		return ELIMIT;
+	}
 
 	/* Connect the newly allocated phone to the kbox */
 	ipc_phone_connect(&TASK->phones[newphid], &ta->kernel_box);
 
-	had_kb_thread = (ta->kb_thread != NULL);
-
-	/*
-	 * Release all locks. This is an optimisation that makes
-	 * unnecessary thread creation very unlikely.
-	 */
-	spinlock_unlock(&ta->kb_cleanup_lock);
-	interrupts_restore(ipl);
+	if (ta->kb_thread != NULL) {
+		mutex_unlock(&ta->kb_cleanup_lock);
+		return newphid;
+	}
 
 	/* Create a kbox thread */
-
-	kb_thread = thread_create(kbox_thread_proc,
-	    NULL, ta, THREAD_FLAG_NOATTACH, "kbox", false);
-	if (!kb_thread)
-		return ENOMEM;
-
-	/*
-	 * It might happen that someone else has attached a kbox thread
-	 * in the meantime. Also, the task might have gone or shut down.
-	 * Let's try from the beginning.
-	 */
-
-	ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
-
-	ta = task_find_by_id(taskid);
-	if (ta == NULL) {
-		spinlock_unlock(&tasks_lock);
-		return ENOENT;
-	}
-
-	spinlock_lock(&ta->kb_cleanup_lock);
-	spinlock_unlock(&tasks_lock);
-
-	if (ta->kb_finished != false || ta->kb_thread != NULL) {
-		spinlock_unlock(&ta->kb_cleanup_lock);
-		interrupts_restore(ipl);
-
-		/*
-		 * Release the allocated thread struct. This won't
-		 * happen too often, only if two CPUs raced for
-		 * connecting to the kbox.
-		 */
-		thread_unattached_free(kb_thread);
-		return EINVAL;
-	}
+	kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0, "kbox", false);
+	if (!kb_thread) {
+		mutex_unlock(&ta->kb_cleanup_lock);
+		return ENOMEM;
+	}
 
 	/* Attach thread */
 	ta->kb_thread = kb_thread;
 	thread_attach(kb_thread, ta);
 
 	/* FIXME: we could join the kbox thread */
 	thread_detach(kb_thread);
 	thread_ready(kb_thread);
 
-	spinlock_unlock(&ta->kb_cleanup_lock);
-	interrupts_restore(ipl);
+	mutex_unlock(&ta->kb_cleanup_lock);
 
 	return newphid;
 }
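
The rewritten ipc_connect_kbox() replaces the old allocate-then-retry dance with reference-count pinning: ta->refcount is incremented while tasks_lock is still held, so the task_t cannot be freed while this code sleeps on the mutex, and atomic_predec() returning zero means the task exited in the meantime and we hold the last reference. A condensed sketch of just that idiom, extracted from the hunk above:

	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);
	ta = task_find_by_id(taskid);
	if (ta == NULL) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	atomic_inc(&ta->refcount);		/* pin: ta stays valid after unlock */
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	mutex_lock(&ta->kb_cleanup_lock);	/* may sleep; safe, ta is pinned */
	if (atomic_predec(&ta->refcount) == 0) {
		/* task exited meanwhile; we held the last reference */
		mutex_unlock(&ta->kb_cleanup_lock);
		task_destroy(ta);
		return ENOENT;
	}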