Subversion Repositories: HelenOS

Compare Revisions: Rev 2901 → Rev 2902

/branches/tracing/kernel/generic/include/proc/task.h
106,8 → 106,12
answerbox_t kernel_box;
/** Thread used to service kernel answerbox */
struct thread *kb_thread;
/** True if kb_thread != NULL or a kbox thread is being prepared */
bool kb_thread_at_hand;
/** Mutual exclusion between kbox thread creation and the start of kbox cleanup */
SPINLOCK_DECLARE(kb_cleanup_lock);
/** True if cleanup of kbox has already started */
bool kb_finished;
/** Used for waiting on kbox thread shutdown */
waitq_t kb_thread_shutdown_wq;
/** Architecture specific task data. */
task_arch_t arch;
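
The new fields implement a handshake between kbox thread creation (ipc_connect_kbox) and task shutdown (ipc_kbox_cleanup): kb_cleanup_lock serializes the two sides, kb_finished marks the point after which no new kbox thread may be attached, and kb_thread_shutdown_wq lets the cleanup code wait for the kbox thread to exit. Below is a minimal userspace analogue of that protocol, sketched with pthreads; the names mirror the kernel fields for readability, but this is an illustration only (the kernel uses a spinlock and a waitq, not a mutex and pthread_join):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t kb_cleanup_lock = PTHREAD_MUTEX_INITIALIZER;
static bool kb_finished = false;	/* cleanup has started */
static bool have_kb_thread = false;	/* plays the role of kb_thread != NULL */
static pthread_t kb_thread;

static void *kbox_thread_proc(void *arg)
{
	(void) arg;
	/* Kbox service loop would run here. */
	return NULL;
}

/*
 * Connect side: refuse to create a service thread once cleanup has
 * begun. (Simplified: the kernel drops the lock around thread
 * creation and re-checks, see ipc_connect_kbox() below.)
 */
static int connect_kbox(void)
{
	pthread_mutex_lock(&kb_cleanup_lock);
	if (kb_finished) {
		pthread_mutex_unlock(&kb_cleanup_lock);
		return -1;		/* EINVAL in the kernel version */
	}
	if (!have_kb_thread) {
		pthread_create(&kb_thread, NULL, kbox_thread_proc, NULL);
		have_kb_thread = true;
	}
	pthread_mutex_unlock(&kb_cleanup_lock);
	return 0;
}

/* Cleanup side: publish kb_finished first, then wait for the thread. */
static void kbox_cleanup(void)
{
	pthread_mutex_lock(&kb_cleanup_lock);
	kb_finished = true;
	bool thread_exists = have_kb_thread;
	pthread_mutex_unlock(&kb_cleanup_lock);

	if (thread_exists)
		pthread_join(kb_thread, NULL);	/* waitq_sleep() in the kernel */
}

int main(void)
{
	connect_kbox();
	kbox_cleanup();
	puts("kbox torn down");
	return 0;
}
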
/branches/tracing/kernel/generic/include/proc/thread.h
233,7 → 233,6
extern thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
int flags, char *name, bool uncounted);
extern void thread_attach(thread_t *t, task_t *task);
extern int thread_attach_by_id(thread_t *t, task_id_t taskid);
extern void thread_ready(thread_t *t);
extern void thread_exit(void) __attribute__((noreturn));
 
/branches/tracing/kernel/generic/src/proc/task.c
177,9 → 177,12
ta->not_stoppable_count = 0;
ta->debug_evmask = 0;
 
/* Initialize kbox state */
ipc_answerbox_init(&ta->kernel_box, ta);
ta->kb_thread = NULL;
ta->kb_thread_at_hand = false;
spinlock_initialize(&ta->kb_cleanup_lock, "task_kb_cleanup_lock");
ta->kb_finished = false;
waitq_initialize(&ta->kb_thread_shutdown_wq);
 
ipc_answerbox_init(&ta->answerbox, ta);
for (i = 0; i < IPC_MAX_PHONES; i++)
/branches/tracing/kernel/generic/src/proc/thread.c
412,52 → 412,6
slab_free(thread_slab, t);
}
 
/** Attach thread to the given task.
*
* The task's lock must already be held and interrupts must be disabled.
*
* @param t Thread to be attached to the task.
* @param task Task to which the thread is to be attached.
*/
static void _thread_attach_task(thread_t *t, task_t *task)
{
atomic_inc(&task->refcount);
atomic_inc(&task->lifecount);
 
list_append(&t->th_link, &task->th_head);
 
/*
* Copy task debugging state to thread struct.
* The thread needs to know it is being debugged,
* otherwise it would neither stop nor respond to
* debug ops.
*/
if (t->flags & THREAD_FLAG_USPACE) {
if (task->dt_state == UDEBUG_TS_BEGINNING ||
task->dt_state == UDEBUG_TS_ACTIVE) {
t->debug_active = true;
}
}
}
 
/** Add thread to the threads tree.
*
* Interrupts must be already disabled.
*
* @param t Thread to be added to the tree.
*/
static void _thread_attach_tree(thread_t *t)
{
/*
* Register this thread in the system-wide list.
*/
spinlock_lock(&threads_lock);
avltree_insert(&threads_tree, &t->threads_tree_node);
spinlock_unlock(&threads_lock);
}
 
 
/** Make the thread visible to the system.
*
* Attach the thread structure to the current task and make it visible in the
470,68 → 424,28
{
ipl_t ipl;
 
ipl = interrupts_disable();
 
/*
* Attach to the current task.
* Attach to the specified task.
*/
spinlock_lock(&task->lock);
_thread_attach_task(t, task);
spinlock_unlock(&task->lock);
 
/*
* Register this thread in the system-wide list.
*/
_thread_attach_tree(t);
interrupts_restore(ipl);
}
 
/** Attach thread to a task given by its ID.
*
* Unlike thread_attach(), this function allows attaching a thread
* to an arbitrary task.
*
* @param t Thread to be attached to the task.
* @param taskid ID of the task to which the thread is to be attached.
* @return EOK on success or an error code from errno.h.
*/
int thread_attach_by_id(thread_t *t, task_id_t taskid)
{
ipl_t ipl;
task_t *task;
 
ipl = interrupts_disable();
 
spinlock_lock(&tasks_lock);
task = task_find_by_id(taskid);
if (task == NULL) {
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);
return ENOENT;
}
 
spinlock_lock(&task->lock);
spinlock_unlock(&tasks_lock);
 
/*
* Attach to the found task.
*/
_thread_attach_task(t, task);
 
atomic_inc(&task->refcount);
/* Do not count the kbox thread in lifecount */
if (t->flags & THREAD_FLAG_USPACE)
atomic_inc(&task->lifecount);
list_append(&t->th_link, &task->th_head);
spinlock_unlock(&task->lock);
 
/*
* Register this thread in the system-wide list.
*/
_thread_attach_tree(t);
spinlock_lock(&threads_lock);
avltree_insert(&threads_tree, &t->threads_tree_node);
spinlock_unlock(&threads_lock);
interrupts_restore(ipl);
 
return EOK;
}
 
 
/** Terminate thread.
*
* End current thread execution and switch it to the exiting state. All pending
541,16 → 455,17
{
ipl_t ipl;
 
if (atomic_predec(&TASK->lifecount) == 0) {
/*
* We are the last thread in the task that still has not exited.
* With the exception of the moment the task was created, new
* threads can only be created by threads of the same task.
* We are safe to perform cleanup.
*/
if (THREAD->flags & THREAD_FLAG_USPACE) {
if (THREAD->flags & THREAD_FLAG_USPACE) {
if (atomic_predec(&TASK->lifecount) == 0) {
/*
* We are the last userspace thread in the task that
* still has not exited. With the exception of the
* moment the task was created, new userspace threads
* can only be created by threads of the same task.
* We are safe to perform cleanup.
*/
ipc_cleanup();
futex_cleanup();
klog_printf("Cleanup of task %llu completed.",
TASK->taskid);
}
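
The rearranged condition means only userspace threads participate in lifecount, so the kbox (kernel) thread can outlive them without blocking or re-triggering task cleanup. A small, self-contained illustration of the new accounting, using plain C11 atomics (atomic_fetch_sub returns the previous value, so subtracting one mirrors the kernel's atomic_predec, which returns the new value):

#include <stdatomic.h>
#include <stdio.h>

#define THREAD_FLAG_USPACE  (1 << 0)

static atomic_int lifecount;

/* Mirrors the new thread_exit() accounting from the hunk above. */
static void thread_exit_sim(int flags)
{
	if (flags & THREAD_FLAG_USPACE) {
		if (atomic_fetch_sub(&lifecount, 1) - 1 == 0)
			puts("last userspace thread gone: run task cleanup");
	}
}

int main(void)
{
	atomic_store(&lifecount, 2);		/* two userspace threads */
	thread_exit_sim(THREAD_FLAG_USPACE);	/* not the last: no cleanup */
	thread_exit_sim(0);			/* kbox thread: not counted */
	thread_exit_sim(THREAD_FLAG_USPACE);	/* last one: cleanup runs */
	return 0;
}
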
/branches/tracing/kernel/generic/src/ipc/ipc.c
49,6 → 49,7
#include <debug.h>
 
#include <print.h>
#include <console/klog.h>
#include <proc/thread.h>
#include <arch/interrupt.h>
#include <ipc/irq.h>
424,33 → 425,31
}
}
 
/** Cleans up all IPC communication of the current task.
/** Disconnects all phones connected to an answerbox.
*
* Note: ipc_hangup sets returning answerbox to TASK->answerbox, you
* have to change it as well if you want to cleanup other tasks than TASK.
* @param box Answerbox to disconnect phones from.
* @param notify_box If true, the answerbox will get a hangup message for
* each disconnected phone.
*/
void ipc_cleanup(void)
static void ipc_answerbox_slam_phones(answerbox_t *box, bool notify_box)
{
int i;
call_t *call;
phone_t *phone;
DEADLOCK_PROBE_INIT(p_phonelck);
ipl_t ipl;
call_t *call;
 
/* Disconnect all our phones ('ipc_phone_hangup') */
for (i = 0; i < IPC_MAX_PHONES; i++)
ipc_phone_hangup(&TASK->phones[i]);
call = ipc_call_alloc(0);
 
/* Disconnect all connected irqs */
ipc_irq_cleanup(&TASK->answerbox);
 
/* Disconnect all phones connected to our answerbox */
restart_phones:
spinlock_lock(&TASK->answerbox.lock);
while (!list_empty(&TASK->answerbox.connected_phones)) {
phone = list_get_instance(TASK->answerbox.connected_phones.next,
ipl = interrupts_disable();
spinlock_lock(&box->lock);
while (!list_empty(&box->connected_phones)) {
phone = list_get_instance(box->connected_phones.next,
phone_t, link);
if (!spinlock_trylock(&phone->lock)) {
spinlock_unlock(&TASK->answerbox.lock);
spinlock_unlock(&box->lock);
interrupts_restore(ipl);
DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD);
goto restart_phones;
}
457,13 → 456,107
/* Disconnect phone */
ASSERT(phone->state == IPC_PHONE_CONNECTED);
 
list_remove(&phone->link);
phone->state = IPC_PHONE_SLAMMED;
list_remove(&phone->link);
 
if (notify_box) {
spinlock_unlock(&phone->lock);
spinlock_unlock(&box->lock);
interrupts_restore(ipl);
 
/*
* Send one message to the answerbox for each
* phone. Used to make sure the kbox thread
* wakes up after the last phone has been
* disconnected.
*/
IPC_SET_METHOD(call->data, IPC_M_PHONE_HUNGUP);
call->flags |= IPC_CALL_DISCARD_ANSWER;
_ipc_call(phone, box, call);
 
/* Allocate another call in advance */
call = ipc_call_alloc(0);
 
/* Must start again */
goto restart_phones;
}
 
spinlock_unlock(&phone->lock);
}
 
spinlock_unlock(&box->lock);
interrupts_restore(ipl);
 
/* Free the unused call */
if (call)
	ipc_call_free(call);
}
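
The restart_phones loop deals with lock ordering: box->lock is taken first, but a phone holder may be acquiring the locks the other way around, so the phone lock is only trylock'ed and the whole operation restarts on failure (the DEADLOCK_PROBE macros only instrument the retry loop). The same pattern in a self-contained pthreads form, with illustrative names rather than kernel API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t box_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t phone_lock = PTHREAD_MUTEX_INITIALIZER;

static void slam_one_phone(void)
{
restart:
	pthread_mutex_lock(&box_lock);
	if (pthread_mutex_trylock(&phone_lock) != 0) {
		/*
		 * Someone holds phone_lock and may be waiting for
		 * box_lock; back off completely and retry instead
		 * of deadlocking.
		 */
		pthread_mutex_unlock(&box_lock);
		goto restart;
	}

	/* ... disconnect the phone here ... */

	pthread_mutex_unlock(&phone_lock);
	pthread_mutex_unlock(&box_lock);
}

int main(void)
{
	slam_one_phone();
	puts("phone disconnected");
	return 0;
}
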
 
static void ipc_kbox_cleanup(void)
{
ipl_t ipl;
bool have_kb_thread;
 
/* Hold kb_cleanup_lock only while setting kb_finished; this is enough. */
ipl = interrupts_disable();
spinlock_lock(&TASK->kb_cleanup_lock);
 
TASK->kb_finished = true;
 
spinlock_unlock(&TASK->kb_cleanup_lock);
interrupts_restore(ipl);
 
have_kb_thread = (TASK->kb_thread != NULL);
 
/* From now on nobody will try to connect phones or attach kbox threads */
 
/*
* Disconnect all phones connected to our kbox. Passing true for
* notify_box causes a HANGUP message to be inserted for each
* disconnected phone. This ensures the kbox thread is going to
* wake up and terminate.
*/
ipc_answerbox_slam_phones(&TASK->kernel_box, have_kb_thread);
/* Wait for the kbox thread to terminate */
if (have_kb_thread) {
klog_printf("ipc_kbox_cleanup - wait for kbox thread to finish");
waitq_sleep(&TASK->kb_thread_shutdown_wq);
}
 
/* Answer all messages in 'calls' and 'dispatched_calls' queues */
spinlock_lock(&TASK->kernel_box.lock);
ipc_cleanup_call_list(&TASK->kernel_box.dispatched_calls);
ipc_cleanup_call_list(&TASK->kernel_box.calls);
spinlock_unlock(&TASK->kernel_box.lock);
}
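
Passing true for notify_box turns phone disconnection into a wakeup mechanism: one IPC_M_PHONE_HUNGUP message is queued per slammed phone, so a kbox thread blocked on the answerbox is guaranteed to wake up and observe that the phone list has emptied. The same idea in generic, runnable form, with a pthreads condition variable standing in for the answerbox wait (illustrative only, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t box_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t box_cv = PTHREAD_COND_INITIALIZER;
static int connected_phones = 2;

/* "Kbox thread": waits for messages; exits once the last phone is gone. */
static void *kbox_consumer(void *arg)
{
	(void) arg;
	pthread_mutex_lock(&box_lock);
	while (connected_phones > 0)
		pthread_cond_wait(&box_cv, &box_lock);
	pthread_mutex_unlock(&box_lock);
	puts("phone list is empty, kbox thread exits");
	return NULL;
}

/* Cleanup side: each slammed phone produces one "hangup" wakeup. */
static void slam_phones(void)
{
	pthread_mutex_lock(&box_lock);
	while (connected_phones > 0) {
		connected_phones--;		/* one HUNGUP message each */
		pthread_cond_signal(&box_cv);
	}
	pthread_mutex_unlock(&box_lock);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, kbox_consumer, NULL);
	slam_phones();
	pthread_join(t, NULL);
	return 0;
}
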
 
 
/** Cleans up all IPC communication of the current task.
*
* Note: ipc_hangup sets the returning answerbox to TASK->answerbox; you
* have to change it as well if you want to clean up tasks other than TASK.
*/
void ipc_cleanup(void)
{
int i;
call_t *call;
 
/* Disconnect all our phones ('ipc_phone_hangup') */
for (i = 0; i < IPC_MAX_PHONES; i++)
ipc_phone_hangup(&TASK->phones[i]);
 
/* Disconnect all connected irqs */
ipc_irq_cleanup(&TASK->answerbox);
 
/* Disconnect all phones connected to our regular answerbox */
ipc_answerbox_slam_phones(&TASK->answerbox, false);
 
/* Clean up kbox thread and communications */
ipc_kbox_cleanup();
 
/* Answer all messages in 'calls' and 'dispatched_calls' queues */
spinlock_lock(&TASK->answerbox.lock);
ipc_cleanup_call_list(&TASK->answerbox.dispatched_calls);
ipc_cleanup_call_list(&TASK->answerbox.calls);
spinlock_unlock(&TASK->answerbox.lock);
634,12 → 727,12
}
 
if (method == IPC_M_PHONE_HUNGUP) {
klog_printf("kbox: handle hangup message\n");
klog_printf("kbox: handle hangup message");
 
/* Was it our debugger, who hung up? */
if (call->sender == TASK->debugger) {
/* Terminate debugging session (if any) */
klog_printf("kbox: terminate debug session\n");
klog_printf("kbox: terminate debug session");
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
udebug_task_cleanup(TASK);
646,10 → 739,10
spinlock_unlock(&TASK->lock);
interrupts_restore(ipl);
} else {
klog_printf("kbox: was not debugger\n");
klog_printf("kbox: was not debugger");
}
 
klog_printf("kbox: continue with hangup message\n");
klog_printf("kbox: continue with hangup message");
IPC_SET_RETVAL(call->data, 0);
ipc_answer(&TASK->kernel_box, call);
 
658,10 → 751,9
spinlock_lock(&TASK->answerbox.lock);
if (list_empty(&TASK->answerbox.connected_phones)) {
/* Last phone has been disconnected */
TASK->kb_thread_at_hand = false;
TASK->kb_thread = NULL;
done = true;
printf("phone list is empty\n");
klog_printf("phone list is empty");
}
spinlock_unlock(&TASK->answerbox.lock);
spinlock_unlock(&TASK->lock);
670,6 → 762,8
}
}
 
klog_printf("kbox: done, waking up possible shutdown routine");
waitq_wakeup(&TASK->kb_thread_shutdown_wq, WAKEUP_ALL);
klog_printf("kbox: finished");
}
 
677,6 → 771,11
/**
* Connect phone to a task kernel-box specified by id.
*
* Note that this is not completely atomic. For optimisation reasons, the
* task might start cleaning up its kbox after the phone has been connected
* but before a kbox thread has been created. The cleanup code must take
* this into account.
*
* @return Phone id on success, or negative error code.
*/
int ipc_connect_kbox(task_id_t taskid)
684,8 → 783,8
int newphid;
task_t *ta;
thread_t *kb_thread;
int rc;
ipl_t ipl;
bool had_kb_thread;
 
newphid = phone_alloc();
if (newphid < 0)
697,44 → 796,86
ta = task_find_by_id(taskid);
if (ta == NULL) {
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);
return ENOENT;
}
spinlock_lock(&ta->lock);
 
spinlock_lock(&ta->kb_cleanup_lock);
spinlock_unlock(&tasks_lock);
 
ipc_phone_connect(&TASK->phones[newphid], &ta->kernel_box);
/*
* Only ta->kb_cleanup_lock is held now. Since we check the value
* of ta->kb_finished under it, this suffices to ensure the task's
* existence (and that kbox cleanup has not started yet). It also
* ensures mutual exclusion with other threads running this function.
*/
 
if (ta->kb_thread_at_hand == false) {
ta->kb_thread_at_hand = true;
spinlock_unlock(&ta->lock);
if (ta->kb_finished != false) {
spinlock_unlock(&ta->kb_cleanup_lock);
interrupts_restore(ipl);
return EINVAL;
}
 
kb_thread = thread_create(kbox_thread_proc,
NULL, ta, THREAD_FLAG_NOATTACH, "kbox", false);
if (!kb_thread)
return ENOMEM;
/* Connect the newly allocated phone to the kbox */
ipc_phone_connect(&TASK->phones[newphid], &ta->kernel_box);
 
rc = thread_attach_by_id(kb_thread, taskid);
if (rc == EOK) {
ipl = interrupts_disable();
spinlock_lock(&ta->lock);
ta->kb_thread = kb_thread;
spinlock_unlock(&ta->lock);
interrupts_restore(ipl);
had_kb_thread = (ta->kb_thread != NULL);
/*
* Release all locks. This is an optimisation that makes
* unnecessary thread creation very unlikely.
*/
spinlock_unlock(&ta->kb_cleanup_lock);
interrupts_restore(ipl);
 
thread_detach(kb_thread);
thread_ready(kb_thread);
} else {
/* Return the allocated thread struct */
thread_unattached_free(kb_thread);
}
} else {
spinlock_unlock(&ta->lock);
/* Create a kbox thread */
 
kb_thread = thread_create(kbox_thread_proc,
NULL, ta, THREAD_FLAG_NOATTACH, "kbox", false);
if (!kb_thread)
return ENOMEM;
 
/*
* It might happen that someone else has attached a kbox thread
* in the meantime. Also, the task might have gone away or shut
* down. Let's try again from the beginning.
*/
 
ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
 
ta = task_find_by_id(taskid);
if (ta == NULL) {
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
	/* Return the allocated thread struct */
	thread_unattached_free(kb_thread);
	return ENOENT;
}
 
spinlock_lock(&ta->kb_cleanup_lock);
spinlock_unlock(&tasks_lock);
 
if (ta->kb_finished != false || ta->kb_thread != NULL) {
spinlock_unlock(&ta->kb_cleanup_lock);
interrupts_restore(ipl);
 
/*
* Release the allocated thread struct. This should not
* happen often; only when two CPUs race to connect to
* the kbox.
*/
thread_unattached_free(kb_thread);
return EINVAL;
}
 
/* Attach thread */
ta->kb_thread = kb_thread;
thread_attach(kb_thread, ta);
 
thread_detach(kb_thread);
thread_ready(kb_thread);
 
spinlock_unlock(&ta->kb_cleanup_lock);
interrupts_restore(ipl);
 
return newphid;
}
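
The second half of ipc_connect_kbox() is an instance of an optimistic create-then-recheck pattern: drop all locks, create the relatively expensive thread, re-acquire the locks and re-check, and free the fresh thread if another CPU won the race or the task began shutting down. Reduced to a generic, runnable sketch with malloc standing in for thread_create (the comments map the stand-ins back to the kernel calls above; this is not kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *resource;			/* plays the role of ta->kb_thread */

/* Optimistic creation: allocate outside the lock, re-check, discard on race. */
static void *get_resource(void)
{
	pthread_mutex_lock(&lock);
	void *existing = resource;
	pthread_mutex_unlock(&lock);
	if (existing)
		return existing;

	void *fresh = malloc(128);	/* "thread_create" stand-in */

	pthread_mutex_lock(&lock);
	if (resource != NULL) {
		/* Someone else attached one in the meantime. */
		void *winner = resource;
		pthread_mutex_unlock(&lock);
		free(fresh);		/* "thread_unattached_free" stand-in */
		return winner;
	}
	resource = fresh;		/* "thread_attach" stand-in */
	pthread_mutex_unlock(&lock);
	return fresh;
}

int main(void)
{
	printf("resource at %p\n", get_resource());
	free(resource);
	return 0;
}
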
 
/branches/tracing/kernel/generic/src/udebug/udebug.c
47,11 → 47,13
call_t *db_call, *go_call;
ipl_t ipl;
 
ASSERT(THREAD);
ASSERT(TASK);
 
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
 
nsc = --TASK->not_stoppable_count;
db_call = TASK->debug_begin_call;
 
if (TASK->dt_state == UDEBUG_TS_BEGINNING) {
klog_printf("udebug_stoppable_begin");
64,6 → 66,9
* DEBUG_BEGIN call.
*/
 
db_call = TASK->debug_begin_call;
ASSERT(db_call);
 
/* Lock order OK, THREAD->debug_lock is after TASK->lock */
spinlock_lock(&THREAD->debug_lock);
THREAD->debug_stoppable = true;
87,7 → 92,7
spinlock_lock(&THREAD->debug_lock);
THREAD->debug_stoppable = true;
 
if (THREAD->debug_stop) {
if (THREAD->debug_active && THREAD->debug_stop) {
/*
* Thread was requested to stop - answer go call
*/
95,6 → 100,7
/* Make sure nobody takes this call away from us */
go_call = THREAD->debug_go_call;
THREAD->debug_go_call = NULL;
ASSERT(go_call);
 
IPC_SET_RETVAL(go_call->data, 0);
IPC_SET_ARG1(go_call->data, UDEBUG_EVENT_STOP);
145,8 → 151,7
klog_printf("debug_stop=%d", THREAD->debug_stop);
}
 
if ((TASK->dt_state == UDEBUG_TS_BEGINNING ||
TASK->dt_state == UDEBUG_TS_ACTIVE) &&
if (THREAD->debug_active &&
THREAD->debug_stop == true) {
TASK->debug_begin_call = NULL;
spinlock_unlock(&THREAD->debug_lock);