/kernel/trunk/generic/src/proc/task.c |
---|
47,6 → 47,7 |
#include <memstr.h> |
#include <print.h> |
#include <elf.h> |
#include <errno.h> |
#include <syscall/copy.h> |
#ifndef LOADED_PROG_STACK_PAGES_NO |
57,6 → 58,8 |
btree_t tasks_btree; |
static task_id_t task_counter = 0; |
static void ktask_cleanup(void *); |
/** Initialize tasks |
* |
* Initialize kernel tasks support. |
94,7 → 97,10 |
ta->as = as; |
ta->name = name; |
ta->refcount = 0; |
ta->capabilities = 0; |
ta->accept_new_threads = true; |
ipc_answerbox_init(&ta->answerbox); |
for (i=0; i < IPC_MAX_PHONES;i++) |
127,6 → 133,14 |
return ta; |
} |
/** Destroy task.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
	/*
	 * NOTE(review): stub — actual teardown (releasing the address space,
	 * freeing the task structure, removing it from tasks_btree) is not
	 * implemented yet; callers currently get a no-op.
	 */
}
/** Create new task with 1 thread and run it |
* |
* @param program_addr Address of program executable image. |
207,6 → 221,60 |
return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf); |
} |
/** Kill task. |
* |
* @param id ID of the task to be killed. |
* |
* @return 0 on success or an error code from errno.h |
*/ |
int task_kill(task_id_t id) |
{ |
ipl_t ipl; |
task_t *ta; |
thread_t *t; |
link_t *cur; |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
if (!(ta = task_find_by_id(id))) { |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
return ENOENT; |
} |
spinlock_lock(&ta->lock); |
ta->refcount++; |
spinlock_unlock(&ta->lock); |
t = thread_create(ktask_cleanup, NULL, ta, 0, "ktask_cleanup"); |
spinlock_lock(&ta->lock); |
ta->refcount--; |
for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
thread_t *thr; |
bool sleeping = false; |
thr = list_get_instance(cur, thread_t, th_link); |
if (thr == t) |
continue; |
spinlock_lock(&thr->lock); |
thr->interrupted = true; |
if (thr->state == Sleeping) |
sleeping = true; |
spinlock_unlock(&thr->lock); |
if (sleeping) |
waitq_interrupt_sleep(thr); |
} |
thread_ready(t); |
return 0; |
} |
/** Print task list */ |
void task_print_list(void) |
{ |
243,3 → 311,15 |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
} |
/** Kernel thread used to cleanup the task.
 *
 * FIX: the forward declaration gives this function internal linkage
 * (static); the definition must carry the same storage class to avoid a
 * linkage mismatch.
 *
 * @param arg Unused; present only to match the kernel thread entry
 *            signature expected by thread_create().
 */
static void ktask_cleanup(void *arg)
{
	/*
	 * TODO:
	 * Wait until it is safe to cleanup the task (i.e. all other threads
	 * exit) and do the cleanup (e.g. close IPC communication and release
	 * used futexes). When this thread exits, the task refcount drops to
	 * zero and the task structure is cleaned up.
	 */
}
/kernel/trunk/generic/src/proc/thread.c |
---|
233,6 → 233,8 |
*/ |
void thread_destroy(thread_t *t) |
{ |
bool destroy_task = false; |
ASSERT(t->state == Exiting); |
ASSERT(t->task); |
ASSERT(t->cpu); |
242,19 → 244,26 |
t->cpu->fpu_owner=NULL; |
spinlock_unlock(&t->cpu->lock); |
spinlock_unlock(&t->lock); |
spinlock_lock(&threads_lock); |
btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL); |
spinlock_unlock(&threads_lock); |
/* |
* Detach from the containing task. |
*/ |
spinlock_lock(&t->task->lock); |
list_remove(&t->th_link); |
if (--t->task->refcount == 0) { |
t->task->accept_new_threads = false; |
destroy_task = true; |
} |
spinlock_unlock(&t->task->lock); |
spinlock_unlock(&t->lock); |
if (destroy_task) |
task_destroy(t->task); |
spinlock_lock(&threads_lock); |
btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL); |
spinlock_unlock(&threads_lock); |
slab_free(thread_slab, t); |
} |
320,6 → 329,7 |
t->in_copy_from_uspace = false; |
t->in_copy_to_uspace = false; |
t->interrupted = false; |
t->detached = false; |
waitq_initialize(&t->join_wq); |
331,6 → 341,19 |
t->fpu_context_engaged = 0; |
/* |
* Attach to the containing task. |
*/ |
spinlock_lock(&task->lock); |
if (!task->accept_new_threads) { |
spinlock_unlock(&task->lock); |
slab_free(thread_slab, t); |
return NULL; |
} |
list_append(&t->th_link, &task->th_head); |
task->refcount++; |
spinlock_unlock(&task->lock); |
/* |
* Register this thread in the system-wide list. |
*/ |
ipl = interrupts_disable(); |
338,13 → 361,6 |
btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL); |
spinlock_unlock(&threads_lock); |
/* |
* Attach to the containing task. |
*/ |
spinlock_lock(&task->lock); |
list_append(&t->th_link, &task->th_head); |
spinlock_unlock(&task->lock); |
interrupts_restore(ipl); |
return t; |