HelenOS Subversion repository: Compare Revisions

Rev 2445 → Rev 2446

/trunk/kernel/generic/include/proc/task.h
64,8 → 64,6
SPINLOCK_DECLARE(lock);
char *name;
/** Pointer to the main thread. */
struct thread *main_thread;
/** List of threads contained in this task. */
link_t th_head;
/** Address space. */
75,10 → 73,10
/** Task security context. */
context_id_t context;
 
/** If this is true, new threads can become part of the task. */
bool accept_new_threads;
/** Number of references (i.e. threads). */
count_t refcount;
atomic_t refcount;
/** Number of threads that haven't exited yet. */
atomic_t lifecount;
 
/** Task capabilities. */
cap_t capabilities;
122,7 → 120,6
extern void cap_set(task_t *t, cap_t caps);
extern cap_t cap_get(task_t *t);
 
 
#ifndef task_create_arch
extern void task_create_arch(task_t *t);
#endif
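
The net effect of this hunk is to replace the lock-protected count_t refcount and the accept_new_threads flag with two atomic counters. A minimal sketch of the intended division of labor, using the atomic_predec() primitive that appears later in this revision (the helper names are hypothetical):

    /* Sketch only; helper names are not from the tree. */

    /* refcount: one reference per thread attached to the task; whoever
     * drops the last reference destroys the task structure. */
    static void task_release(task_t *t)
    {
            if (atomic_predec(&t->refcount) == 0)
                    task_destroy(t);
    }

    /* lifecount: threads that have not yet called thread_exit(); the
     * last exiting thread performs the task-wide cleanup. */
    static void task_note_exit(task_t *t)
    {
            if (atomic_predec(&t->lifecount) == 0)
                    task_cleanup(t);        /* hypothetical cleanup step */
    }

The accept_new_threads gate becomes unnecessary because, as the new comment in thread_exit() below notes, new threads can only be created by live threads of the same task.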
/trunk/kernel/generic/include/proc/thread.h
1,5 → 1,5
/*
* Copyright (c) 2001-2004 Jakub Jermar
* Copyright (c) 2001-2007 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
40,6 → 40,7
#include <time/timeout.h>
#include <cpu.h>
#include <synch/rwlock.h>
#include <synch/spinlock.h>
#include <adt/btree.h>
#include <mm/slab.h>
#include <arch/cpu.h>
80,17 → 81,10
Entering,
/** After a thread calls thread_exit(), it is put into Exiting state. */
Exiting,
/** Threads that were not detached but exited are in the Undead state. */
Undead
/** Threads that were not detached but exited are in the JoinMe state. */
JoinMe
} state_t;
 
/** Join types. */
typedef enum {
None,
TaskClnp, /**< The thread will be joined by ktaskclnp thread. */
TaskGC /**< The thread will be joined by ktaskgc thread. */
} thread_join_type_t;
 
/** Thread structure. There is one per thread. */
typedef struct thread {
link_t rq_link; /**< Run queue link. */
152,12 → 146,12
*/
bool interrupted;
/** Who joins the thread. */
thread_join_type_t join_type;
/** If true, thread_join_timeout() cannot be used on this thread. */
bool detached;
/** Waitq for thread_join_timeout(). */
waitq_t join_wq;
/** Link used in the joiner_head list. */
link_t joiner_link;
 
fpu_context_t *saved_fpu_context;
int fpu_context_exists;
/trunk/kernel/generic/include/adt/list.h
180,7 → 180,7
headless_list_split_or_concat(part1, part2);
}
 
#define list_get_instance(link,type,member) \
#define list_get_instance(link, type, member) \
((type *)(((uint8_t *)(link)) - ((uint8_t *)&(((type *)NULL)->member))))
 
extern bool list_member(const link_t *link, const link_t *head);
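
The reformatted macro is the kernel's container-of idiom: it subtracts the offset of the embedded link_t member from the link pointer to recover the enclosing structure, exactly as the task_kill() loop below does with list_get_instance(cur, thread_t, th_link). A hypothetical standalone usage sketch:

    /* Hypothetical structure with an embedded list link. */
    typedef struct foo {
            int value;
            link_t link;    /* chains foo_t items into a list */
    } foo_t;

    /* Recover the enclosing foo_t from a link_t obtained while
     * walking a list. */
    static foo_t *foo_of(link_t *lnk)
    {
            return list_get_instance(lnk, foo_t, link);
    }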
/trunk/kernel/generic/src/main/uinit.c
45,6 → 45,7
#include <proc/thread.h>
#include <userspace.h>
#include <mm/slab.h>
#include <arch.h>
 
/** Thread used to bring up a userspace thread.
*
54,12 → 55,21
void uinit(void *arg)
{
uspace_arg_t uarg;
 
/*
* So far, we don't have a use for joining userspace threads so we
* immediately detach each uinit thread. If joining of userspace threads
* is required, some userspace API based on the kernel mechanism will
* have to be implemented. Moreover, garbage collecting of threads that
* didn't detach themselves and nobody else joined them will have to be
* deployed for the event of forceful task termination.
*/
thread_detach(THREAD);
uarg.uspace_entry = ((uspace_arg_t *) arg)->uspace_entry;
uarg.uspace_stack = ((uspace_arg_t *) arg)->uspace_stack;
uarg.uspace_uarg = ((uspace_arg_t *) arg)->uspace_uarg;
uarg.uspace_thread_function = NULL;
uarg.uspace_thread_arg = NULL;
 
free((uspace_arg_t *) arg);
userspace(&uarg);
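
The new comment codifies the policy: each uinit thread detaches itself up front, so an exiting userspace thread is reclaimed immediately instead of parking in the JoinMe state until someone joins it. A sketch of the two lifecycles, using only calls visible elsewhere in this revision (the worker function and argument are placeholders):

    /* Detached lifecycle (what uinit now does): resources are freed as
     * soon as the thread exits; it can never be joined. */
    thread_detach(THREAD);
    /* ... work ...; thread_exit() reclaims everything. */

    /* Joinable lifecycle: the exited thread waits in JoinMe until the
     * joiner collects and detaches it. */
    thread_t *t = thread_create(worker, arg, TASK, 0, "worker", false);
    thread_ready(t);
    thread_join(t);         /* wait for the thread to reach JoinMe */
    thread_detach(t);       /* allow its resources to be reclaimed */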
/trunk/kernel/generic/src/proc/scheduler.c
405,7 → 405,7
* Avoid deadlock.
*/
spinlock_unlock(&THREAD->lock);
delay(10);
delay(HZ);
spinlock_lock(&THREAD->lock);
DEADLOCK_PROBE(p_joinwq,
DEADLOCK_THRESHOLD);
415,7 → 415,7
WAKEUP_FIRST);
spinlock_unlock(&THREAD->join_wq.lock);
THREAD->state = Undead;
THREAD->state = JoinMe;
spinlock_unlock(&THREAD->lock);
}
break;
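
The retained retry loop is the usual escape from a lock-ordering conflict: the exiting thread holds THREAD->lock but must also take its join_wq lock, which a joining thread may hold while trying to take THREAD->lock. Dropping the held lock, backing off, and retrying breaks the cycle; DEADLOCK_PROBE counts the retries. Schematically (the trylock condition is a stand-in for the real test):

    /* Back-off retry: never spin on lock B while holding lock A if
     * another CPU can spin on lock A while holding lock B. */
    for (;;) {
            if (locks_acquired_in_order())  /* hypothetical condition */
                    break;
            spinlock_unlock(&THREAD->lock);
            delay(HZ);                      /* let the other CPU finish */
            spinlock_lock(&THREAD->lock);
            DEADLOCK_PROBE(p_joinwq, DEADLOCK_THRESHOLD);
    }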
/trunk/kernel/generic/src/proc/task.c
56,7 → 56,6
#include <errno.h>
#include <func.h>
#include <syscall/copy.h>
#include <console/klog.h>
 
#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
79,9 → 78,6
 
static task_id_t task_counter = 0;
 
static void ktaskclnp(void *arg);
static void ktaskgc(void *arg);
 
/** Initialize tasks
*
* Initialize kernel tasks support.
164,12 → 160,11
list_initialize(&ta->th_head);
ta->as = as;
ta->name = name;
ta->main_thread = NULL;
ta->refcount = 0;
atomic_set(&ta->refcount, 0);
atomic_set(&ta->lifecount, 0);
ta->context = CONTEXT;
 
ta->capabilities = 0;
ta->accept_new_threads = true;
ta->cycles = 0;
ipc_answerbox_init(&ta->answerbox);
191,10 → 186,8
atomic_inc(&as->refcount);
 
spinlock_lock(&tasks_lock);
 
ta->taskid = ++task_counter;
btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);
 
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);
 
207,9 → 200,26
*/
void task_destroy(task_t *t)
{
/*
* Remove the task from the task B+tree.
*/
spinlock_lock(&tasks_lock);
btree_remove(&tasks_btree, t->taskid, NULL);
spinlock_unlock(&tasks_lock);
 
/*
* Perform architecture specific task destruction.
*/
task_destroy_arch(t);
 
/*
* Free up dynamically allocated state.
*/
btree_destroy(&t->futexes);
 
/*
* Drop our reference to the address space.
*/
if (atomic_predec(&t->as->refcount) == 0)
as_destroy(t->as);
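
The address-space release works because atomic_predec() returns the post-decrement value, so among any number of concurrent droppers exactly one observes zero and runs the destructor. A generic sketch of the pattern (the wrapper is hypothetical):

    /* Exactly one of N concurrent callers sees the counter reach zero,
     * so the teardown runs once without a lock around the decrement. */
    static void as_release(as_t *as)
    {
            if (atomic_predec(&as->refcount) == 0)
                    as_destroy(as);         /* last reference dropped */
    }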
229,7 → 239,7
as_t *as;
as_area_t *a;
int rc;
thread_t *t1, *t2;
thread_t *t;
task_t *task;
uspace_arg_t *kernel_uarg;
 
263,19 → 273,12
/*
* Create the main thread.
*/
t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
t = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
"uinit", false);
ASSERT(t1);
ASSERT(t);
/*
* Create killer thread for the new task.
*/
t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc", true);
ASSERT(t2);
thread_ready(t2);
thread_ready(t);
 
thread_ready(t1);
 
return task;
}
 
347,6 → 350,9
 
/** Kill task.
*
* This function is idempotent.
* It signals all of the task's threads to bail out.
*
* @param id ID of the task to be killed.
*
* @return 0 on success or an error code from errno.h
355,7 → 361,6
{
ipl_t ipl;
task_t *ta;
thread_t *t;
link_t *cur;
 
if (id == 1)
363,36 → 368,22
ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
 
if (!(ta = task_find_by_id(id))) {
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);
return ENOENT;
}
 
spinlock_lock(&ta->lock);
ta->refcount++;
spinlock_unlock(&ta->lock);
 
btree_remove(&tasks_btree, ta->taskid, NULL);
spinlock_unlock(&tasks_lock);
t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp", true);
spinlock_lock(&ta->lock);
ta->accept_new_threads = false;
ta->refcount--;
 
/*
* Interrupt all threads except ktaskclnp.
*/
spinlock_lock(&ta->lock);
for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
thread_t *thr;
bool sleeping = false;
thr = list_get_instance(cur, thread_t, th_link);
if (thr == t)
continue;
spinlock_lock(&thr->lock);
thr->interrupted = true;
403,13 → 394,9
if (sleeping)
waitq_interrupt_sleep(thr);
}
spinlock_unlock(&ta->lock);
interrupts_restore(ipl);
if (t)
thread_ready(t);
 
return 0;
}
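
task_kill() no longer creates a ktaskclnp thread; it only flags every thread of the task as interrupted and wakes the sleeping ones, after which the threads dismantle themselves through the refcount/lifecount path. A sketch of how a sleeping thread is expected to notice (the surrounding code is schematic; the synch identifiers exist in the kernel but their use here is illustrative):

    /* In an interruptible sleep: task_kill() makes the sleep return
     * early and the thread exits on its own. */
    int rc = waitq_sleep_timeout(&wq, SYNCH_NO_TIMEOUT,
        SYNCH_FLAGS_INTERRUPTIBLE);
    if (rc == ESYNCH_INTERRUPTED)
            thread_exit();  /* drops lifecount; last exiter cleans up */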
 
425,7 → 412,8
printf("taskid name ctx address as cycles threads "
"calls callee\n");
printf("------ ---------- --- ---------- ---------- ---------- ------- " "------ ------>\n");
printf("------ ---------- --- ---------- ---------- ---------- ------- "
"------ ------>\n");
 
for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
cur = cur->next) {
464,135 → 452,5
interrupts_restore(ipl);
}
 
/** Kernel thread used to cleanup the task after it is killed. */
void ktaskclnp(void *arg)
{
ipl_t ipl;
thread_t *t = NULL, *main_thread;
link_t *cur;
bool again;
 
thread_detach(THREAD);
 
loop:
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
main_thread = TASK->main_thread;
/*
* Find a thread to join.
*/
again = false;
for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
t = list_get_instance(cur, thread_t, th_link);
 
spinlock_lock(&t->lock);
if (t == THREAD) {
spinlock_unlock(&t->lock);
continue;
} else if (t == main_thread) {
spinlock_unlock(&t->lock);
continue;
} else if (t->join_type != None) {
spinlock_unlock(&t->lock);
again = true;
continue;
} else {
t->join_type = TaskClnp;
spinlock_unlock(&t->lock);
again = false;
break;
}
}
spinlock_unlock(&TASK->lock);
interrupts_restore(ipl);
if (again) {
/*
* Other cleanup (e.g. ktaskgc) is in progress.
*/
scheduler();
goto loop;
}
if (t != THREAD) {
ASSERT(t != main_thread); /* uinit is joined and detached
* in ktaskgc */
thread_join(t);
thread_detach(t);
goto loop; /* go for another thread */
}
/*
* Now there are no other threads in this task
* and no new threads can be created.
*/
 
ipc_cleanup();
futex_cleanup();
klog_printf("Cleanup of task %llu completed.", TASK->taskid);
}
 
/** Kernel thread used to kill the userspace task when its main thread exits.
*
* This thread waits until the main userspace thread (i.e. uinit) exits.
* When this happens, the task is killed. In the meantime, exited threads
* are garbage collected.
*
* @param arg Pointer to the thread structure of the task's main thread.
*/
void ktaskgc(void *arg)
{
thread_t *t = (thread_t *) arg;
loop:
/*
* Userspace threads cannot detach themselves,
* therefore the thread pointer is guaranteed to be valid.
*/
if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
ESYNCH_TIMEOUT) { /* sleep uninterruptibly here! */
ipl_t ipl;
link_t *cur;
thread_t *thr = NULL;
/*
* The join timed out. Try to do some garbage collection of
* Undead threads.
*/
more_gc:
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
for (cur = TASK->th_head.next; cur != &TASK->th_head;
cur = cur->next) {
thr = list_get_instance(cur, thread_t, th_link);
spinlock_lock(&thr->lock);
if (thr != t && thr->state == Undead &&
thr->join_type == None) {
thr->join_type = TaskGC;
spinlock_unlock(&thr->lock);
break;
}
spinlock_unlock(&thr->lock);
thr = NULL;
}
spinlock_unlock(&TASK->lock);
interrupts_restore(ipl);
if (thr) {
thread_join(thr);
thread_detach(thr);
scheduler();
goto more_gc;
}
goto loop;
}
thread_detach(t);
task_kill(TASK->taskid);
}
 
/** @}
*/
/trunk/kernel/generic/src/proc/thread.c
67,6 → 67,7
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>
#include <console/klog.h>
 
 
/** Thread states */
77,7 → 78,7
"Ready",
"Entering",
"Exiting",
"Undead"
"JoinMe"
};
 
/** Lock protecting the threads_btree B+tree.
328,7 → 329,6
t->in_copy_to_uspace = false;
 
t->interrupted = false;
t->join_type = None;
t->detached = false;
waitq_initialize(&t->join_wq);
342,23 → 342,6
/* might depend on previous initialization */
thread_create_arch(t);
 
ipl = interrupts_disable();
spinlock_lock(&task->lock);
if (!task->accept_new_threads) {
spinlock_unlock(&task->lock);
slab_free(thread_slab, t);
interrupts_restore(ipl);
return NULL;
} else {
/*
* Bump the reference count so that this task cannot be
* destroyed while the new thread is being attached to it.
*/
task->refcount++;
}
spinlock_unlock(&task->lock);
interrupts_restore(ipl);
 
if (!(flags & THREAD_FLAG_NOATTACH))
thread_attach(t, task);
 
373,9 → 356,7
*/
void thread_destroy(thread_t *t)
{
bool destroy_task = false;
 
ASSERT(t->state == Exiting || t->state == Undead);
ASSERT(t->state == Exiting || t->state == JoinMe);
ASSERT(t->task);
ASSERT(t->cpu);
 
395,13 → 376,13
*/
spinlock_lock(&t->task->lock);
list_remove(&t->th_link);
if (--t->task->refcount == 0) {
t->task->accept_new_threads = false;
destroy_task = true;
}
spinlock_unlock(&t->task->lock);
if (destroy_task)
 
/*
* t is guaranteed to be the very last thread of its task.
* It is safe to destroy the task.
*/
if (atomic_predec(&t->task->refcount) == 0)
task_destroy(t->task);
/*
431,12 → 412,11
/*
* Attach to the current task.
*/
ipl = interrupts_disable();
spinlock_lock(&task->lock);
ASSERT(task->refcount);
atomic_inc(&task->refcount);
atomic_inc(&task->lifecount);
list_append(&t->th_link, &task->th_head);
if (task->refcount == 1)
task->main_thread = t;
spinlock_unlock(&task->lock);
 
/*
459,6 → 439,21
{
ipl_t ipl;
 
if (atomic_predec(&TASK->lifecount) == 0) {
/*
* We are the last thread in the task that still has not exited.
* With the exception of the moment the task was created, new
* threads can only be created by threads of the same task.
* We are safe to perform cleanup.
*/
if (THREAD->flags & THREAD_FLAG_USPACE) {
ipc_cleanup();
futex_cleanup();
klog_printf("Cleanup of task %llu completed.",
TASK->taskid);
}
}
 
restart:
ipl = interrupts_disable();
spinlock_lock(&THREAD->lock);
468,6 → 463,7
interrupts_restore(ipl);
goto restart;
}
THREAD->state = Exiting;
spinlock_unlock(&THREAD->lock);
scheduler();
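
Assembled from the hunks in this revision, the full teardown sequence now reads (a summary of the diff, not code from the tree):

    /*
     * thread_exit()
     *   -> atomic_predec(&TASK->lifecount): the last exiter runs
     *      ipc_cleanup() and futex_cleanup() (userspace tasks only)
     *   -> THREAD->state = Exiting; scheduler()
     * scheduler(), in the Exiting case:
     *   -> detached thread: destroyed immediately
     *   -> joinable thread: wake join_wq, state = JoinMe
     * thread_join()/thread_detach() by the joiner:
     *   -> thread_destroy(): atomic_predec(&task->refcount);
     *      the last drop calls task_destroy()
     */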
524,7 → 520,7
 
/** Detach thread.
*
* Mark the thread as detached. If the thread is already in the Undead state,
* Mark the thread as detached. If the thread is already in the JoinMe state,
* deallocate its resources.
*
* @param t Thread to be detached.
540,7 → 536,7
ipl = interrupts_disable();
spinlock_lock(&t->lock);
ASSERT(!t->detached);
if (t->state == Undead) {
if (t->state == JoinMe) {
thread_destroy(t); /* unlocks &t->lock */
interrupts_restore(ipl);
return;
702,8 → 698,6
rc = copy_to_uspace(uspace_thread_id, &t->tid,
sizeof(t->tid));
if (rc != 0) {
ipl_t ipl;
 
/*
* We have encountered a failure, but the thread
* has already been created. We need to undo its
711,26 → 705,13
*/
 
/*
* The new thread structure is initialized,
* but is still not visible to the system.
* The new thread structure is initialized, but
* is still not visible to the system.
* We can safely deallocate it.
*/
slab_free(thread_slab, t);
free(kernel_uarg);
 
/*
* Now we need to decrement the task reference
* counter. Because we are running within the
* same task, thread t is not the last thread
* in the task, so it is safe to merely
* decrement the counter.
*/
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
TASK->refcount--;
spinlock_unlock(&TASK->lock);
interrupts_restore(ipl);
 
return (unative_t) rc;
}
}
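
The deleted refcount fix-up at the end follows from moving the reference bump out of thread_create() and into thread_attach(): a thread created with THREAD_FLAG_NOATTACH has not yet touched its task's counters, so the error path only needs to free the slab object. The overall shape of sys_thread_create() after this change is presumably (names and ordering assumed from the visible hunks):

    t = thread_create(uinit, kernel_uarg, TASK,
        THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, "uthread", false);
    if (t) {
            int rc = copy_to_uspace(uspace_thread_id, &t->tid,
                sizeof(t->tid));
            if (rc != 0) {
                    /* Initialized but attached nowhere: freeing the slab
                     * object fully undoes the creation. */
                    slab_free(thread_slab, t);
                    free(kernel_uarg);
                    return (unative_t) rc;
            }
            thread_attach(t, TASK);  /* takes refcount and lifecount */
            thread_ready(t);
    }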