/kernel/trunk/generic/src/proc/thread.c (new file, 615 lines)
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief Thread management functions. |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/uarg.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <synch/synch.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <synch/rwlock.h> |
#include <cpu.h> |
#include <func.h> |
#include <context.h> |
#include <adt/btree.h> |
#include <adt/list.h> |
#include <typedefs.h> |
#include <time/clock.h> |
#include <config.h> |
#include <arch/interrupt.h> |
#include <smp/ipi.h> |
#include <arch/faddr.h> |
#include <atomic.h> |
#include <memstr.h> |
#include <print.h> |
#include <mm/slab.h> |
#include <debug.h> |
#include <main/uinit.h> |
#include <syscall/copy.h> |
#include <errno.h> |
/** Thread states */ |
char *thread_states[] = { |
"Invalid", |
"Running", |
"Sleeping", |
"Ready", |
"Entering", |
"Exiting", |
"Undead" |
}; |
/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */ |
SPINLOCK_INITIALIZE(threads_lock); |
/** B+tree of all threads. |
* |
* When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long |
* as the threads_lock is held. |
*/ |
btree_t threads_btree; |
SPINLOCK_INITIALIZE(tidlock); |
__u32 last_tid = 0; |
static slab_cache_t *thread_slab; |
#ifdef ARCH_HAS_FPU |
slab_cache_t *fpu_context_slab; |
#endif |
/** Thread wrapper |
* |
* This wrapper is provided to ensure that every thread |
* makes a call to thread_exit() when its implementing |
* function returns. |
* |
 * Interrupts are assumed to be disabled on entry (they are re-enabled here before f() is called).
* |
*/ |
static void cushion(void) |
{ |
void (*f)(void *) = THREAD->thread_code; |
void *arg = THREAD->thread_arg; |
/* this is where each thread wakes up after its creation */ |
spinlock_unlock(&THREAD->lock); |
interrupts_enable(); |
f(arg); |
thread_exit(); |
/* not reached */ |
} |
/** Initialization and allocation for thread_t structure */ |
static int thr_constructor(void *obj, int kmflags) |
{ |
thread_t *t = (thread_t *)obj; |
int status; |
spinlock_initialize(&t->lock, "thread_t_lock"); |
link_initialize(&t->rq_link); |
link_initialize(&t->wq_link); |
link_initialize(&t->th_link); |
#ifdef ARCH_HAS_FPU |
# ifdef CONFIG_FPU_LAZY |
t->saved_fpu_context = NULL; |
# else |
t->saved_fpu_context = slab_alloc(fpu_context_slab,kmflags); |
if (!t->saved_fpu_context) |
return -1; |
# endif |
#endif |
t->kstack = frame_alloc_rc(STACK_FRAMES, FRAME_KA | kmflags,&status); |
if (status) { |
#ifdef ARCH_HAS_FPU |
if (t->saved_fpu_context) |
slab_free(fpu_context_slab,t->saved_fpu_context); |
#endif |
return -1; |
} |
return 0; |
} |
/** Destruction of thread_t object */ |
static int thr_destructor(void *obj) |
{ |
thread_t *t = (thread_t *)obj; |
frame_free(KA2PA(t->kstack)); |
#ifdef ARCH_HAS_FPU |
if (t->saved_fpu_context) |
slab_free(fpu_context_slab,t->saved_fpu_context); |
#endif |
return 1; /* One page freed */ |
} |
/** Initialize threads |
* |
* Initialize kernel threads support. |
* |
*/ |
void thread_init(void) |
{ |
THREAD = NULL; |
atomic_set(&nrdy,0); |
thread_slab = slab_cache_create("thread_slab", |
sizeof(thread_t),0, |
thr_constructor, thr_destructor, 0); |
#ifdef ARCH_HAS_FPU |
fpu_context_slab = slab_cache_create("fpu_slab", |
sizeof(fpu_context_t), |
FPU_CONTEXT_ALIGN, |
NULL, NULL, 0); |
#endif |
btree_create(&threads_btree); |
} |
/** Make thread ready |
* |
* Switch thread t to the ready state. |
* |
* @param t Thread to make ready. |
* |
*/ |
void thread_ready(thread_t *t) |
{ |
cpu_t *cpu; |
runq_t *r; |
ipl_t ipl; |
int i, avg; |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
ASSERT(! (t->state == Ready)); |
i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority; |
cpu = CPU; |
if (t->flags & X_WIRED) { |
cpu = t->cpu; |
} |
t->state = Ready; |
spinlock_unlock(&t->lock); |
/* |
* Append t to respective ready queue on respective processor. |
*/ |
r = &cpu->rq[i]; |
spinlock_lock(&r->lock); |
list_append(&t->rq_link, &r->rq_head); |
r->n++; |
spinlock_unlock(&r->lock); |
atomic_inc(&nrdy); |
avg = atomic_get(&nrdy) / config.cpu_active; |
atomic_inc(&cpu->nrdy); |
interrupts_restore(ipl); |
} |
/** Destroy thread memory structure
 *
 * Detach the thread from all queues, CPUs etc. and destroy it.
 *
 * Assumes thread->lock is held; the lock is released by this function.
 */
void thread_destroy(thread_t *t) |
{ |
bool destroy_task = false; |
ASSERT(t->state == Exiting || t->state == Undead); |
ASSERT(t->task); |
ASSERT(t->cpu); |
spinlock_lock(&t->cpu->lock); |
if(t->cpu->fpu_owner==t) |
t->cpu->fpu_owner=NULL; |
spinlock_unlock(&t->cpu->lock); |
spinlock_unlock(&t->lock); |
spinlock_lock(&threads_lock); |
btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL); |
spinlock_unlock(&threads_lock); |
/* |
* Detach from the containing task. |
*/ |
spinlock_lock(&t->task->lock); |
list_remove(&t->th_link); |
if (--t->task->refcount == 0) { |
t->task->accept_new_threads = false; |
destroy_task = true; |
} |
spinlock_unlock(&t->task->lock); |
if (destroy_task) |
task_destroy(t->task); |
slab_free(thread_slab, t); |
} |
/** Create new thread |
* |
* Create a new thread. |
* |
* @param func Thread's implementing function. |
* @param arg Thread's implementing function argument. |
* @param task Task to which the thread belongs. |
* @param flags Thread flags. |
* @param name Symbolic name. |
* |
* @return New thread's structure on success, NULL on failure. |
* |
*/ |
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name) |
{ |
thread_t *t; |
ipl_t ipl; |
t = (thread_t *) slab_alloc(thread_slab, 0); |
if (!t) |
return NULL; |
thread_create_arch(t); |
/* Not needed, but good for debugging */ |
memsetb((__address)t->kstack, THREAD_STACK_SIZE * 1<<STACK_FRAMES, 0); |
ipl = interrupts_disable(); |
spinlock_lock(&tidlock); |
t->tid = ++last_tid; |
spinlock_unlock(&tidlock); |
interrupts_restore(ipl); |
context_save(&t->saved_context); |
context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE); |
the_initialize((the_t *) t->kstack); |
ipl = interrupts_disable(); |
t->saved_context.ipl = interrupts_read(); |
interrupts_restore(ipl); |
memcpy(t->name, name, THREAD_NAME_BUFLEN); |
t->thread_code = func; |
t->thread_arg = arg; |
t->ticks = -1; |
t->priority = -1; /* start in rq[0] */ |
t->cpu = NULL; |
t->flags = 0; |
t->state = Entering; |
t->call_me = NULL; |
t->call_me_with = NULL; |
timeout_initialize(&t->sleep_timeout); |
t->sleep_interruptible = false; |
t->sleep_queue = NULL; |
t->timeout_pending = 0; |
t->in_copy_from_uspace = false; |
t->in_copy_to_uspace = false; |
t->interrupted = false; |
t->join_type = None; |
t->detached = false; |
waitq_initialize(&t->join_wq); |
t->rwlock_holder_type = RWLOCK_NONE; |
t->task = task; |
t->fpu_context_exists = 0; |
t->fpu_context_engaged = 0; |
/* |
* Attach to the containing task. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&task->lock); |
if (!task->accept_new_threads) { |
spinlock_unlock(&task->lock); |
slab_free(thread_slab, t); |
interrupts_restore(ipl); |
return NULL; |
} |
list_append(&t->th_link, &task->th_head); |
if (task->refcount++ == 0) |
task->main_thread = t; |
spinlock_unlock(&task->lock); |
/* |
* Register this thread in the system-wide list. |
*/ |
spinlock_lock(&threads_lock); |
btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL); |
spinlock_unlock(&threads_lock); |
interrupts_restore(ipl); |
return t; |
} |
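/*
 * Illustrative sketch (not part of the original file): the usual way a kernel
 * thread is brought to life with the API above. The demo_* names are
 * hypothetical; only thread_create() and thread_ready() come from this file.
 */
#if 0
static void demo_worker(void *arg)
{
	/* thread body; when it returns, cushion() calls thread_exit() */
}

static void demo_spawn(void)
{
	thread_t *t;

	t = thread_create(demo_worker, NULL, TASK, 0, "demo_worker");
	if (t)
		thread_ready(t);	/* insert it into a CPU's ready queue */
}
#endif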
/** Terminate thread. |
* |
* End current thread execution and switch it to the exiting |
* state. All pending timeouts are executed. |
* |
*/ |
void thread_exit(void) |
{ |
ipl_t ipl; |
restart: |
ipl = interrupts_disable(); |
spinlock_lock(&THREAD->lock); |
if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */ |
spinlock_unlock(&THREAD->lock); |
interrupts_restore(ipl); |
goto restart; |
} |
THREAD->state = Exiting; |
spinlock_unlock(&THREAD->lock); |
scheduler(); |
/* Not reached */ |
while (1) |
; |
} |
/** Thread sleep |
* |
* Suspend execution of the current thread. |
* |
* @param sec Number of seconds to sleep. |
* |
*/ |
void thread_sleep(__u32 sec) |
{ |
thread_usleep(sec*1000000); |
} |
/** Wait for another thread to exit. |
* |
* @param t Thread to join on exit. |
* @param usec Timeout in microseconds. |
* @param flags Mode of operation. |
* |
* @return An error code from errno.h or an error code from synch.h. |
*/ |
int thread_join_timeout(thread_t *t, __u32 usec, int flags) |
{ |
ipl_t ipl; |
int rc; |
if (t == THREAD) |
return EINVAL; |
/* |
* Since thread join can only be called once on an undetached thread, |
* the thread pointer is guaranteed to be still valid. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
ASSERT(!t->detached); |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
rc = waitq_sleep_timeout(&t->join_wq, usec, flags); |
return rc; |
} |
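/*
 * Illustrative sketch (not part of the original file): the join/detach
 * protocol, as used by ktaskgc in task.c below. A thread may be joined at
 * most once; once the join succeeds, thread_detach() is what finally
 * releases the Undead thread's resources.
 */
#if 0
static void demo_reap(thread_t *t)
{
	if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT)
		return;			/* t has not exited yet; retry later */
	thread_detach(t);		/* t is Undead by now; deallocate it */
}
#endif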
/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already in the Undead state,
 * deallocate its resources.
 *
 * @param t Thread to be detached.
 */
void thread_detach(thread_t *t) |
{ |
ipl_t ipl; |
/*
 * Since the thread is expected not to have been detached yet,
 * the pointer to it is guaranteed to still be valid.
 */
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
ASSERT(!t->detached); |
if (t->state == Undead) { |
thread_destroy(t); /* unlocks &t->lock */ |
interrupts_restore(ipl); |
return; |
} else { |
t->detached = true; |
} |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
} |
/** Thread usleep |
* |
* Suspend execution of the current thread. |
* |
* @param usec Number of microseconds to sleep. |
* |
*/ |
void thread_usleep(__u32 usec) |
{ |
waitq_t wq; |
waitq_initialize(&wq); |
(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING); |
} |
/** Register thread out-of-context invocation |
* |
* Register a function and its argument to be executed |
* on next context switch to the current thread. |
* |
* @param call_me Out-of-context function. |
* @param call_me_with Out-of-context function argument. |
* |
*/ |
void thread_register_call_me(void (* call_me)(void *), void *call_me_with) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&THREAD->lock); |
THREAD->call_me = call_me; |
THREAD->call_me_with = call_me_with; |
spinlock_unlock(&THREAD->lock); |
interrupts_restore(ipl); |
} |
/** Print debug information about all threads */
void thread_print_list(void) |
{ |
link_t *cur; |
ipl_t ipl; |
/* Messing with thread structures, avoid deadlock */ |
ipl = interrupts_disable(); |
spinlock_lock(&threads_lock); |
for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) { |
btree_node_t *node; |
int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) { |
thread_t *t; |
t = (thread_t *) node->value[i]; |
printf("%s: address=%#zx, tid=%zd, state=%s, task=%#zx, code=%#zx, stack=%#zx, cpu=", |
t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack); |
if (t->cpu) |
printf("cpu%zd", t->cpu->id); |
else |
printf("none"); |
if (t->state == Sleeping) { |
printf(", kst=%#zx", t->kstack); |
printf(", wq=%#zx", t->sleep_queue); |
} |
printf("\n"); |
} |
} |
spinlock_unlock(&threads_lock); |
interrupts_restore(ipl); |
} |
/** Check whether thread exists. |
* |
* Note that threads_lock must be already held and |
* interrupts must be already disabled. |
* |
* @param t Pointer to thread. |
* |
* @return True if thread t is known to the system, false otherwise. |
*/ |
bool thread_exists(thread_t *t) |
{ |
btree_node_t *leaf; |
return btree_search(&threads_btree, (btree_key_t) ((__address) t), &leaf) != NULL; |
} |
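/*
 * Illustrative sketch (not part of the original file): the locking discipline
 * thread_exists() expects from its caller. While threads_lock is held, a
 * thread found in threads_btree cannot disappear (see the comment on
 * threads_btree at the top of this file).
 */
#if 0
static bool demo_probe(thread_t *t)
{
	ipl_t ipl;
	bool found;

	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	found = thread_exists(t);
	/* t may only be dereferenced here, before threads_lock is dropped */
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);

	return found;
}
#endif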
/** Process syscall to create new thread. |
* |
*/ |
__native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name) |
{ |
thread_t *t; |
char namebuf[THREAD_NAME_BUFLEN]; |
uspace_arg_t *kernel_uarg; |
__u32 tid; |
int rc; |
rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN); |
if (rc != 0) |
return (__native) rc; |
kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0); |
rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t)); |
if (rc != 0) { |
free(kernel_uarg); |
return (__native) rc; |
} |
if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) { |
tid = t->tid; |
thread_ready(t); |
return (__native) tid; |
} else { |
free(kernel_uarg); |
} |
return (__native) ENOMEM; |
} |
/** Process syscall to terminate thread. |
* |
*/ |
__native sys_thread_exit(int uspace_status) |
{ |
thread_exit(); |
/* Unreachable */ |
return 0; |
} |
/** @} |
*/ |
/kernel/trunk/generic/src/proc/scheduler.c (new file, 701 lines)
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief Scheduler and load balancing. |
* |
* This file contains the scheduler and kcpulb kernel thread which |
* performs load-balancing of per-CPU run queues. |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/as.h> |
#include <time/delay.h> |
#include <arch/asm.h> |
#include <arch/faddr.h> |
#include <atomic.h> |
#include <synch/spinlock.h> |
#include <config.h> |
#include <context.h> |
#include <func.h> |
#include <arch.h> |
#include <adt/list.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <cpu.h> |
#include <print.h> |
#include <debug.h> |
static void before_task_runs(void); |
static void before_thread_runs(void); |
static void after_thread_ran(void); |
static void scheduler_separated_stack(void); |
atomic_t nrdy; /**< Number of ready threads in the system. */ |
/** Carry out actions before new task runs. */ |
void before_task_runs(void) |
{ |
before_task_runs_arch(); |
} |
/** Take actions before new thread runs. |
* |
* Perform actions that need to be |
* taken before the newly selected |
 * thread is passed control.
* |
* THREAD->lock is locked on entry |
* |
*/ |
void before_thread_runs(void) |
{ |
before_thread_runs_arch(); |
#ifdef CONFIG_FPU_LAZY |
if(THREAD==CPU->fpu_owner) |
fpu_enable(); |
else |
fpu_disable(); |
#else |
fpu_enable(); |
if (THREAD->fpu_context_exists) |
fpu_context_restore(THREAD->saved_fpu_context); |
else { |
fpu_init(); |
THREAD->fpu_context_exists=1; |
} |
#endif |
} |
/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
* |
* THREAD->lock is locked on entry |
* |
*/ |
void after_thread_ran(void) |
{ |
after_thread_ran_arch(); |
} |
#ifdef CONFIG_FPU_LAZY |
void scheduler_fpu_lazy_request(void) |
{ |
restart: |
fpu_enable(); |
spinlock_lock(&CPU->lock); |
/* Save old context */ |
if (CPU->fpu_owner != NULL) { |
spinlock_lock(&CPU->fpu_owner->lock); |
fpu_context_save(CPU->fpu_owner->saved_fpu_context); |
/* don't prevent migration */ |
CPU->fpu_owner->fpu_context_engaged=0; |
spinlock_unlock(&CPU->fpu_owner->lock); |
CPU->fpu_owner = NULL; |
} |
spinlock_lock(&THREAD->lock); |
if (THREAD->fpu_context_exists) { |
fpu_context_restore(THREAD->saved_fpu_context); |
} else { |
/* Allocate FPU context */ |
if (!THREAD->saved_fpu_context) { |
/* Might sleep */ |
spinlock_unlock(&THREAD->lock); |
spinlock_unlock(&CPU->lock); |
THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, |
0); |
/* We may have switched CPUs during slab_alloc */ |
goto restart; |
} |
fpu_init(); |
THREAD->fpu_context_exists=1; |
} |
CPU->fpu_owner=THREAD; |
THREAD->fpu_context_engaged = 1; |
spinlock_unlock(&THREAD->lock); |
spinlock_unlock(&CPU->lock); |
} |
#endif |
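/*
 * Illustrative sketch (not part of the original file): with CONFIG_FPU_LAZY,
 * before_thread_runs() leaves the FPU disabled for threads that don't own it,
 * so the first FPU instruction traps. The architecture's "FPU unavailable"
 * handler (the name below is hypothetical) then calls
 * scheduler_fpu_lazy_request() to take ownership of the FPU.
 */
#if 0
static void demo_fpu_unavailable_handler(void)
{
	scheduler_fpu_lazy_request();
	/* the FPU is now enabled and owned by THREAD; the faulting instruction is retried */
}
#endif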
/** Initialize scheduler |
* |
* Initialize kernel scheduler. |
* |
*/ |
void scheduler_init(void) |
{ |
} |
/** Get thread to be scheduled |
* |
* Get the optimal thread to be scheduled |
* according to thread accounting and scheduler |
* policy. |
* |
* @return Thread to be scheduled. |
* |
*/ |
static thread_t *find_best_thread(void) |
{ |
thread_t *t; |
runq_t *r; |
int i; |
ASSERT(CPU != NULL); |
loop: |
interrupts_enable(); |
if (atomic_get(&CPU->nrdy) == 0) { |
/*
 * Since there was nothing to run, the CPU goes to sleep
 * until a hardware interrupt or an IPI arrives.
 * This saves energy and also plays well with hyperthreading.
 */
/*
 * An interrupt might occur right now and wake up a thread.
 * In such a case, the CPU will continue to go to sleep
 * even though there is a runnable thread.
 */
cpu_sleep(); |
goto loop; |
} |
interrupts_disable(); |
for (i = 0; i<RQ_COUNT; i++) { |
r = &CPU->rq[i]; |
spinlock_lock(&r->lock); |
if (r->n == 0) { |
/* |
* If this queue is empty, try a lower-priority queue. |
*/ |
spinlock_unlock(&r->lock); |
continue; |
} |
atomic_dec(&CPU->nrdy); |
atomic_dec(&nrdy); |
r->n--; |
/* |
* Take the first thread from the queue. |
*/ |
t = list_get_instance(r->rq_head.next, thread_t, rq_link); |
list_remove(&t->rq_link); |
spinlock_unlock(&r->lock); |
spinlock_lock(&t->lock); |
t->cpu = CPU; |
t->ticks = us2ticks((i+1)*10000); |
t->priority = i; /* correct rq index */ |
/*
 * Clear the X_STOLEN flag so that t can be migrated the next time load balancing is needed.
 */
t->flags &= ~X_STOLEN; |
spinlock_unlock(&t->lock); |
return t; |
} |
goto loop; |
} |
/** Prevent rq starvation |
* |
* Prevent low priority threads from starving in rq's. |
* |
 * When the function decides to relink the rq's, it reconnects
 * the respective list pointers so that threads with priority
 * greater than start are each moved up one queue (towards higher priority).
* |
* @param start Threshold priority. |
* |
*/ |
static void relink_rq(int start) |
{ |
link_t head; |
runq_t *r; |
int i, n; |
list_initialize(&head); |
spinlock_lock(&CPU->lock); |
if (CPU->needs_relink > NEEDS_RELINK_MAX) { |
for (i = start; i<RQ_COUNT-1; i++) { |
/* remember and empty rq[i + 1] */ |
r = &CPU->rq[i + 1]; |
spinlock_lock(&r->lock); |
list_concat(&head, &r->rq_head); |
n = r->n; |
r->n = 0; |
spinlock_unlock(&r->lock); |
/* append rq[i + 1] to rq[i] */ |
r = &CPU->rq[i]; |
spinlock_lock(&r->lock); |
list_concat(&r->rq_head, &head); |
r->n += n; |
spinlock_unlock(&r->lock); |
} |
CPU->needs_relink = 0; |
} |
spinlock_unlock(&CPU->lock); |
} |
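/*
 * Worked example (illustrative, not part of the original file), assuming
 * RQ_COUNT == 4 and start == 1: once needs_relink exceeds NEEDS_RELINK_MAX,
 * the loop runs for i = 1 and i = 2, so rq[2] is appended to rq[1] and then
 * rq[3] to rq[2]. Threads that sat at priority 2 or 3 each move one queue
 * closer to rq[0], rq[3] ends up empty, and rq[0] and rq[1] keep their
 * original threads.
 */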
/** The scheduler |
* |
* The thread scheduling procedure. |
* Passes control directly to |
* scheduler_separated_stack(). |
* |
*/ |
void scheduler(void) |
{ |
volatile ipl_t ipl; |
ASSERT(CPU != NULL); |
ipl = interrupts_disable(); |
if (atomic_get(&haltstate)) |
halt(); |
if (THREAD) { |
spinlock_lock(&THREAD->lock); |
#ifndef CONFIG_FPU_LAZY |
fpu_context_save(THREAD->saved_fpu_context); |
#endif |
if (!context_save(&THREAD->saved_context)) { |
/* |
* This is the place where threads leave scheduler(); |
*/ |
spinlock_unlock(&THREAD->lock); |
interrupts_restore(THREAD->saved_context.ipl); |
return; |
} |
/* |
* Interrupt priority level of preempted thread is recorded here |
* to facilitate scheduler() invocations from interrupts_disable()'d |
* code (e.g. waitq_sleep_timeout()). |
*/ |
THREAD->saved_context.ipl = ipl; |
} |
/* |
* Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM |
* and preemption counter. At this point THE could be coming either |
* from THREAD's or CPU's stack. |
*/ |
the_copy(THE, (the_t *) CPU->stack); |
/* |
* We may not keep the old stack. |
* Reason: If we kept the old stack and got blocked, for instance, in |
* find_best_thread(), the old thread could get rescheduled by another |
* CPU and overwrite the part of its own stack that was also used by |
* the scheduler on this CPU. |
* |
* Moreover, we have to bypass the compiler-generated POP sequence |
* which is fooled by SP being set to the very top of the stack. |
* Therefore the scheduler() function continues in |
* scheduler_separated_stack(). |
*/ |
context_save(&CPU->saved_context); |
context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE); |
context_restore(&CPU->saved_context); |
/* not reached */ |
} |
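/*
 * Illustrative sketch (not part of the original file): the two common paths
 * into scheduler(). The clock fragment is a paraphrase of what the timer
 * interrupt handler in time/clock.c does when the running thread's time
 * slice is exhausted; it is not a verbatim copy.
 */
#if 0
static void demo_timeslice_expired(void)
{
	/* interrupt context: the quantum is used up, preempt the thread */
	if (THREAD && --THREAD->ticks == 0)
		scheduler();	/* THREAD is still Running, so it gets re-queued */
}

static void demo_voluntary_block(void)
{
	/* thread context: sleeping primitives end up calling scheduler() */
	thread_usleep(1000);	/* waitq_sleep_timeout() -> scheduler() */
}
#endif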
/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the actual
 * context switch to a new thread.
 *
 * Assumes THREAD->lock is held.
 */
void scheduler_separated_stack(void) |
{ |
int priority; |
ASSERT(CPU != NULL); |
if (THREAD) { |
/* must be run after the switch to scheduler stack */ |
after_thread_ran(); |
switch (THREAD->state) { |
case Running: |
spinlock_unlock(&THREAD->lock); |
thread_ready(THREAD); |
break; |
case Exiting: |
repeat: |
if (THREAD->detached) { |
thread_destroy(THREAD); |
} else { |
/* |
* The thread structure is kept allocated until somebody |
* calls thread_detach() on it. |
*/ |
if (!spinlock_trylock(&THREAD->join_wq.lock)) { |
/* |
* Avoid deadlock. |
*/ |
spinlock_unlock(&THREAD->lock); |
delay(10); |
spinlock_lock(&THREAD->lock); |
goto repeat; |
} |
_waitq_wakeup_unsafe(&THREAD->join_wq, false); |
spinlock_unlock(&THREAD->join_wq.lock); |
THREAD->state = Undead; |
spinlock_unlock(&THREAD->lock); |
} |
break; |
case Sleeping: |
/* |
* Prefer the thread after it's woken up. |
*/ |
THREAD->priority = -1; |
/* |
* We need to release wq->lock which we locked in waitq_sleep(). |
* Address of wq->lock is kept in THREAD->sleep_queue. |
*/ |
spinlock_unlock(&THREAD->sleep_queue->lock); |
/* |
* Check for possible requests for out-of-context invocation. |
*/ |
if (THREAD->call_me) { |
THREAD->call_me(THREAD->call_me_with); |
THREAD->call_me = NULL; |
THREAD->call_me_with = NULL; |
} |
spinlock_unlock(&THREAD->lock); |
break; |
default: |
/* |
* Entering state is unexpected. |
*/ |
panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]); |
break; |
} |
THREAD = NULL; |
} |
THREAD = find_best_thread(); |
spinlock_lock(&THREAD->lock); |
priority = THREAD->priority; |
spinlock_unlock(&THREAD->lock); |
relink_rq(priority); |
/* |
 * If the old and the new task are the same, a lot of work is avoided.
*/ |
if (TASK != THREAD->task) { |
as_t *as1 = NULL; |
as_t *as2; |
if (TASK) { |
spinlock_lock(&TASK->lock); |
as1 = TASK->as; |
spinlock_unlock(&TASK->lock); |
} |
spinlock_lock(&THREAD->task->lock); |
as2 = THREAD->task->as; |
spinlock_unlock(&THREAD->task->lock); |
/* |
* Note that it is possible for two tasks to share one address space. |
*/ |
if (as1 != as2) { |
/* |
 * Both the tasks and the address spaces are different.
 * Replace the old address space with the new one.
*/ |
as_switch(as1, as2); |
} |
TASK = THREAD->task; |
before_task_runs(); |
} |
spinlock_lock(&THREAD->lock); |
THREAD->state = Running; |
#ifdef SCHEDULER_VERBOSE |
printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy)); |
#endif |
/* |
* Some architectures provide late kernel PA2KA(identity) |
* mapping in a page fault handler. However, the page fault |
* handler uses the kernel stack of the running thread and |
* therefore cannot be used to map it. The kernel stack, if |
* necessary, is to be mapped in before_thread_runs(). This |
* function must be executed before the switch to the new stack. |
*/ |
before_thread_runs(); |
/* |
* Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack. |
*/ |
the_copy(THE, (the_t *) THREAD->kstack); |
context_restore(&THREAD->saved_context); |
/* not reached */ |
} |
#ifdef CONFIG_SMP |
/** Load balancing thread
 *
 * SMP load-balancing thread; it supervises the supply
 * of ready threads for the CPU it is wired to.
* |
* @param arg Generic thread argument (unused). |
* |
*/ |
void kcpulb(void *arg) |
{ |
thread_t *t; |
int count, average, i, j, k = 0; |
ipl_t ipl; |
/* |
* Detach kcpulb as nobody will call thread_join_timeout() on it. |
*/ |
thread_detach(THREAD); |
loop: |
/* |
* Work in 1s intervals. |
*/ |
thread_sleep(1); |
not_satisfied: |
/*
 * Calculate the number of threads that will be migrated/stolen from
 * other CPUs. Note that the situation can have changed between the two
 * passes; each time, use the most up-to-date counts.
 */
average = atomic_get(&nrdy) / config.cpu_active + 1; |
count = average - atomic_get(&CPU->nrdy); |
if (count <= 0) |
goto satisfied; |
/*
 * Search the lowest-priority queues on all CPUs first and the highest-priority queues on all CPUs last.
 */
for (j=RQ_COUNT-1; j >= 0; j--) { |
for (i=0; i < config.cpu_active; i++) { |
link_t *l; |
runq_t *r; |
cpu_t *cpu; |
cpu = &cpus[(i + k) % config.cpu_active]; |
/*
 * Not interested in ourselves.
 * No need to disable interrupts here, because kcpulb is X_WIRED
 * and thus cannot be migrated away from this CPU.
 */
if (CPU == cpu) |
continue; |
if (atomic_get(&cpu->nrdy) <= average) |
continue; |
ipl = interrupts_disable(); |
r = &cpu->rq[j]; |
spinlock_lock(&r->lock); |
if (r->n == 0) { |
spinlock_unlock(&r->lock); |
interrupts_restore(ipl); |
continue; |
} |
t = NULL; |
l = r->rq_head.prev; /* search rq from the back */ |
while (l != &r->rq_head) { |
t = list_get_instance(l, thread_t, rq_link); |
/*
 * We don't want to steal CPU-wired threads, nor threads that have already been stolen.
 * The latter prevents threads from migrating between CPUs without ever being run.
 * We also don't want to steal threads whose FPU context is still in the CPU.
 */
spinlock_lock(&t->lock); |
if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) { |
/* |
* Remove t from r. |
*/ |
spinlock_unlock(&t->lock); |
atomic_dec(&cpu->nrdy); |
atomic_dec(&nrdy); |
r->n--; |
list_remove(&t->rq_link); |
break; |
} |
spinlock_unlock(&t->lock); |
l = l->prev; |
t = NULL; |
} |
spinlock_unlock(&r->lock); |
if (t) { |
/* |
* Ready t on local CPU |
*/ |
spinlock_lock(&t->lock); |
#ifdef KCPULB_VERBOSE |
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active); |
#endif |
t->flags |= X_STOLEN; |
t->state = Entering; |
spinlock_unlock(&t->lock); |
thread_ready(t); |
interrupts_restore(ipl); |
if (--count == 0) |
goto satisfied; |
/* |
* We are not satisfied yet, focus on another CPU next time. |
*/ |
k++; |
continue; |
} |
interrupts_restore(ipl); |
} |
} |
if (atomic_get(&CPU->nrdy)) { |
/* |
* Be a little bit light-weight and let migrated threads run. |
*/ |
scheduler(); |
} else { |
/* |
* We failed to migrate a single thread. |
* Give up this turn. |
*/ |
goto loop; |
} |
goto not_satisfied; |
satisfied: |
goto loop; |
} |
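/*
 * Worked example (illustrative, not part of the original file) of the
 * balancing target above: with config.cpu_active == 4 and nrdy == 13,
 * average = 13 / 4 + 1 = 4. A CPU with only 2 ready threads computes
 * count = 4 - 2 = 2 and tries to steal two threads; a CPU with 5 ready
 * threads gets count <= 0 and simply goes back to sleep for another second.
 */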
#endif /* CONFIG_SMP */ |
/** Print information about threads & scheduler queues */ |
void sched_print_list(void) |
{ |
ipl_t ipl; |
int cpu,i; |
runq_t *r; |
thread_t *t; |
link_t *cur; |
/* We are going to mess with scheduler structures, |
* let's not be interrupted */ |
ipl = interrupts_disable(); |
for (cpu=0;cpu < config.cpu_count; cpu++) { |
if (!cpus[cpu].active) |
continue; |
spinlock_lock(&cpus[cpu].lock); |
printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n", |
cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink); |
for (i=0; i<RQ_COUNT; i++) { |
r = &cpus[cpu].rq[i]; |
spinlock_lock(&r->lock); |
if (!r->n) { |
spinlock_unlock(&r->lock); |
continue; |
} |
printf("\trq[%d]: ", i); |
for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) { |
t = list_get_instance(cur, thread_t, rq_link); |
printf("%d(%s) ", t->tid, |
thread_states[t->state]); |
} |
printf("\n"); |
spinlock_unlock(&r->lock); |
} |
spinlock_unlock(&cpus[cpu].lock); |
} |
interrupts_restore(ipl); |
} |
/** @} |
*/ |
/kernel/trunk/generic/src/proc/the.c (new file, 76 lines)
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief THE structure functions. |
* |
* This file contains functions to manage the THE structure. |
* The THE structure exists at the base address of every kernel |
* stack and carries information about current settings |
* (e.g. current CPU, current thread, task and address space |
* and current preemption counter). |
*/ |
#include <arch.h> |
#include <typedefs.h> |
/** Initialize THE structure |
* |
* Initialize THE structure passed as argument. |
* |
* @param the THE structure to be initialized. |
*/ |
void the_initialize(the_t *the) |
{ |
the->preemption_disabled = 0; |
the->cpu = NULL; |
the->thread = NULL; |
the->task = NULL; |
the->as = NULL; |
} |
/** Copy THE structure |
* |
* Copy the source THE structure to the destination THE structure. |
* |
* @param src The source THE structure. |
* @param dst The destination THE structure. |
*/ |
void the_copy(the_t *src, the_t *dst) |
{ |
*dst = *src; |
} |
/** @} |
*/ |
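The THE structure is rarely touched directly; the rest of the kernel reaches it through accessor macros. The following sketch shows the usual arrangement and is only an assumption about those macros (they live in the arch headers, not in this changeset); get_stack_base() is assumed to return the base address of the current kernel stack.

/* hypothetical excerpt of the accessor macros, for illustration only */
#define THE		((the_t *) get_stack_base())
#define CPU		THE->cpu
#define THREAD		THE->thread
#define TASK		THE->task
#define AS		THE->as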
/kernel/trunk/generic/src/proc/task.c (new file, 499 lines)
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief Task management. |
*/ |
#include <main/uinit.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/uarg.h> |
#include <mm/as.h> |
#include <mm/slab.h> |
#include <synch/spinlock.h> |
#include <arch.h> |
#include <panic.h> |
#include <adt/btree.h> |
#include <adt/list.h> |
#include <ipc/ipc.h> |
#include <security/cap.h> |
#include <memstr.h> |
#include <print.h> |
#include <elf.h> |
#include <errno.h> |
#include <syscall/copy.h> |
#include <console/klog.h> |
#ifndef LOADED_PROG_STACK_PAGES_NO |
#define LOADED_PROG_STACK_PAGES_NO 1 |
#endif |
/** Spinlock protecting the tasks_btree B+tree. */ |
SPINLOCK_INITIALIZE(tasks_lock); |
/** B+tree of active tasks. |
* |
* The task is guaranteed to exist after it was found in the tasks_btree as long as: |
* @li the tasks_lock is held, |
 * @li the task's lock is held (provided it was acquired before releasing tasks_lock), or
 * @li the task's refcount is greater than 0
* |
*/ |
btree_t tasks_btree; |
static task_id_t task_counter = 0; |
static void ktaskclnp(void *arg); |
static void ktaskgc(void *arg); |
/** Initialize tasks |
* |
* Initialize kernel tasks support. |
* |
*/ |
void task_init(void) |
{ |
TASK = NULL; |
btree_create(&tasks_btree); |
} |
/** Create new task |
* |
* Create new task with no threads. |
* |
* @param as Task's address space. |
* @param name Symbolic name. |
* |
* @return New task's structure |
* |
*/ |
task_t *task_create(as_t *as, char *name) |
{ |
ipl_t ipl; |
task_t *ta; |
int i; |
ta = (task_t *) malloc(sizeof(task_t), 0); |
task_create_arch(ta); |
spinlock_initialize(&ta->lock, "task_ta_lock"); |
list_initialize(&ta->th_head); |
ta->as = as; |
ta->name = name; |
ta->main_thread = NULL; |
ta->refcount = 0; |
ta->capabilities = 0; |
ta->accept_new_threads = true; |
ipc_answerbox_init(&ta->answerbox); |
for (i=0; i < IPC_MAX_PHONES;i++) |
ipc_phone_init(&ta->phones[i]); |
if (ipc_phone_0) |
ipc_phone_connect(&ta->phones[0], ipc_phone_0); |
atomic_set(&ta->active_calls, 0); |
mutex_initialize(&ta->futexes_lock); |
btree_create(&ta->futexes); |
ipl = interrupts_disable(); |
/* |
* Increment address space reference count. |
* TODO: Reconsider the locking scheme. |
*/ |
mutex_lock(&as->lock); |
as->refcount++; |
mutex_unlock(&as->lock); |
spinlock_lock(&tasks_lock); |
ta->taskid = ++task_counter; |
btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL); |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
return ta; |
} |
/** Destroy task. |
* |
* @param t Task to be destroyed. |
*/ |
void task_destroy(task_t *t) |
{ |
task_destroy_arch(t); |
btree_destroy(&t->futexes); |
mutex_lock_active(&t->as->lock); |
if (--t->as->refcount == 0) { |
mutex_unlock(&t->as->lock); |
as_destroy(t->as); |
/* |
* t->as is destroyed. |
*/ |
} else { |
mutex_unlock(&t->as->lock); |
} |
free(t); |
TASK = NULL; |
} |
/** Create new task with 1 thread and run it |
* |
* @param program_addr Address of program executable image. |
* @param name Program name. |
* |
* @return Task of the running program or NULL on error. |
*/ |
task_t * task_run_program(void *program_addr, char *name) |
{ |
as_t *as; |
as_area_t *a; |
int rc; |
thread_t *t1, *t2; |
task_t *task; |
uspace_arg_t *kernel_uarg; |
as = as_create(0); |
ASSERT(as); |
rc = elf_load((elf_header_t *) program_addr, as); |
if (rc != EE_OK) { |
as_destroy(as); |
return NULL; |
} |
kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0); |
kernel_uarg->uspace_entry = (void *) ((elf_header_t *) program_addr)->e_entry; |
kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS; |
kernel_uarg->uspace_thread_function = NULL; |
kernel_uarg->uspace_thread_arg = NULL; |
kernel_uarg->uspace_uarg = NULL; |
task = task_create(as, name); |
ASSERT(task); |
/* |
 * Create the stack address space area.
*/ |
a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, |
LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE, |
USTACK_ADDRESS, AS_AREA_ATTR_NONE, &anon_backend, NULL); |
/* |
* Create the main thread. |
*/ |
t1 = thread_create(uinit, kernel_uarg, task, 0, "uinit"); |
ASSERT(t1); |
/* |
* Create killer thread for the new task. |
*/ |
t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc"); |
ASSERT(t2); |
thread_ready(t2); |
thread_ready(t1); |
return task; |
} |
/** Syscall for reading task ID from userspace. |
* |
* @param uspace_task_id Userspace address of 8-byte buffer where to store current task ID. |
* |
* @return 0 on success or an error code from @ref errno.h. |
*/ |
__native sys_task_get_id(task_id_t *uspace_task_id) |
{ |
/* |
* No need to acquire lock on TASK because taskid |
* remains constant for the lifespan of the task. |
*/ |
return (__native) copy_to_uspace(uspace_task_id, &TASK->taskid, sizeof(TASK->taskid)); |
} |
/** Find task structure corresponding to task ID. |
* |
* The tasks_lock must be already held by the caller of this function |
* and interrupts must be disabled. |
* |
* @param id Task ID. |
* |
* @return Task structure address or NULL if there is no such task ID. |
*/ |
task_t *task_find_by_id(task_id_t id) |
{ |
btree_node_t *leaf; |
return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf); |
} |
/** Kill task. |
* |
* @param id ID of the task to be killed. |
* |
* @return 0 on success or an error code from errno.h |
*/ |
int task_kill(task_id_t id) |
{ |
ipl_t ipl; |
task_t *ta; |
thread_t *t; |
link_t *cur; |
if (id == 1) |
return EPERM; |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
if (!(ta = task_find_by_id(id))) { |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
return ENOENT; |
} |
spinlock_lock(&ta->lock); |
ta->refcount++; |
spinlock_unlock(&ta->lock); |
btree_remove(&tasks_btree, ta->taskid, NULL); |
spinlock_unlock(&tasks_lock); |
t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp"); |
spinlock_lock(&ta->lock); |
ta->accept_new_threads = false; |
ta->refcount--; |
/* |
* Interrupt all threads except ktaskclnp. |
*/ |
for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
thread_t *thr; |
bool sleeping = false; |
thr = list_get_instance(cur, thread_t, th_link); |
if (thr == t) |
continue; |
spinlock_lock(&thr->lock); |
thr->interrupted = true; |
if (thr->state == Sleeping) |
sleeping = true; |
spinlock_unlock(&thr->lock); |
if (sleeping) |
waitq_interrupt_sleep(thr); |
} |
spinlock_unlock(&ta->lock); |
interrupts_restore(ipl); |
if (t) |
thread_ready(t); |
return 0; |
} |
/** Print task list */ |
void task_print_list(void) |
{ |
link_t *cur; |
ipl_t ipl; |
/* Messing with task structures, avoid deadlock */
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head; cur = cur->next) { |
btree_node_t *node; |
int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) { |
task_t *t; |
int j; |
t = (task_t *) node->value[i]; |
spinlock_lock(&t->lock); |
printf("%s(%lld): address=%#zx, as=%#zx, ActiveCalls: %zd", |
t->name, t->taskid, t, t->as, atomic_get(&t->active_calls)); |
for (j=0; j < IPC_MAX_PHONES; j++) { |
if (t->phones[j].callee) |
printf(" Ph(%zd): %#zx ", j, t->phones[j].callee); |
} |
printf("\n"); |
spinlock_unlock(&t->lock); |
} |
} |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
} |
/** Kernel thread used to cleanup the task after it is killed. */ |
void ktaskclnp(void *arg) |
{ |
ipl_t ipl; |
thread_t *t = NULL, *main_thread; |
link_t *cur; |
bool again; |
thread_detach(THREAD); |
loop: |
ipl = interrupts_disable(); |
spinlock_lock(&TASK->lock); |
main_thread = TASK->main_thread; |
/* |
* Find a thread to join. |
*/ |
again = false; |
for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
t = list_get_instance(cur, thread_t, th_link); |
spinlock_lock(&t->lock); |
if (t == THREAD) { |
spinlock_unlock(&t->lock); |
continue; |
} else if (t == main_thread) { |
spinlock_unlock(&t->lock); |
continue; |
} else if (t->join_type != None) { |
spinlock_unlock(&t->lock); |
again = true; |
continue; |
} else { |
t->join_type = TaskClnp; |
spinlock_unlock(&t->lock); |
again = false; |
break; |
} |
} |
spinlock_unlock(&TASK->lock); |
interrupts_restore(ipl); |
if (again) { |
/* |
* Other cleanup (e.g. ktaskgc) is in progress. |
*/ |
scheduler(); |
goto loop; |
} |
if (t != THREAD) { |
ASSERT(t != main_thread); /* uinit is joined and detached in ktaskgc */
thread_join(t); |
thread_detach(t); |
goto loop; /* go for another thread */ |
} |
/* |
* Now there are no other threads in this task |
* and no new threads can be created. |
*/ |
ipc_cleanup(); |
futex_cleanup(); |
klog_printf("Cleanup of task %lld completed.", TASK->taskid); |
} |
/** Kernel thread used to kill the userspace task when its main thread exits. |
* |
 * This thread waits until the main userspace thread (i.e. uinit) exits.
* When this happens, the task is killed. In the meantime, exited threads |
* are garbage collected. |
* |
* @param arg Pointer to the thread structure of the task's main thread. |
*/ |
void ktaskgc(void *arg) |
{ |
thread_t *t = (thread_t *) arg; |
loop: |
/* |
* Userspace threads cannot detach themselves, |
* therefore the thread pointer is guaranteed to be valid. |
*/ |
if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) { /* sleep uninterruptibly here! */ |
ipl_t ipl; |
link_t *cur; |
thread_t *thr = NULL; |
/* |
* The join timed out. Try to do some garbage collection of Undead threads. |
*/ |
more_gc: |
ipl = interrupts_disable(); |
spinlock_lock(&TASK->lock); |
for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
thr = list_get_instance(cur, thread_t, th_link); |
spinlock_lock(&thr->lock); |
if (thr != t && thr->state == Undead && thr->join_type == None) { |
thr->join_type = TaskGC; |
spinlock_unlock(&thr->lock); |
break; |
} |
spinlock_unlock(&thr->lock); |
thr = NULL; |
} |
spinlock_unlock(&TASK->lock); |
interrupts_restore(ipl); |
if (thr) { |
thread_join(thr); |
thread_detach(thr); |
scheduler(); |
goto more_gc; |
} |
goto loop; |
} |
thread_detach(t); |
task_kill(TASK->taskid); |
} |
/** @} |
*/ |