/branches/network/kernel/generic/src/proc/task.c
/* |
* Copyright (c) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief Task management. |
*/ |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <mm/as.h> |
#include <mm/slab.h> |
#include <atomic.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <arch.h> |
#include <arch/barrier.h> |
#include <adt/avl.h> |
#include <adt/btree.h> |
#include <adt/list.h> |
#include <ipc/ipc.h> |
#include <ipc/ipcrsc.h> |
#include <print.h> |
#include <errno.h> |
#include <func.h> |
#include <syscall/copy.h> |
/** Spinlock protecting the tasks_tree AVL tree. */ |
SPINLOCK_INITIALIZE(tasks_lock); |
/** AVL tree of active tasks.
 *
 * Once a task has been found in the tasks_tree, it is guaranteed to exist
 * as long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held (provided it was acquired before releasing
 *     tasks_lock), or
 * @li the task's refcount is greater than 0.
 *
 */
avltree_t tasks_tree; |
static task_id_t task_counter = 0; |
/** Initialize kernel tasks support. */ |
void task_init(void) |
{ |
TASK = NULL; |
avltree_create(&tasks_tree); |
} |
/* |
* The idea behind this walker is to remember a single task different from |
* TASK. |
*/ |
static bool task_done_walker(avltree_node_t *node, void *arg) |
{ |
task_t *t = avltree_get_instance(node, task_t, tasks_tree_node); |
task_t **tp = (task_t **) arg; |
if (t != TASK) { |
*tp = t; |
return false; /* stop walking */ |
} |
return true; /* continue the walk */ |
} |
/** Kill all tasks except the current task. */ |
void task_done(void) |
{ |
task_t *t; |
do { /* Repeat until there are no tasks other than TASK */
/* Messing with task structures, avoid deadlock */ |
ipl_t ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
t = NULL; |
avltree_walk(&tasks_tree, task_done_walker, &t); |
if (t != NULL) { |
task_id_t id = t->taskid; |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
#ifdef CONFIG_DEBUG |
printf("Killing task %" PRIu64 "\n", id); |
#endif |
task_kill(id); |
thread_usleep(10000); |
} else { |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
} |
} while (t != NULL); |
} |
/** Create new task with no threads. |
* |
* @param as Task's address space. |
* @param name Symbolic name. |
* |
* @return New task's structure. |
* |
*/ |
task_t *task_create(as_t *as, char *name) |
{ |
ipl_t ipl; |
task_t *ta; |
int i; |
ta = (task_t *) malloc(sizeof(task_t), 0); |
task_create_arch(ta); |
spinlock_initialize(&ta->lock, "task_ta_lock"); |
list_initialize(&ta->th_head); |
ta->as = as; |
ta->name = name; |
atomic_set(&ta->refcount, 0); |
atomic_set(&ta->lifecount, 0); |
ta->context = CONTEXT; |
ta->capabilities = 0; |
ta->cycles = 0; |
ipc_answerbox_init(&ta->answerbox, ta); |
for (i = 0; i < IPC_MAX_PHONES; i++) |
ipc_phone_init(&ta->phones[i]); |
if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context, |
ta->context))) |
ipc_phone_connect(&ta->phones[0], ipc_phone_0); |
atomic_set(&ta->active_calls, 0); |
mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE); |
btree_create(&ta->futexes); |
ipl = interrupts_disable(); |
/* |
* Increment address space reference count. |
*/ |
atomic_inc(&as->refcount); |
spinlock_lock(&tasks_lock); |
ta->taskid = ++task_counter; |
avltree_node_initialize(&ta->tasks_tree_node); |
ta->tasks_tree_node.key = ta->taskid; |
avltree_insert(&tasks_tree, &ta->tasks_tree_node); |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
return ta; |
} |
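/*
 * Usage sketch (illustrative only, not part of the original file; the
 * task name "demo" and the variables are made up). A new task starts out
 * with no threads; one is typically added later via thread_create() and
 * thread_attach(). This would run inside some kernel function.
 */
#if 0
	as_t *demo_as = as_create(0);			/* fresh address space */
	task_t *demo_task = task_create(demo_as, "demo");
	/* demo_task->refcount stays 0 until a thread is attached to it. */
#endif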
/** Destroy task. |
* |
* @param t Task to be destroyed. |
*/ |
void task_destroy(task_t *t) |
{ |
	/*
	 * Remove the task from the tasks_tree AVL tree.
	 */
spinlock_lock(&tasks_lock); |
avltree_delete(&tasks_tree, &t->tasks_tree_node); |
spinlock_unlock(&tasks_lock); |
/* |
* Perform architecture specific task destruction. |
*/ |
task_destroy_arch(t); |
/* |
* Free up dynamically allocated state. |
*/ |
btree_destroy(&t->futexes); |
/* |
* Drop our reference to the address space. |
*/ |
if (atomic_predec(&t->as->refcount) == 0) |
as_destroy(t->as); |
free(t); |
TASK = NULL; |
} |
/** Syscall for reading task ID from userspace. |
* |
* @param uspace_task_id userspace address of 8-byte buffer |
* where to store current task ID. |
* |
* @return Zero on success or an error code from @ref errno.h. |
*/ |
unative_t sys_task_get_id(task_id_t *uspace_task_id) |
{ |
/* |
* No need to acquire lock on TASK because taskid remains constant for |
* the lifespan of the task. |
*/ |
return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid, |
sizeof(TASK->taskid)); |
} |
/** Find task structure corresponding to task ID. |
* |
* The tasks_lock must be already held by the caller of this function and |
* interrupts must be disabled. |
* |
* @param id Task ID. |
* |
* @return Task structure address or NULL if there is no such task |
* ID. |
*/ |
task_t *task_find_by_id(task_id_t id)
{
	avltree_node_t *node;
node = avltree_search(&tasks_tree, (avltree_key_t) id); |
if (node) |
return avltree_get_instance(node, task_t, tasks_tree_node); |
return NULL; |
} |
/** Get accounting data of given task.
 *
 * Note that the lock of task 't' must be already held and interrupts must
 * be already disabled.
 *
 * @param t Pointer to the task.
* |
* @return Number of cycles used by the task and all its threads |
* so far. |
*/ |
uint64_t task_get_accounting(task_t *t) |
{ |
/* Accumulated value of task */ |
uint64_t ret = t->cycles; |
/* Current values of threads */ |
link_t *cur; |
for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) { |
thread_t *thr = list_get_instance(cur, thread_t, th_link); |
spinlock_lock(&thr->lock); |
/* Process only counted threads */ |
if (!thr->uncounted) { |
if (thr == THREAD) { |
/* Update accounting of current thread */ |
thread_update_accounting(); |
} |
ret += thr->cycles; |
} |
spinlock_unlock(&thr->lock); |
} |
return ret; |
} |
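/*
 * Calling-convention sketch (illustrative only, not part of the original
 * file): as the comment above states, the caller must disable interrupts
 * and hold the task's lock around the call. 't' stands for any valid
 * task_t pointer.
 */
#if 0
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	uint64_t cycles = task_get_accounting(t);
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);
#endif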
/** Kill task. |
* |
* This function is idempotent. |
 * It signals all of the task's threads to bail out.
* |
* @param id ID of the task to be killed. |
* |
* @return Zero on success or an error code from errno.h. |
*/ |
int task_kill(task_id_t id) |
{ |
ipl_t ipl; |
task_t *ta; |
link_t *cur; |
if (id == 1) |
return EPERM; |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
if (!(ta = task_find_by_id(id))) { |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
return ENOENT; |
} |
spinlock_unlock(&tasks_lock); |
/* |
* Interrupt all threads. |
*/ |
spinlock_lock(&ta->lock); |
for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
thread_t *thr; |
bool sleeping = false; |
thr = list_get_instance(cur, thread_t, th_link); |
spinlock_lock(&thr->lock); |
thr->interrupted = true; |
if (thr->state == Sleeping) |
sleeping = true; |
spinlock_unlock(&thr->lock); |
if (sleeping) |
waitq_interrupt_sleep(thr); |
} |
spinlock_unlock(&ta->lock); |
interrupts_restore(ipl); |
return 0; |
} |
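/*
 * Usage sketch (illustrative only, not part of the original file):
 * task_kill() merely asks the task's threads to exit; it returns EPERM
 * for the initial task (ID 1) and ENOENT for an unknown ID.
 */
#if 0
	int rc = task_kill(id);
	if (rc != 0)
		printf("Unable to kill task %" PRIu64 "\n", id);
#endif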
static bool task_print_walker(avltree_node_t *node, void *arg) |
{ |
task_t *t = avltree_get_instance(node, task_t, tasks_tree_node); |
int j; |
spinlock_lock(&t->lock); |
uint64_t cycles; |
char suffix; |
order(task_get_accounting(t), &cycles, &suffix); |
#ifdef __32_BITS__ |
printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %10p %10p %9" PRIu64 |
"%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles, |
suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls)); |
#endif |
#ifdef __64_BITS__ |
printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %18p %18p %9" PRIu64 |
"%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles, |
suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls)); |
#endif |
for (j = 0; j < IPC_MAX_PHONES; j++) { |
if (t->phones[j].callee) |
printf(" %d:%p", j, t->phones[j].callee); |
} |
printf("\n"); |
spinlock_unlock(&t->lock); |
return true; |
} |
/** Print task list */ |
void task_print_list(void) |
{ |
ipl_t ipl; |
/* Messing with task structures, avoid deadlock */ |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
#ifdef __32_BITS__ |
printf("taskid name ctx address as " |
"cycles threads calls callee\n"); |
printf("------ ---------- --- ---------- ---------- " |
"---------- ------- ------ ------>\n"); |
#endif |
#ifdef __64_BITS__ |
printf("taskid name ctx address as " |
"cycles threads calls callee\n"); |
printf("------ ---------- --- ------------------ ------------------ " |
"---------- ------- ------ ------>\n"); |
#endif |
avltree_walk(&tasks_tree, task_print_walker, NULL); |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
} |
/** @} |
*/ |
/branches/network/kernel/generic/src/proc/thread.c
/* |
* Copyright (c) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief Thread management functions. |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/uarg.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <arch/asm.h> |
#include <arch/cycle.h> |
#include <arch.h> |
#include <synch/synch.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <synch/rwlock.h> |
#include <cpu.h> |
#include <func.h> |
#include <context.h> |
#include <adt/avl.h> |
#include <adt/list.h> |
#include <time/clock.h> |
#include <time/timeout.h> |
#include <config.h> |
#include <arch/interrupt.h> |
#include <smp/ipi.h> |
#include <arch/faddr.h> |
#include <atomic.h> |
#include <memstr.h> |
#include <print.h> |
#include <mm/slab.h> |
#include <debug.h> |
#include <main/uinit.h> |
#include <syscall/copy.h> |
#include <errno.h> |
#ifndef LOADED_PROG_STACK_PAGES_NO |
#define LOADED_PROG_STACK_PAGES_NO 1 |
#endif |
/** Thread states */ |
char *thread_states[] = { |
"Invalid", |
"Running", |
"Sleeping", |
"Ready", |
"Entering", |
"Exiting", |
"Lingering" |
}; |
/** Lock protecting the threads_tree AVL tree. |
* |
 * For locking rules, see the declaration of threads_tree.
*/ |
SPINLOCK_INITIALIZE(threads_lock); |
/** AVL tree of all threads. |
* |
* When a thread is found in the threads_tree AVL tree, it is guaranteed to |
* exist as long as the threads_lock is held. |
*/ |
avltree_t threads_tree; |
SPINLOCK_INITIALIZE(tidlock); |
thread_id_t last_tid = 0; |
static slab_cache_t *thread_slab; |
#ifdef ARCH_HAS_FPU |
slab_cache_t *fpu_context_slab; |
#endif |
/** Thread wrapper. |
* |
* This wrapper is provided to ensure that every thread makes a call to |
* thread_exit() when its implementing function returns. |
* |
 * This function is entered with interrupts disabled.
* |
*/ |
static void cushion(void) |
{ |
void (*f)(void *) = THREAD->thread_code; |
void *arg = THREAD->thread_arg; |
THREAD->last_cycle = get_cycle(); |
/* This is where each thread wakes up after its creation */ |
spinlock_unlock(&THREAD->lock); |
interrupts_enable(); |
f(arg); |
/* Accumulate accounting to the task */ |
ipl_t ipl = interrupts_disable(); |
spinlock_lock(&THREAD->lock); |
if (!THREAD->uncounted) { |
thread_update_accounting(); |
uint64_t cycles = THREAD->cycles; |
THREAD->cycles = 0; |
spinlock_unlock(&THREAD->lock); |
spinlock_lock(&TASK->lock); |
TASK->cycles += cycles; |
spinlock_unlock(&TASK->lock); |
} else |
spinlock_unlock(&THREAD->lock); |
interrupts_restore(ipl); |
thread_exit(); |
/* not reached */ |
} |
/** Initialization and allocation for thread_t structure */ |
static int thr_constructor(void *obj, int kmflags) |
{ |
thread_t *t = (thread_t *) obj; |
spinlock_initialize(&t->lock, "thread_t_lock"); |
link_initialize(&t->rq_link); |
link_initialize(&t->wq_link); |
link_initialize(&t->th_link); |
/* call the architecture-specific part of the constructor */ |
thr_constructor_arch(t); |
#ifdef ARCH_HAS_FPU |
#ifdef CONFIG_FPU_LAZY |
t->saved_fpu_context = NULL; |
#else |
t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags); |
if (!t->saved_fpu_context) |
return -1; |
#endif |
#endif |
t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags); |
if (!t->kstack) { |
#ifdef ARCH_HAS_FPU |
if (t->saved_fpu_context) |
slab_free(fpu_context_slab, t->saved_fpu_context); |
#endif |
return -1; |
} |
return 0; |
} |
/** Destruction of thread_t object */ |
static int thr_destructor(void *obj) |
{ |
thread_t *t = (thread_t *) obj; |
/* call the architecture-specific part of the destructor */ |
thr_destructor_arch(t); |
frame_free(KA2PA(t->kstack)); |
#ifdef ARCH_HAS_FPU |
if (t->saved_fpu_context) |
slab_free(fpu_context_slab, t->saved_fpu_context); |
#endif |
return 1; /* One page freed */ |
} |
/** Initialize threads |
* |
* Initialize kernel threads support. |
* |
*/ |
void thread_init(void) |
{ |
THREAD = NULL; |
atomic_set(&nrdy,0); |
thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0, |
thr_constructor, thr_destructor, 0); |
#ifdef ARCH_HAS_FPU |
fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t), |
FPU_CONTEXT_ALIGN, NULL, NULL, 0); |
#endif |
avltree_create(&threads_tree); |
} |
/** Make thread ready |
* |
* Switch thread t to the ready state. |
* |
* @param t Thread to make ready. |
* |
*/ |
void thread_ready(thread_t *t) |
{ |
cpu_t *cpu; |
runq_t *r; |
ipl_t ipl; |
int i, avg; |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
ASSERT(t->state != Ready);
i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority; |
cpu = CPU; |
if (t->flags & THREAD_FLAG_WIRED) { |
ASSERT(t->cpu != NULL); |
cpu = t->cpu; |
} |
t->state = Ready; |
spinlock_unlock(&t->lock); |
/* |
	 * Append t to the respective ready queue of the chosen processor.
*/ |
r = &cpu->rq[i]; |
spinlock_lock(&r->lock); |
list_append(&t->rq_link, &r->rq_head); |
r->n++; |
spinlock_unlock(&r->lock); |
atomic_inc(&nrdy); |
avg = atomic_get(&nrdy) / config.cpu_active; |
atomic_inc(&cpu->nrdy); |
interrupts_restore(ipl); |
} |
/** Create new thread |
* |
* Create a new thread. |
* |
* @param func Thread's implementing function. |
* @param arg Thread's implementing function argument. |
* @param task Task to which the thread belongs. The caller must |
* guarantee that the task won't cease to exist during the |
* call. The task's lock may not be held. |
* @param flags Thread flags. |
* @param name Symbolic name. |
* @param uncounted Thread's accounting doesn't affect accumulated task |
* accounting. |
* |
* @return New thread's structure on success, NULL on failure. |
* |
*/ |
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, |
int flags, char *name, bool uncounted) |
{ |
thread_t *t; |
ipl_t ipl; |
t = (thread_t *) slab_alloc(thread_slab, 0); |
if (!t) |
return NULL; |
/* Not needed, but good for debugging */ |
memsetb(t->kstack, THREAD_STACK_SIZE * (1 << STACK_FRAMES), 0);
ipl = interrupts_disable(); |
spinlock_lock(&tidlock); |
t->tid = ++last_tid; |
spinlock_unlock(&tidlock); |
interrupts_restore(ipl); |
context_save(&t->saved_context); |
context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, |
THREAD_STACK_SIZE); |
the_initialize((the_t *) t->kstack); |
ipl = interrupts_disable(); |
t->saved_context.ipl = interrupts_read(); |
interrupts_restore(ipl); |
memcpy(t->name, name, THREAD_NAME_BUFLEN); |
t->thread_code = func; |
t->thread_arg = arg; |
t->ticks = -1; |
t->cycles = 0; |
t->uncounted = uncounted; |
t->priority = -1; /* start in rq[0] */ |
t->cpu = NULL; |
t->flags = flags; |
t->state = Entering; |
t->call_me = NULL; |
t->call_me_with = NULL; |
timeout_initialize(&t->sleep_timeout); |
t->sleep_interruptible = false; |
t->sleep_queue = NULL; |
t->timeout_pending = 0; |
t->in_copy_from_uspace = false; |
t->in_copy_to_uspace = false; |
t->interrupted = false; |
t->detached = false; |
waitq_initialize(&t->join_wq); |
t->rwlock_holder_type = RWLOCK_NONE; |
t->task = task; |
t->fpu_context_exists = 0; |
t->fpu_context_engaged = 0; |
avltree_node_initialize(&t->threads_tree_node); |
t->threads_tree_node.key = (uintptr_t) t; |
/* might depend on previous initialization */ |
thread_create_arch(t); |
if (!(flags & THREAD_FLAG_NOATTACH)) |
thread_attach(t, task); |
return t; |
} |
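/*
 * Usage sketch (illustrative only, not part of the original file;
 * demo_thread() is a hypothetical thread function): create a counted
 * kernel thread in the current task and make it runnable. Without
 * THREAD_FLAG_NOATTACH, thread_create() attaches the thread to the
 * task itself.
 */
#if 0
	thread_t *t = thread_create(demo_thread, NULL, TASK, 0, "demo", false);
	if (t)
		thread_ready(t);
#endif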
/** Destroy thread memory structure |
* |
 * Detach the thread from all queues, CPUs etc. and destroy it.
 *
 * Assumes thread->lock is held; the lock is unlocked in the process.
*/ |
void thread_destroy(thread_t *t) |
{ |
ASSERT(t->state == Exiting || t->state == Lingering); |
ASSERT(t->task); |
ASSERT(t->cpu); |
spinlock_lock(&t->cpu->lock); |
if (t->cpu->fpu_owner == t) |
t->cpu->fpu_owner = NULL; |
spinlock_unlock(&t->cpu->lock); |
spinlock_unlock(&t->lock); |
spinlock_lock(&threads_lock); |
avltree_delete(&threads_tree, &t->threads_tree_node); |
spinlock_unlock(&threads_lock); |
/* |
* Detach from the containing task. |
*/ |
spinlock_lock(&t->task->lock); |
list_remove(&t->th_link); |
spinlock_unlock(&t->task->lock); |
/* |
* t is guaranteed to be the very last thread of its task. |
* It is safe to destroy the task. |
*/ |
if (atomic_predec(&t->task->refcount) == 0) |
task_destroy(t->task); |
slab_free(thread_slab, t); |
} |
/** Make the thread visible to the system. |
* |
* Attach the thread structure to the current task and make it visible in the |
* threads_tree. |
* |
* @param t Thread to be attached to the task. |
* @param task Task to which the thread is to be attached. |
*/ |
void thread_attach(thread_t *t, task_t *task) |
{ |
ipl_t ipl; |
/* |
* Attach to the current task. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&task->lock); |
atomic_inc(&task->refcount); |
atomic_inc(&task->lifecount); |
list_append(&t->th_link, &task->th_head); |
spinlock_unlock(&task->lock); |
/* |
* Register this thread in the system-wide list. |
*/ |
spinlock_lock(&threads_lock); |
avltree_insert(&threads_tree, &t->threads_tree_node); |
spinlock_unlock(&threads_lock); |
interrupts_restore(ipl); |
} |
/** Terminate thread. |
* |
* End current thread execution and switch it to the exiting state. All pending |
* timeouts are executed. |
*/ |
void thread_exit(void) |
{ |
ipl_t ipl; |
if (atomic_predec(&TASK->lifecount) == 0) { |
/* |
* We are the last thread in the task that still has not exited. |
* With the exception of the moment the task was created, new |
* threads can only be created by threads of the same task. |
* We are safe to perform cleanup. |
*/ |
if (THREAD->flags & THREAD_FLAG_USPACE) { |
ipc_cleanup(); |
futex_cleanup(); |
LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid); |
} |
} |
restart: |
ipl = interrupts_disable(); |
spinlock_lock(&THREAD->lock); |
if (THREAD->timeout_pending) { |
/* busy waiting for timeouts in progress */ |
spinlock_unlock(&THREAD->lock); |
interrupts_restore(ipl); |
goto restart; |
} |
THREAD->state = Exiting; |
spinlock_unlock(&THREAD->lock); |
scheduler(); |
/* Not reached */ |
while (1) |
; |
} |
/** Thread sleep |
* |
* Suspend execution of the current thread. |
* |
* @param sec Number of seconds to sleep. |
* |
*/ |
void thread_sleep(uint32_t sec) |
{ |
thread_usleep(sec * 1000000); |
} |
/** Wait for another thread to exit. |
* |
* @param t Thread to join on exit. |
* @param usec Timeout in microseconds. |
* @param flags Mode of operation. |
* |
* @return An error code from errno.h or an error code from synch.h. |
*/ |
int thread_join_timeout(thread_t *t, uint32_t usec, int flags) |
{ |
ipl_t ipl; |
int rc; |
if (t == THREAD) |
return EINVAL; |
/* |
* Since thread join can only be called once on an undetached thread, |
* the thread pointer is guaranteed to be still valid. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
ASSERT(!t->detached); |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
rc = waitq_sleep_timeout(&t->join_wq, usec, flags); |
return rc; |
} |
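/*
 * Usage sketch (illustrative only, not part of the original file): wait
 * up to one second for thread t to exit, then detach it so its resources
 * are freed. SYNCH_FLAGS_NONE is the default waiting mode, and a zero
 * return value is assumed to signal success here.
 */
#if 0
	int rc = thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE);
	if (rc == 0)	/* assuming zero signals success */
		thread_detach(t);
#endif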
/** Detach thread. |
* |
 * Mark the thread as detached. If the thread is already in the Lingering
 * state, deallocate its resources.
* |
* @param t Thread to be detached. |
*/ |
void thread_detach(thread_t *t) |
{ |
ipl_t ipl; |
/* |
	 * Since the thread is expected not to be detached yet,
	 * the pointer to it is guaranteed to still be valid.
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
ASSERT(!t->detached); |
if (t->state == Lingering) { |
thread_destroy(t); /* unlocks &t->lock */ |
interrupts_restore(ipl); |
return; |
} else { |
t->detached = true; |
} |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
} |
/** Thread usleep |
* |
* Suspend execution of the current thread. |
* |
* @param usec Number of microseconds to sleep. |
* |
*/ |
void thread_usleep(uint32_t usec) |
{ |
waitq_t wq; |
waitq_initialize(&wq); |
(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING); |
} |
/** Register thread out-of-context invocation |
* |
* Register a function and its argument to be executed |
* on next context switch to the current thread. |
* |
* @param call_me Out-of-context function. |
* @param call_me_with Out-of-context function argument. |
* |
*/ |
void thread_register_call_me(void (* call_me)(void *), void *call_me_with) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&THREAD->lock); |
THREAD->call_me = call_me; |
THREAD->call_me_with = call_me_with; |
spinlock_unlock(&THREAD->lock); |
interrupts_restore(ipl); |
} |
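/*
 * Usage sketch (illustrative only, not part of the original file;
 * demo_callback() and demo_arg are hypothetical): the callback fires
 * once, on the next context switch to the current thread, and is
 * cleared by the scheduler afterwards.
 */
#if 0
	thread_register_call_me(demo_callback, demo_arg);
#endif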
static bool thread_walker(avltree_node_t *node, void *arg) |
{ |
thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node); |
uint64_t cycles; |
char suffix; |
order(t->cycles, &cycles, &suffix); |
#ifdef __32_BITS__ |
printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9" PRIu64 "%c ", |
t->tid, t->name, t, thread_states[t->state], t->task, |
t->task->context, t->thread_code, t->kstack, cycles, suffix); |
#endif |
#ifdef __64_BITS__ |
printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9" PRIu64 "%c ", |
t->tid, t->name, t, thread_states[t->state], t->task, |
t->task->context, t->thread_code, t->kstack, cycles, suffix); |
#endif |
if (t->cpu) |
printf("%-4u", t->cpu->id); |
else |
printf("none"); |
if (t->state == Sleeping) { |
#ifdef __32_BITS__ |
printf(" %10p", t->sleep_queue); |
#endif |
#ifdef __64_BITS__ |
printf(" %18p", t->sleep_queue); |
#endif |
} |
printf("\n"); |
return true; |
} |
/** Print list of threads debug info */ |
void thread_print_list(void) |
{ |
ipl_t ipl; |
/* Messing with thread structures, avoid deadlock */ |
ipl = interrupts_disable(); |
spinlock_lock(&threads_lock); |
#ifdef __32_BITS__ |
printf("tid name address state task " |
"ctx code stack cycles cpu " |
"waitqueue\n"); |
printf("------ ---------- ---------- -------- ---------- " |
"--- ---------- ---------- ---------- ---- " |
"----------\n"); |
#endif |
#ifdef __64_BITS__ |
printf("tid name address state task " |
"ctx code stack cycles cpu " |
"waitqueue\n"); |
printf("------ ---------- ------------------ -------- ------------------ " |
"--- ------------------ ------------------ ---------- ---- " |
"------------------\n"); |
#endif |
avltree_walk(&threads_tree, thread_walker, NULL); |
spinlock_unlock(&threads_lock); |
interrupts_restore(ipl); |
} |
/** Check whether thread exists. |
* |
* Note that threads_lock must be already held and |
* interrupts must be already disabled. |
* |
* @param t Pointer to thread. |
* |
* @return True if thread t is known to the system, false otherwise. |
*/ |
bool thread_exists(thread_t *t) |
{ |
avltree_node_t *node; |
node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t)); |
return node != NULL; |
} |
/** Update accounting of current thread. |
* |
* Note that thread_lock on THREAD must be already held and |
* interrupts must be already disabled. |
* |
*/ |
void thread_update_accounting(void) |
{ |
uint64_t time = get_cycle(); |
THREAD->cycles += time - THREAD->last_cycle; |
THREAD->last_cycle = time; |
} |
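/*
 * Worked example (illustrative only, not part of the original file):
 * if last_cycle was stamped at cycle 1000 and get_cycle() now returns
 * 1500, the call above adds 500 cycles to THREAD->cycles and restamps
 * last_cycle to 1500, so back-to-back updates never double-count.
 */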
/** Process syscall to create new thread. |
* |
*/ |
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name, |
thread_id_t *uspace_thread_id) |
{ |
thread_t *t; |
char namebuf[THREAD_NAME_BUFLEN]; |
uspace_arg_t *kernel_uarg; |
int rc; |
rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN); |
if (rc != 0) |
return (unative_t) rc; |
/* |
* In case of failure, kernel_uarg will be deallocated in this function. |
* In case of success, kernel_uarg will be freed in uinit(). |
*/ |
kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0); |
rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t)); |
if (rc != 0) { |
free(kernel_uarg); |
return (unative_t) rc; |
} |
t = thread_create(uinit, kernel_uarg, TASK, |
THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false); |
if (t) { |
if (uspace_thread_id != NULL) { |
int rc; |
rc = copy_to_uspace(uspace_thread_id, &t->tid, |
sizeof(t->tid)); |
if (rc != 0) { |
/* |
* We have encountered a failure, but the thread |
* has already been created. We need to undo its |
* creation now. |
*/ |
/* |
* The new thread structure is initialized, but |
* is still not visible to the system. |
* We can safely deallocate it. |
*/ |
slab_free(thread_slab, t); |
free(kernel_uarg); |
return (unative_t) rc; |
} |
} |
thread_attach(t, TASK); |
thread_ready(t); |
return 0; |
} else |
free(kernel_uarg); |
return (unative_t) ENOMEM; |
} |
/** Process syscall to terminate thread. |
* |
*/ |
unative_t sys_thread_exit(int uspace_status) |
{ |
thread_exit(); |
/* Unreachable */ |
return 0; |
} |
/** Syscall for getting TID. |
* |
* @param uspace_thread_id Userspace address of 8-byte buffer where to store |
* current thread ID. |
* |
* @return 0 on success or an error code from @ref errno.h. |
*/ |
unative_t sys_thread_get_id(thread_id_t *uspace_thread_id) |
{ |
/* |
* No need to acquire lock on THREAD because tid |
* remains constant for the lifespan of the thread. |
*/ |
return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid, |
sizeof(THREAD->tid)); |
} |
/** @} |
*/ |
/branches/network/kernel/generic/src/proc/program.c
/* |
* Copyright (c) 2001-2004 Jakub Jermar |
* Copyright (c) 2008 Jiri Svoboda |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief Running userspace programs. |
*/ |
#include <main/uinit.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/uarg.h> |
#include <mm/as.h> |
#include <mm/slab.h> |
#include <arch.h> |
#include <adt/list.h> |
#include <ipc/ipc.h> |
#include <ipc/ipcrsc.h> |
#include <security/cap.h> |
#include <lib/elf.h> |
#include <errno.h> |
#include <print.h> |
#include <syscall/copy.h> |
#include <proc/program.h> |
#ifndef LOADED_PROG_STACK_PAGES_NO |
#define LOADED_PROG_STACK_PAGES_NO 1 |
#endif |
/** |
* Points to the binary image used as the program loader. All non-initial |
* tasks are created from this executable image. |
*/ |
void *program_loader = NULL; |
/** Create a program using an existing address space. |
* |
* @param as Address space containing a binary program image. |
* @param entry_addr Program entry-point address in program address space. |
* @param p Buffer for storing program information. |
*/ |
void program_create(as_t *as, uintptr_t entry_addr, program_t *p) |
{ |
as_area_t *a; |
uspace_arg_t *kernel_uarg; |
kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0); |
kernel_uarg->uspace_entry = (void *) entry_addr; |
kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS; |
kernel_uarg->uspace_thread_function = NULL; |
kernel_uarg->uspace_thread_arg = NULL; |
kernel_uarg->uspace_uarg = NULL; |
p->task = task_create(as, "app"); |
ASSERT(p->task); |
/* |
* Create the data as_area. |
*/ |
a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, |
LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS, |
AS_AREA_ATTR_NONE, &anon_backend, NULL); |
/* |
* Create the main thread. |
*/ |
p->main_thread = thread_create(uinit, kernel_uarg, p->task, |
THREAD_FLAG_USPACE, "uinit", false); |
ASSERT(p->main_thread); |
} |
/** Parse an executable image in the kernel memory. |
* |
 * If the image belongs to a program loader, it is registered as such
 * (and p->task is set to NULL). Otherwise a task is created from the
 * executable image. The task is returned in p->task.
* |
* @param image_addr Address of an executable program image. |
* @param p Buffer for storing program info. If image_addr |
* points to a loader image, p->task will be set to |
* NULL and EOK will be returned. |
* |
* @return EOK on success or negative error code. |
*/ |
int program_create_from_image(void *image_addr, program_t *p) |
{ |
as_t *as; |
unsigned int rc; |
as = as_create(0); |
ASSERT(as); |
rc = elf_load((elf_header_t *) image_addr, as, 0); |
if (rc != EE_OK) { |
as_destroy(as); |
p->task = NULL; |
p->main_thread = NULL; |
if (rc != EE_LOADER) |
return ENOTSUP; |
/* Register image as the program loader */ |
ASSERT(program_loader == NULL); |
program_loader = image_addr; |
printf("Registered program loader at 0x%" PRIp "\n", |
image_addr); |
return EOK; |
} |
program_create(as, ((elf_header_t *) image_addr)->e_entry, p); |
return EOK; |
} |
/** Create a task from the program loader image. |
* |
* @param p Buffer for storing program info. |
* @return EOK on success or negative error code. |
*/ |
int program_create_loader(program_t *p) |
{ |
as_t *as; |
unsigned int rc; |
void *loader; |
as = as_create(0); |
ASSERT(as); |
loader = program_loader; |
if (!loader) { |
printf("Cannot spawn loader as none was registered\n"); |
return ENOENT; |
} |
rc = elf_load((elf_header_t *) program_loader, as, ELD_F_LOADER); |
if (rc != EE_OK) { |
as_destroy(as); |
return ENOENT; |
} |
program_create(as, ((elf_header_t *) program_loader)->e_entry, p); |
return EOK; |
} |
/** Make program ready. |
* |
* Switch program's main thread to the ready state. |
* |
* @param p Program to make ready. |
*/ |
void program_ready(program_t *p) |
{ |
thread_ready(p->main_thread); |
} |
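/*
 * Usage sketch (illustrative only, not part of the original file;
 * image_addr is a hypothetical kernel address of an ELF image): parse
 * the image and, unless it registered itself as the program loader,
 * start the resulting task.
 */
#if 0
	program_t prg;
	if (program_create_from_image(image_addr, &prg) == EOK &&
	    prg.task != NULL)
		program_ready(&prg);
#endif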
/** Syscall for creating a new loader instance from userspace. |
* |
* Creates a new task from the program loader image, connects a phone |
* to it and stores the phone id into the provided buffer. |
* |
* @param uspace_phone_id Userspace address where to store the phone id. |
* |
* @return 0 on success or an error code from @ref errno.h. |
*/ |
unative_t sys_program_spawn_loader(int *uspace_phone_id) |
{ |
program_t p; |
int fake_id; |
int rc; |
int phone_id; |
fake_id = 0; |
/* Before we even try creating the task, see if we can write the id */ |
rc = (unative_t) copy_to_uspace(uspace_phone_id, &fake_id, |
sizeof(fake_id)); |
if (rc != 0) |
return rc; |
phone_id = phone_alloc(); |
if (phone_id < 0) |
return ELIMIT; |
rc = program_create_loader(&p); |
if (rc != 0) |
return rc; |
phone_connect(phone_id, &p.task->answerbox); |
/* No need to acquire a lock before program_ready() */
rc = (unative_t) copy_to_uspace(uspace_phone_id, &phone_id, |
sizeof(phone_id)); |
if (rc != 0) { |
/* Ooops */ |
ipc_phone_hangup(&TASK->phones[phone_id]); |
task_kill(p.task->taskid); |
return rc; |
} |
// FIXME: control the capabilities |
cap_set(p.task, cap_get(TASK)); |
program_ready(&p); |
return EOK; |
} |
/** @} |
*/ |
/branches/network/kernel/generic/src/proc/scheduler.c
/* |
* Copyright (c) 2001-2007 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief Scheduler and load balancing. |
* |
* This file contains the scheduler and kcpulb kernel thread which |
* performs load-balancing of per-CPU run queues. |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/as.h> |
#include <time/timeout.h> |
#include <time/delay.h> |
#include <arch/asm.h> |
#include <arch/faddr.h> |
#include <arch/cycle.h> |
#include <atomic.h> |
#include <synch/spinlock.h> |
#include <config.h> |
#include <context.h> |
#include <fpu_context.h> |
#include <func.h> |
#include <arch.h> |
#include <adt/list.h> |
#include <panic.h> |
#include <cpu.h> |
#include <print.h> |
#include <debug.h> |
static void before_task_runs(void); |
static void before_thread_runs(void); |
static void after_thread_ran(void); |
static void scheduler_separated_stack(void); |
atomic_t nrdy; /**< Number of ready threads in the system. */ |
/** Carry out actions before new task runs. */ |
void before_task_runs(void) |
{ |
before_task_runs_arch(); |
} |
/** Take actions before new thread runs. |
* |
 * Perform actions that need to be taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry.
* |
*/ |
void before_thread_runs(void) |
{ |
before_thread_runs_arch(); |
#ifdef CONFIG_FPU_LAZY |
if(THREAD == CPU->fpu_owner) |
fpu_enable(); |
else |
fpu_disable(); |
#else |
fpu_enable(); |
if (THREAD->fpu_context_exists) |
fpu_context_restore(THREAD->saved_fpu_context); |
else { |
fpu_init(); |
THREAD->fpu_context_exists = 1; |
} |
#endif |
} |
/** Take actions after THREAD has run.
 *
 * Perform actions that need to be taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
* |
*/ |
void after_thread_ran(void) |
{ |
after_thread_ran_arch(); |
} |
#ifdef CONFIG_FPU_LAZY |
void scheduler_fpu_lazy_request(void) |
{ |
restart: |
fpu_enable(); |
spinlock_lock(&CPU->lock); |
/* Save old context */ |
if (CPU->fpu_owner != NULL) { |
spinlock_lock(&CPU->fpu_owner->lock); |
fpu_context_save(CPU->fpu_owner->saved_fpu_context); |
/* don't prevent migration */ |
CPU->fpu_owner->fpu_context_engaged = 0; |
spinlock_unlock(&CPU->fpu_owner->lock); |
CPU->fpu_owner = NULL; |
} |
spinlock_lock(&THREAD->lock); |
if (THREAD->fpu_context_exists) { |
fpu_context_restore(THREAD->saved_fpu_context); |
} else { |
/* Allocate FPU context */ |
if (!THREAD->saved_fpu_context) { |
/* Might sleep */ |
spinlock_unlock(&THREAD->lock); |
spinlock_unlock(&CPU->lock); |
THREAD->saved_fpu_context = |
(fpu_context_t *) slab_alloc(fpu_context_slab, 0); |
/* We may have switched CPUs during slab_alloc */ |
goto restart; |
} |
fpu_init(); |
THREAD->fpu_context_exists = 1; |
} |
CPU->fpu_owner = THREAD; |
THREAD->fpu_context_engaged = 1; |
spinlock_unlock(&THREAD->lock); |
spinlock_unlock(&CPU->lock); |
} |
#endif |
/** Initialize scheduler |
* |
* Initialize kernel scheduler. |
* |
*/ |
void scheduler_init(void) |
{ |
} |
/** Get thread to be scheduled |
* |
* Get the optimal thread to be scheduled |
* according to thread accounting and scheduler |
* policy. |
* |
* @return Thread to be scheduled. |
* |
*/ |
static thread_t *find_best_thread(void) |
{ |
thread_t *t; |
runq_t *r; |
int i; |
ASSERT(CPU != NULL); |
loop: |
interrupts_enable(); |
if (atomic_get(&CPU->nrdy) == 0) { |
/* |
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This saves energy and is friendly to hyperthreading.
*/ |
/* |
* An interrupt might occur right now and wake up a thread. |
		 * In such a case, the CPU would still go to sleep
		 * even though a runnable thread exists.
*/ |
cpu_sleep(); |
goto loop; |
} |
interrupts_disable(); |
for (i = 0; i < RQ_COUNT; i++) { |
r = &CPU->rq[i]; |
spinlock_lock(&r->lock); |
if (r->n == 0) { |
/* |
* If this queue is empty, try a lower-priority queue. |
*/ |
spinlock_unlock(&r->lock); |
continue; |
} |
atomic_dec(&CPU->nrdy); |
atomic_dec(&nrdy); |
r->n--; |
/* |
* Take the first thread from the queue. |
*/ |
t = list_get_instance(r->rq_head.next, thread_t, rq_link); |
list_remove(&t->rq_link); |
spinlock_unlock(&r->lock); |
spinlock_lock(&t->lock); |
t->cpu = CPU; |
t->ticks = us2ticks((i + 1) * 10000); |
t->priority = i; /* correct rq index */ |
/* |
* Clear the THREAD_FLAG_STOLEN flag so that t can be migrated |
		 * when the need for load balancing arises.
*/ |
t->flags &= ~THREAD_FLAG_STOLEN; |
spinlock_unlock(&t->lock); |
return t; |
} |
goto loop; |
} |
/** Prevent rq starvation |
* |
* Prevent low priority threads from starving in rq's. |
* |
 * When the function decides to relink the run queues, it reconnects
 * the respective pointers so that, as a result, threads with priority
 * greater than or equal to 'start' are moved to a higher-priority queue.
* |
* @param start Threshold priority. |
* |
*/ |
static void relink_rq(int start) |
{ |
link_t head; |
runq_t *r; |
int i, n; |
list_initialize(&head); |
spinlock_lock(&CPU->lock); |
if (CPU->needs_relink > NEEDS_RELINK_MAX) { |
for (i = start; i < RQ_COUNT - 1; i++) { |
/* remember and empty rq[i + 1] */ |
r = &CPU->rq[i + 1]; |
spinlock_lock(&r->lock); |
list_concat(&head, &r->rq_head); |
n = r->n; |
r->n = 0; |
spinlock_unlock(&r->lock); |
/* append rq[i + 1] to rq[i] */ |
r = &CPU->rq[i]; |
spinlock_lock(&r->lock); |
list_concat(&r->rq_head, &head); |
r->n += n; |
spinlock_unlock(&r->lock); |
} |
CPU->needs_relink = 0; |
} |
spinlock_unlock(&CPU->lock); |
} |
/** The scheduler |
* |
* The thread scheduling procedure. |
* Passes control directly to |
* scheduler_separated_stack(). |
* |
*/ |
void scheduler(void) |
{ |
volatile ipl_t ipl; |
ASSERT(CPU != NULL); |
ipl = interrupts_disable(); |
if (atomic_get(&haltstate)) |
halt(); |
if (THREAD) { |
spinlock_lock(&THREAD->lock); |
/* Update thread accounting */ |
THREAD->cycles += get_cycle() - THREAD->last_cycle; |
#ifndef CONFIG_FPU_LAZY |
fpu_context_save(THREAD->saved_fpu_context); |
#endif |
if (!context_save(&THREAD->saved_context)) { |
/* |
* This is the place where threads leave scheduler(); |
*/ |
/* Save current CPU cycle */ |
THREAD->last_cycle = get_cycle(); |
spinlock_unlock(&THREAD->lock); |
interrupts_restore(THREAD->saved_context.ipl); |
return; |
} |
/* |
* Interrupt priority level of preempted thread is recorded |
* here to facilitate scheduler() invocations from |
* interrupts_disable()'d code (e.g. waitq_sleep_timeout()). |
*/ |
THREAD->saved_context.ipl = ipl; |
} |
/* |
* Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM |
* and preemption counter. At this point THE could be coming either |
* from THREAD's or CPU's stack. |
*/ |
the_copy(THE, (the_t *) CPU->stack); |
/* |
* We may not keep the old stack. |
* Reason: If we kept the old stack and got blocked, for instance, in |
* find_best_thread(), the old thread could get rescheduled by another |
* CPU and overwrite the part of its own stack that was also used by |
* the scheduler on this CPU. |
* |
* Moreover, we have to bypass the compiler-generated POP sequence |
* which is fooled by SP being set to the very top of the stack. |
* Therefore the scheduler() function continues in |
* scheduler_separated_stack(). |
*/ |
context_save(&CPU->saved_context); |
context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), |
(uintptr_t) CPU->stack, CPU_STACK_SIZE); |
context_restore(&CPU->saved_context); |
/* not reached */ |
} |
/** Scheduler stack switch wrapper |
* |
 * Second part of the scheduler() function, running on the new stack.
 * It handles the actual context switch to a new thread.
 *
 * Assumes THREAD->lock is held.
*/ |
void scheduler_separated_stack(void) |
{ |
int priority; |
DEADLOCK_PROBE_INIT(p_joinwq); |
ASSERT(CPU != NULL); |
if (THREAD) { |
/* must be run after the switch to scheduler stack */ |
after_thread_ran(); |
switch (THREAD->state) { |
case Running: |
spinlock_unlock(&THREAD->lock); |
thread_ready(THREAD); |
break; |
case Exiting: |
repeat: |
if (THREAD->detached) { |
thread_destroy(THREAD); |
} else { |
/* |
* The thread structure is kept allocated until |
* somebody calls thread_detach() on it. |
*/ |
if (!spinlock_trylock(&THREAD->join_wq.lock)) { |
/* |
* Avoid deadlock. |
*/ |
spinlock_unlock(&THREAD->lock); |
delay(HZ); |
spinlock_lock(&THREAD->lock); |
DEADLOCK_PROBE(p_joinwq, |
DEADLOCK_THRESHOLD); |
goto repeat; |
} |
_waitq_wakeup_unsafe(&THREAD->join_wq, |
WAKEUP_FIRST); |
spinlock_unlock(&THREAD->join_wq.lock); |
THREAD->state = Lingering; |
spinlock_unlock(&THREAD->lock); |
} |
break; |
case Sleeping: |
/* |
* Prefer the thread after it's woken up. |
*/ |
THREAD->priority = -1; |
/* |
* We need to release wq->lock which we locked in |
* waitq_sleep(). Address of wq->lock is kept in |
* THREAD->sleep_queue. |
*/ |
spinlock_unlock(&THREAD->sleep_queue->lock); |
/* |
* Check for possible requests for out-of-context |
* invocation. |
*/ |
if (THREAD->call_me) { |
THREAD->call_me(THREAD->call_me_with); |
THREAD->call_me = NULL; |
THREAD->call_me_with = NULL; |
} |
spinlock_unlock(&THREAD->lock); |
break; |
default: |
/* |
* Entering state is unexpected. |
*/ |
panic("tid%" PRIu64 ": unexpected state %s\n", |
THREAD->tid, thread_states[THREAD->state]); |
break; |
} |
THREAD = NULL; |
} |
THREAD = find_best_thread(); |
spinlock_lock(&THREAD->lock); |
priority = THREAD->priority; |
spinlock_unlock(&THREAD->lock); |
relink_rq(priority); |
/* |
* If both the old and the new task are the same, lots of work is |
* avoided. |
*/ |
if (TASK != THREAD->task) { |
as_t *as1 = NULL; |
as_t *as2; |
if (TASK) { |
spinlock_lock(&TASK->lock); |
as1 = TASK->as; |
spinlock_unlock(&TASK->lock); |
} |
spinlock_lock(&THREAD->task->lock); |
as2 = THREAD->task->as; |
spinlock_unlock(&THREAD->task->lock); |
/* |
* Note that it is possible for two tasks to share one address |
* space. |
*/ |
if (as1 != as2) { |
/* |
* Both tasks and address spaces are different. |
* Replace the old one with the new one. |
*/ |
as_switch(as1, as2); |
} |
TASK = THREAD->task; |
before_task_runs(); |
} |
spinlock_lock(&THREAD->lock); |
THREAD->state = Running; |
#ifdef SCHEDULER_VERBOSE |
printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64 |
", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, |
THREAD->ticks, atomic_get(&CPU->nrdy)); |
#endif |
/* |
* Some architectures provide late kernel PA2KA(identity) |
* mapping in a page fault handler. However, the page fault |
* handler uses the kernel stack of the running thread and |
* therefore cannot be used to map it. The kernel stack, if |
* necessary, is to be mapped in before_thread_runs(). This |
* function must be executed before the switch to the new stack. |
*/ |
before_thread_runs(); |
/* |
* Copy the knowledge of CPU, TASK, THREAD and preemption counter to |
* thread's stack. |
*/ |
the_copy(THE, (the_t *) THREAD->kstack); |
context_restore(&THREAD->saved_context); |
/* not reached */ |
} |
#ifdef CONFIG_SMP |
/** Load balancing thread |
* |
 * SMP load balancing thread, supervising the supply of threads
 * for the CPU it is wired to.
* |
* @param arg Generic thread argument (unused). |
* |
*/ |
void kcpulb(void *arg) |
{ |
thread_t *t; |
int count, average, j, k = 0; |
unsigned int i; |
ipl_t ipl; |
/* |
* Detach kcpulb as nobody will call thread_join_timeout() on it. |
*/ |
thread_detach(THREAD); |
loop: |
/* |
* Work in 1s intervals. |
*/ |
thread_sleep(1); |
not_satisfied: |
/* |
* Calculate the number of threads that will be migrated/stolen from |
	 * other CPUs. Note that the situation may have changed between two
	 * passes. Each time, use the most up-to-date counts.
*/ |
average = atomic_get(&nrdy) / config.cpu_active + 1; |
count = average - atomic_get(&CPU->nrdy); |
if (count <= 0) |
goto satisfied; |
/* |
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
*/ |
for (j = RQ_COUNT - 1; j >= 0; j--) { |
for (i = 0; i < config.cpu_active; i++) { |
link_t *l; |
runq_t *r; |
cpu_t *cpu; |
cpu = &cpus[(i + k) % config.cpu_active]; |
/* |
* Not interested in ourselves. |
			 * No interrupt disabling is required because
			 * kcpulb has THREAD_FLAG_WIRED.
*/ |
if (CPU == cpu) |
continue; |
if (atomic_get(&cpu->nrdy) <= average) |
continue; |
ipl = interrupts_disable(); |
r = &cpu->rq[j]; |
spinlock_lock(&r->lock); |
if (r->n == 0) { |
spinlock_unlock(&r->lock); |
interrupts_restore(ipl); |
continue; |
} |
t = NULL; |
l = r->rq_head.prev; /* search rq from the back */ |
while (l != &r->rq_head) { |
t = list_get_instance(l, thread_t, rq_link); |
/* |
				 * We don't want to steal CPU-wired threads
				 * or threads that have already been stolen.
				 * The latter rule prevents threads from
				 * migrating between CPUs without ever being
				 * run. We also don't want to steal threads
				 * whose FPU context is still in the CPU.
*/ |
spinlock_lock(&t->lock); |
if ((!(t->flags & (THREAD_FLAG_WIRED | |
THREAD_FLAG_STOLEN))) && |
(!(t->fpu_context_engaged))) { |
/* |
* Remove t from r. |
*/ |
spinlock_unlock(&t->lock); |
atomic_dec(&cpu->nrdy); |
atomic_dec(&nrdy); |
r->n--; |
list_remove(&t->rq_link); |
break; |
} |
spinlock_unlock(&t->lock); |
l = l->prev; |
t = NULL; |
} |
spinlock_unlock(&r->lock); |
if (t) { |
/* |
* Ready t on local CPU |
*/ |
spinlock_lock(&t->lock); |
#ifdef KCPULB_VERBOSE |
printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, " |
"nrdy=%ld, avg=%ld\n", CPU->id, t->tid, |
CPU->id, atomic_get(&CPU->nrdy), |
atomic_get(&nrdy) / config.cpu_active); |
#endif |
t->flags |= THREAD_FLAG_STOLEN; |
t->state = Entering; |
spinlock_unlock(&t->lock); |
thread_ready(t); |
interrupts_restore(ipl); |
if (--count == 0) |
goto satisfied; |
/* |
* We are not satisfied yet, focus on another |
* CPU next time. |
*/ |
k++; |
continue; |
} |
interrupts_restore(ipl); |
} |
} |
if (atomic_get(&CPU->nrdy)) { |
/* |
* Be a little bit light-weight and let migrated threads run. |
*/ |
scheduler(); |
} else { |
/* |
* We failed to migrate a single thread. |
* Give up this turn. |
*/ |
goto loop; |
} |
goto not_satisfied; |
satisfied: |
goto loop; |
} |
#endif /* CONFIG_SMP */ |
/** Print information about threads & scheduler queues */ |
void sched_print_list(void) |
{ |
ipl_t ipl; |
unsigned int cpu, i; |
runq_t *r; |
thread_t *t; |
link_t *cur; |
	/*
	 * We are going to mess with scheduler structures;
	 * let's not be interrupted.
	 */
ipl = interrupts_disable(); |
for (cpu = 0; cpu < config.cpu_count; cpu++) { |
if (!cpus[cpu].active) |
continue; |
spinlock_lock(&cpus[cpu].lock); |
printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIc "\n", |
cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), |
cpus[cpu].needs_relink); |
for (i = 0; i < RQ_COUNT; i++) { |
r = &cpus[cpu].rq[i]; |
spinlock_lock(&r->lock); |
if (!r->n) { |
spinlock_unlock(&r->lock); |
continue; |
} |
printf("\trq[%u]: ", i); |
for (cur = r->rq_head.next; cur != &r->rq_head; |
cur = cur->next) { |
t = list_get_instance(cur, thread_t, rq_link); |
printf("%" PRIu64 "(%s) ", t->tid, |
thread_states[t->state]); |
} |
printf("\n"); |
spinlock_unlock(&r->lock); |
} |
spinlock_unlock(&cpus[cpu].lock); |
} |
interrupts_restore(ipl); |
} |
/** @} |
*/ |
/branches/network/kernel/generic/src/proc/tasklet.c
/* |
* Copyright (c) 2007 Jan Hudecek |
* Copyright (c) 2008 Martin Decky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** @file tasklet.c |
* @brief Tasklet implementation |
*/ |
#include <proc/tasklet.h> |
#include <synch/spinlock.h> |
#include <mm/slab.h> |
#include <config.h> |
/** Spinlock protecting list of tasklets */ |
SPINLOCK_INITIALIZE(tasklet_lock); |
/** Array of tasklet lists for every CPU */ |
tasklet_descriptor_t **tasklet_list; |
void tasklet_init(void) |
{ |
unsigned int i; |
tasklet_list = malloc(sizeof(tasklet_descriptor_t *) * config.cpu_count, 0); |
if (!tasklet_list) |
panic("Error initializing tasklets"); |
for (i = 0; i < config.cpu_count; i++) |
tasklet_list[i] = NULL; |
spinlock_initialize(&tasklet_lock, "tasklet_lock"); |
} |
/** @} |
*/ |
/branches/network/kernel/generic/src/proc/the.c
/* |
* Copyright (c) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief THE structure functions. |
* |
* This file contains functions to manage the THE structure. |
* The THE structure exists at the base address of every kernel |
* stack and carries information about current settings |
* (e.g. current CPU, current thread, task and address space |
* and current preemption counter). |
*/ |
#include <arch.h> |
/** Initialize THE structure |
* |
* Initialize THE structure passed as argument. |
* |
* @param the THE structure to be initialized. |
*/ |
void the_initialize(the_t *the) |
{ |
the->preemption_disabled = 0; |
the->cpu = NULL; |
the->thread = NULL; |
the->task = NULL; |
the->as = NULL; |
} |
/** Copy THE structure |
* |
* Copy the source THE structure to the destination THE structure. |
* |
* @param src The source THE structure. |
* @param dst The destination THE structure. |
*/ |
void the_copy(the_t *src, the_t *dst) |
{ |
*dst = *src; |
} |
/** @} |
*/ |