Subversion Repositories HelenOS-historic

Compare Revisions

Rev 898 → Rev 897

/kernel/trunk/generic/src/proc/scheduler.c
47,10 → 47,8
#include <print.h>
#include <debug.h>
 
static void scheduler_separated_stack(void);
atomic_t nrdy;
 
atomic_t nrdy; /**< Number of ready threads in the system. */
 
/** Take actions before new thread runs.
*
* Perform actions that need to be
63,12 → 61,12
void before_thread_runs(void)
{
before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
#ifdef CONFIG_FPU_LAZY
if(THREAD==CPU->fpu_owner)
fpu_enable();
else
fpu_disable();
#else
#else
fpu_enable();
if (THREAD->fpu_context_exists)
fpu_context_restore(&(THREAD->saved_fpu_context));
76,14 → 74,14
fpu_init(&(THREAD->saved_fpu_context));
THREAD->fpu_context_exists=1;
}
#endif
#endif
}
 
/** Take actions after THREAD had run.
/** Take actions after old thread ran.
*
* Perform actions that need to be
* taken after the running thread
* had been preempted by the scheduler.
* was preempted by the scheduler.
*
* THREAD->lock is locked on entry
*
109,16 → 107,16
}
 
spinlock_lock(&THREAD->lock);
if (THREAD->fpu_context_exists) {
if (THREAD->fpu_context_exists)
fpu_context_restore(&THREAD->saved_fpu_context);
} else {
else {
fpu_init(&(THREAD->saved_fpu_context));
THREAD->fpu_context_exists=1;
}
CPU->fpu_owner=THREAD;
THREAD->fpu_context_engaged = 1;
 
spinlock_unlock(&THREAD->lock);
 
spinlock_unlock(&CPU->lock);
}
#endif
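
The two FPU hunks above are the two halves of HelenOS's lazy FPU context switching: with CONFIG_FPU_LAZY, before_thread_runs() enables the FPU only when the incoming THREAD is already CPU->fpu_owner and disables it otherwise, so the first FPU instruction of any other thread traps; the block ending at this #endif is the trap-side path that restores or initializes that thread's saved context and records it as the new owner. Below is a minimal, self-contained sketch of the same policy. All names and types in it are illustrative stand-ins written for this note (not kernel APIs), and the save of the previous owner's registers is an assumption about what the elided part of the handler does.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of lazy FPU switching; names are illustrative, not HelenOS APIs. */
struct thread {
    bool fpu_context_exists;    /* thread has a saved FPU context */
    int fpu_regs;               /* stands in for the saved register file */
};

static struct thread *fpu_owner;    /* thread whose state is live in the FPU */
static bool fpu_enabled;            /* stands in for an FPU-enable control bit */
static int fpu_hw;                  /* stands in for the hardware FPU state */

/* Dispatch side: only the current owner may use the FPU without trapping. */
static void before_thread_runs(struct thread *t)
{
    fpu_enabled = (t == fpu_owner);
}

/* Trap side: a non-owner touched the FPU, so switch the FPU state lazily. */
static void fpu_trap(struct thread *t)
{
    if (fpu_owner)
        fpu_owner->fpu_regs = fpu_hw;   /* save the previous owner (assumed) */
    if (t->fpu_context_exists) {
        fpu_hw = t->fpu_regs;           /* fpu_context_restore() equivalent */
    } else {
        fpu_hw = 0;                     /* fpu_init() equivalent */
        t->fpu_context_exists = true;
    }
    fpu_owner = t;
    fpu_enabled = true;
}

int main(void)
{
    struct thread a = { 0 }, b = { 0 };

    before_thread_runs(&a);
    if (!fpu_enabled)
        fpu_trap(&a);                   /* a's first FPU use */

    before_thread_runs(&b);
    if (!fpu_enabled)
        fpu_trap(&b);                   /* b claims the FPU lazily */

    printf("FPU owner is %s\n", fpu_owner == &b ? "b" : "a");
    return 0;
}

The payoff of the scheme is that threads which never touch the FPU never pay for an FPU save and restore on their context switches.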
132,6 → 130,7
{
}
 
 
/** Get thread to be scheduled
*
* Get the optimal thread to be scheduled
171,7 → 170,8
 
interrupts_disable();
for (i = 0; i<RQ_COUNT; i++) {
i = 0;
for (; i<RQ_COUNT; i++) {
r = &CPU->rq[i];
spinlock_lock(&r->lock);
if (r->n == 0) {
198,7 → 198,7
t->cpu = CPU;
 
t->ticks = us2ticks((i+1)*10000);
t->priority = i; /* correct rq index */
t->priority = i; /* eventually correct rq index */
 
/*
* Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
212,6 → 212,7
 
}
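
As the hunks above show, find_best_thread() walks the RQ_COUNT run queues from index 0 (highest priority) upward, dequeues a thread from the first non-empty queue, and hands it a time quantum that grows with the queue index: us2ticks((i+1)*10000), i.e. 10 ms for rq[0], 20 ms for rq[1], and so on. A small stand-alone sketch of that selection step follows; the types are toys written for this note, and the real function additionally locks each queue, updates nrdy, and keeps looping (or halts the CPU) when every queue is empty.

#include <stdio.h>

#define RQ_COUNT 5              /* illustrative; the kernel defines its own value */

struct rq {
    int n;                      /* number of queued threads */
    int first_tid;              /* stands in for the queue's list head */
};

static struct rq rq[RQ_COUNT];

/* Pick from the highest-priority non-empty queue, as find_best_thread() does;
 * the quantum grows with the queue index, mirroring us2ticks((i+1)*10000). */
static int find_best(struct rq queues[], unsigned *quantum_us)
{
    for (int i = 0; i < RQ_COUNT; i++) {
        if (queues[i].n == 0)
            continue;
        queues[i].n--;
        *quantum_us = (unsigned) (i + 1) * 10000;
        return queues[i].first_tid;
    }
    return -1;                  /* the real kernel keeps waiting instead */
}

int main(void)
{
    unsigned quantum;
    int tid;

    rq[2].n = 1;
    rq[2].first_tid = 42;

    tid = find_best(rq, &quantum);
    printf("tid %d gets %u us\n", tid, quantum);    /* tid 42 gets 30000 us */
    return 0;
}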
 
 
/** Prevent rq starvation
*
* Prevent low priority threads from starving in rq's.
254,71 → 255,7
 
}
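
relink_rq(), whose body lies between the hunks shown, is documented above as the pass that keeps low-priority threads from starving. Judging from the surrounding code it periodically promotes queued threads toward the higher-priority (lower-index) queues so that find_best_thread() eventually reaches them; the exact mechanism is not visible in this diff, so the sketch below is only an illustration of that idea under that assumption, not the kernel's implementation.

#include <stdio.h>

#define RQ_COUNT 5

/* Toy run queues: plain counters here; the kernel keeps linked lists under locks. */
static int rq_len[RQ_COUNT];

/* One anti-starvation pass: every queue drains into the one above it, so each
 * queued thread moves up one priority level and cannot wait forever in
 * rq[RQ_COUNT - 1]. Illustrative only. */
static void relink_pass(void)
{
    for (int i = 0; i < RQ_COUNT - 1; i++) {
        rq_len[i] += rq_len[i + 1];
        rq_len[i + 1] = 0;
    }
}

int main(void)
{
    rq_len[RQ_COUNT - 1] = 3;       /* three threads parked at the lowest priority */

    for (int pass = 0; pass < RQ_COUNT - 1; pass++)
        relink_pass();

    printf("rq[0] now holds %d threads\n", rq_len[0]);     /* prints 3 */
    return 0;
}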
 
/** The scheduler
*
* The thread scheduling procedure.
* Passes control directly to
* scheduler_separated_stack().
*
*/
void scheduler(void)
{
volatile ipl_t ipl;
 
ASSERT(CPU != NULL);
 
ipl = interrupts_disable();
 
if (atomic_get(&haltstate))
halt();
 
if (THREAD) {
spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
fpu_context_save(&(THREAD->saved_fpu_context));
#endif
if (!context_save(&THREAD->saved_context)) {
/*
* This is the place where threads leave scheduler();
*/
spinlock_unlock(&THREAD->lock);
interrupts_restore(THREAD->saved_context.ipl);
return;
}
 
/*
* Interrupt priority level of preempted thread is recorded here
* to facilitate scheduler() invocations from interrupts_disable()'d
* code (e.g. waitq_sleep_timeout()).
*/
THREAD->saved_context.ipl = ipl;
}
 
/*
* Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
* and preemption counter. At this point THE could be coming either
* from THREAD's or CPU's stack.
*/
the_copy(THE, (the_t *) CPU->stack);
 
/*
* We may not keep the old stack.
* Reason: If we kept the old stack and got blocked, for instance, in
* find_best_thread(), the old thread could get rescheduled by another
* CPU and overwrite the part of its own stack that was also used by
* the scheduler on this CPU.
*
* Moreover, we have to bypass the compiler-generated POP sequence
* which is fooled by SP being set to the very top of the stack.
* Therefore the scheduler() function continues in
* scheduler_separated_stack().
*/
context_save(&CPU->saved_context);
context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
context_restore(&CPU->saved_context);
/* not reached */
}
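
One detail in scheduler() above that is easy to misread: context_save() behaves like setjmp() with the return values flipped. As the code and its comment show, it returns nonzero on the initial save, letting scheduler() go on to switch stacks, and returns zero later, when the thread is picked to run again and its saved context is resumed via context_restore(); that zero branch is "the place where threads leave scheduler()". The userspace analogy below uses only standard setjmp()/longjmp() and is illustrative; the kernel routines additionally carry the stack pointer and, separately, the saved IPL.

#include <setjmp.h>
#include <stdio.h>

/* Analogy for the context_save()/context_restore() pattern in scheduler().
 * setjmp() returns 0 on the initial save and nonzero when longjmp() resumes it,
 * i.e. the opposite convention of context_save() as used above. */
static jmp_buf saved;

int main(void)
{
    if (setjmp(saved) == 0) {
        /* initial save: corresponds to context_save() returning nonzero,
         * after which scheduler() switches to the per-CPU stack */
        printf("context saved, switching away\n");
        longjmp(saved, 1);      /* stands in for a later context_restore() */
    } else {
        /* resumed: corresponds to context_save() returning 0,
         * "the place where threads leave scheduler()" */
        printf("resumed, leaving scheduler()\n");
    }
    return 0;
}

The context_save(&CPU->saved_context), context_set(...), context_restore(...) sequence right after that test is what actually moves execution onto the per-CPU stack, for the reasons spelled out in the long comment above it.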
 
/** Scheduler stack switch wrapper
*
* Second part of the scheduler() function
327,7 → 264,7
*
* Assume THREAD->lock is held.
*/
void scheduler_separated_stack(void)
static void scheduler_separated_stack(void)
{
int priority;
 
334,7 → 271,7
ASSERT(CPU != NULL);
 
if (THREAD) {
/* must be run after the switch to scheduler stack */
/* must be run after switch to scheduler stack */
after_thread_ran();
 
switch (THREAD->state) {
384,6 → 321,7
THREAD = NULL;
}
 
 
THREAD = find_best_thread();
spinlock_lock(&THREAD->lock);
449,6 → 387,76
/* not reached */
}
 
 
/** The scheduler
*
* The thread scheduling procedure.
* Passes control directly to
* scheduler_separated_stack().
*
*/
void scheduler(void)
{
volatile ipl_t ipl;
 
ASSERT(CPU != NULL);
 
ipl = interrupts_disable();
 
if (atomic_get(&haltstate))
halt();
 
if (THREAD) {
spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
fpu_context_save(&(THREAD->saved_fpu_context));
#endif
if (!context_save(&THREAD->saved_context)) {
/*
* This is the place where threads leave scheduler();
*/
spinlock_unlock(&THREAD->lock);
interrupts_restore(THREAD->saved_context.ipl);
return;
}
 
/*
* Interrupt priority level of preempted thread is recorded here
* to facilitate scheduler() invocations from interrupts_disable()'d
* code (e.g. waitq_sleep_timeout()).
*/
THREAD->saved_context.ipl = ipl;
}
 
/*
* Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
* and preemption counter. At this point THE could be coming either
* from THREAD's or CPU's stack.
*/
the_copy(THE, (the_t *) CPU->stack);
 
/*
* We may not keep the old stack.
* Reason: If we kept the old stack and got blocked, for instance, in
* find_best_thread(), the old thread could get rescheduled by another
* CPU and overwrite the part of its own stack that was also used by
* the scheduler on this CPU.
*
* Moreover, we have to bypass the compiler-generated POP sequence
* which is fooled by SP being set to the very top of the stack.
* Therefore the scheduler() function continues in
* scheduler_separated_stack().
*/
context_save(&CPU->saved_context);
context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
context_restore(&CPU->saved_context);
/* not reached */
}
 
 
 
 
 
#ifdef CONFIG_SMP
/** Load balancing thread
*
604,14 → 612,12
/* We are going to mess with scheduler structures,
* let's not be interrupted */
ipl = interrupts_disable();
printf("Scheduler dump:\n");
printf("*********** Scheduler dump ***********\n");
for (cpu=0;cpu < config.cpu_count; cpu++) {
 
if (!cpus[cpu].active)
continue;
 
spinlock_lock(&cpus[cpu].lock);
printf("cpu%d: nrdy: %d, needs_relink: %d\n",
printf("cpu%d: nrdy: %d needs_relink: %d\n",
cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
for (i=0; i<RQ_COUNT; i++) {
621,7 → 627,7
spinlock_unlock(&r->lock);
continue;
}
printf("\trq[%d]: ", i);
printf("\tRq %d: ", i);
for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
t = list_get_instance(cur, thread_t, rq_link);
printf("%d(%s) ", t->tid,