Subversion Repositories HelenOS

Compare Revisions

Rev 1853 → Rev 1854

/trunk/kernel/generic/src/proc/scheduler.c
141,8 → 141,7
/* Might sleep */
spinlock_unlock(&THREAD->lock);
spinlock_unlock(&CPU->lock);
- THREAD->saved_fpu_context = slab_alloc(fpu_context_slab,
-     0);
+ THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
/* We may have switched CPUs during slab_alloc */
goto restart;
}
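
The hunk above only rejoins a wrapped call, but the pattern around it is worth spelling out: slab_alloc() might sleep, so both spinlocks are dropped first, and because the thread may resume on a different CPU, the code restarts from the top instead of trusting anything it read under the locks. A rough userland sketch of that shape, with pthread mutexes standing in for the kernel spinlocks and malloc() for slab_alloc():

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t thread_lock = PTHREAD_MUTEX_INITIALIZER;
static void *saved_fpu_context;

void ensure_fpu_context(void)
{
restart:
    pthread_mutex_lock(&cpu_lock);
    pthread_mutex_lock(&thread_lock);
    if (saved_fpu_context == NULL) {
        /* Might sleep: drop both locks before allocating. */
        pthread_mutex_unlock(&thread_lock);
        pthread_mutex_unlock(&cpu_lock);
        saved_fpu_context = malloc(512);
        /* We may have moved to another CPU while blocked, so
           nothing read under the locks is valid any more;
           take the locks again and re-check from scratch. */
        goto restart;
    }
    pthread_mutex_unlock(&thread_lock);
    pthread_mutex_unlock(&cpu_lock);
}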
235,9 → 234,10
t->priority = i; /* correct rq index */
 
/*
- * Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
+ * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
+ * when load balancing needs emerge.
*/
- t->flags &= ~X_STOLEN;
+ t->flags &= ~THREAD_FLAG_STOLEN;
spinlock_unlock(&t->lock);
 
return t;
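
For context, this hunk sits on the scheduler's dequeue path: once a thread has been picked to run, its one-migration-before-running budget resets, so the STOLEN flag is cleared and the load balancer may move it again later. A minimal sketch, with an illustrative flag value:

#define THREAD_FLAG_STOLEN (1 << 1)    /* illustrative value */

struct thread {
    unsigned flags;
    int priority;
};

/* Dequeue side: the thread is about to run, so clear STOLEN and
   let the load balancer consider it for migration again later. */
static void dequeue_fixup(struct thread *t, int rq_index)
{
    t->priority = rq_index;    /* correct rq index */
    t->flags &= ~THREAD_FLAG_STOLEN;
}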
349,7 → 349,8
* scheduler_separated_stack().
*/
context_save(&CPU->saved_context);
- context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE);
+ context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
+     (uintptr_t) CPU->stack, CPU_STACK_SIZE);
context_restore(&CPU->saved_context);
/* not reached */
}
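
The wrapped call belongs to the stack-switch trampoline: save the current context, point a saved context at scheduler_separated_stack() on the CPU's private stack, and jump there, never returning. POSIX ucontext offers a close userland analogue; the sketch below is illustrative and is not the kernel's context API:

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define STACK_SIZE (64 * 1024)

static ucontext_t saved, separated;

/* Analogue of scheduler_separated_stack(): runs on its own stack. */
static void separated_stack(void)
{
    puts("now on the private stack");
    exit(0);    /* like the kernel path, this never returns */
}

int main(void)
{
    /* context_save() / context_set() / context_restore() analogue */
    getcontext(&separated);
    separated.uc_stack.ss_sp = malloc(STACK_SIZE);
    separated.uc_stack.ss_size = STACK_SIZE;
    separated.uc_link = NULL;
    makecontext(&separated, separated_stack, 0);
    swapcontext(&saved, &separated);
    return 1;    /* not reached */
}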
483,7 → 484,8
THREAD->state = Running;
 
#ifdef SCHEDULER_VERBOSE
printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif
 
/*
556,7 → 558,7
 
/*
* Not interested in ourselves.
- * Doesn't require interrupt disabling for kcpulb is X_WIRED.
+ * Doesn't require interrupt disabling for kcpulb has THREAD_FLAG_WIRED.
*/
if (CPU == cpu)
continue;
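
The comment records why this test is safe without disabling interrupts: kcpulb is itself wired, so the CPU it runs on cannot change underneath the comparison. A hypothetical loop shape, just to fix the idea:

#include <stddef.h>

struct cpu {
    int id;
    /* ... run queues ... */
};

/* Because the balancing thread is wired, "self" cannot change
   under it, so the comparison needs no interrupt disabling. */
static void balance(struct cpu *cpus, size_t ncpus, struct cpu *self)
{
    for (size_t i = 0; i < ncpus; i++) {
        struct cpu *cpu = &cpus[i];
        if (cpu == self)
            continue;    /* not interested in ourselves */
        /* ... try to steal from cpu's run queues ... */
    }
}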
577,12 → 579,14
while (l != &r->rq_head) {
t = list_get_instance(l, thread_t, rq_link);
/*
- * We don't want to steal CPU-wired threads neither threads already stolen.
- * The latter prevents threads from migrating between CPU's without ever being run.
- * We don't want to steal threads whose FPU context is still in CPU.
+ * We don't want to steal CPU-wired threads neither threads already
+ * stolen. The latter prevents threads from migrating between CPU's
+ * without ever being run. We don't want to steal threads whose FPU
+ * context is still in CPU.
*/
spinlock_lock(&t->lock);
- if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
+ if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
+     (!(t->fpu_context_engaged)) ) {
/*
* Remove t from r.
*/
608,9 → 612,11
*/
spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
atomic_get(&nrdy) / config.cpu_active);
#endif
- t->flags |= X_STOLEN;
+ t->flags |= THREAD_FLAG_STOLEN;
t->state = Entering;
spinlock_unlock(&t->lock);
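
Taken together, the last two hunks implement the steal decision: a thread is eligible only if it is neither wired nor already stolen and has no live FPU state on its current CPU; once taken, it is marked STOLEN and reset to Entering so it gets re-enqueued on the new CPU. A condensed sketch, with illustrative flag values and a trimmed thread structure:

#include <stdbool.h>

enum thread_state { Entering, Ready, Running };

enum {
    THREAD_FLAG_WIRED  = 1 << 0,    /* illustrative values */
    THREAD_FLAG_STOLEN = 1 << 1
};

struct thread {
    unsigned flags;
    bool fpu_context_engaged;
    enum thread_state state;
};

/* Eligibility: not wired, not already stolen (prevents migrating
   forever without running), and no live FPU state on this CPU. */
static bool can_steal(const struct thread *t)
{
    return !(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)) &&
        !t->fpu_context_engaged;
}

/* Claim t for the stealing CPU. */
static void claim(struct thread *t)
{
    t->flags |= THREAD_FLAG_STOLEN;    /* one migration per run */
    t->state = Entering;    /* re-enqueued on the new CPU later */
}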
/trunk/kernel/generic/src/proc/thread.c
129,6 → 129,9
link_initialize(&t->rq_link);
link_initialize(&t->wq_link);
link_initialize(&t->th_link);
 
+ /* call the architecture-specific part of the constructor */
+ thr_constructor_arch(t);
#ifdef ARCH_HAS_FPU
# ifdef CONFIG_FPU_LAZY
157,6 → 160,9
{
thread_t *t = (thread_t *) obj;
 
+ /* call the architecture-specific part of the destructor */
+ thr_destructor_arch(t);
 
frame_free(KA2PA(t->kstack));
#ifdef ARCH_HAS_FPU
if (t->saved_fpu_context)
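
These two thread.c hunks add the same hook pattern: the generic slab constructor and destructor delegate to architecture-specific parts, the constructor after generic setup and the destructor before the kernel stack is freed. A sketch of the shape, with hypothetical signatures rather than the real slab ones:

struct thread { int dummy; };    /* trimmed for the sketch */

/* Hypothetical arch hooks; no-op defaults for illustration. */
static void thr_constructor_arch(struct thread *t) { (void) t; }
static void thr_destructor_arch(struct thread *t) { (void) t; }

static int thr_constructor(void *obj, unsigned flags)
{
    struct thread *t = obj;
    (void) flags;
    /* ... generic setup: list links, kernel stack ... */
    thr_constructor_arch(t);    /* arch part sees initialized fields */
    return 0;
}

static int thr_destructor(void *obj)
{
    struct thread *t = obj;
    thr_destructor_arch(t);    /* arch part releases its resources first */
    /* ... then generic teardown: frame_free() of the kstack ... */
    return 1;
}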
210,7 → 216,7
i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority;
cpu = CPU;
- if (t->flags & X_WIRED) {
+ if (t->flags & THREAD_FLAG_WIRED) {
cpu = t->cpu;
}
t->state = Ready;
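
The renamed flag drives run-queue placement in thread_ready(): a wired thread must return to the CPU it is bound to, anything else is enqueued locally. In sketch form, with an illustrative flag value:

struct cpu;

#define THREAD_FLAG_WIRED (1 << 0)    /* illustrative value */

struct thread {
    unsigned flags;
    struct cpu *cpu;    /* the CPU a wired thread is bound to */
};

/* Run-queue target: wired threads go home, others stay local. */
static struct cpu *target_cpu(struct thread *t, struct cpu *local)
{
    return (t->flags & THREAD_FLAG_WIRED) ? t->cpu : local;
}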
295,8 → 301,6
t = (thread_t *) slab_alloc(thread_slab, 0);
if (!t)
return NULL;
 
- thread_create_arch(t);
/* Not needed, but good for debugging */
memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
323,7 → 327,7
t->ticks = -1;
t->priority = -1; /* start in rq[0] */
t->cpu = NULL;
- t->flags = 0;
+ t->flags = flags;
t->state = Entering;
t->call_me = NULL;
t->call_me_with = NULL;
347,6 → 351,8
t->fpu_context_exists = 0;
t->fpu_context_engaged = 0;
 
+ thread_create_arch(t); /* might depend on previous initialization */
/*
* Attach to the containing task.
589,7 → 595,7
return (unative_t) rc;
}
 
- if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
+ if ((t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf))) {
tid = t->tid;
thread_ready(t);
return (unative_t) tid;