Subversion Repositories: HelenOS-historic

Compare Revisions: Rev 905 → Rev 906

/kernel/trunk/generic/src/proc/scheduler.c
63,20 → 63,20
 void before_thread_runs(void)
 {
 	before_thread_runs_arch();
 #ifdef CONFIG_FPU_LAZY
 	if(THREAD==CPU->fpu_owner)
 		fpu_enable();
 	else
 		fpu_disable();
 #else
 	fpu_enable();
 	if (THREAD->fpu_context_exists)
-		fpu_context_restore(&(THREAD->saved_fpu_context));
+		fpu_context_restore(THREAD->saved_fpu_context);
 	else {
-		fpu_init(&(THREAD->saved_fpu_context));
+		fpu_init();
 		THREAD->fpu_context_exists=1;
 	}
 #endif
 }
 
 /** Take actions after THREAD had run.
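This hunk moves the non-lazy path to a pointer-based FPU context: THREAD->saved_fpu_context is now allocated separately, so fpu_context_restore() takes the pointer directly and fpu_init() no longer takes an address at all. The CONFIG_FPU_LAZY branch above only flips the FPU on or off on a context switch; the actual state transfer is deferred until the thread's first FPU instruction traps. Below is a minimal user-space model of that lazy-switching idea. Every name and type in it (cpu_t, fpu_ctx_t, model_fpu_trap, ...) is an illustrative stand-in, not HelenOS API:

#include <stdio.h>
#include <string.h>

typedef struct { double regs[8]; } fpu_ctx_t;               /* model of FPU state */
typedef struct { fpu_ctx_t ctx; int ctx_exists; } thread_t;
typedef struct { thread_t *fpu_owner; fpu_ctx_t hw; int fpu_enabled; } cpu_t;

/* Runs on every context switch: cheap, no FPU state is moved. */
static void model_before_thread_runs(cpu_t *cpu, thread_t *t)
{
	/* Leave the FPU usable only if the hardware state already belongs
	 * to the incoming thread; otherwise arm the "unavailable" trap. */
	cpu->fpu_enabled = (cpu->fpu_owner == t);
}

/* Runs from the FPU-unavailable trap on the thread's first FPU use. */
static void model_fpu_trap(cpu_t *cpu, thread_t *t)
{
	if (cpu->fpu_owner)
		cpu->fpu_owner->ctx = cpu->hw;      /* save the old owner lazily */
	if (t->ctx_exists)
		cpu->hw = t->ctx;                   /* restore this thread's state */
	else {
		memset(&cpu->hw, 0, sizeof(cpu->hw));  /* fresh FPU state */
		t->ctx_exists = 1;
	}
	cpu->fpu_owner = t;
	cpu->fpu_enabled = 1;
}

int main(void)
{
	cpu_t cpu = { 0 };
	thread_t a = { { { 0 } }, 0 }, b = { { { 0 } }, 0 };

	model_before_thread_runs(&cpu, &a);
	if (!cpu.fpu_enabled)
		model_fpu_trap(&cpu, &a);   /* a's first FPU use */
	model_before_thread_runs(&cpu, &b);
	if (!cpu.fpu_enabled)
		model_fpu_trap(&cpu, &b);   /* b takes the FPU; a's state was saved */
	printf("fpu_owner is %c\n", cpu.fpu_owner == &b ? 'b' : 'a');
	return 0;
}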
102,7 → 102,7
 	/* Save old context */
 	if (CPU->fpu_owner != NULL) {
 		spinlock_lock(&CPU->fpu_owner->lock);
-		fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
+		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
 		/* don't prevent migration */
 		CPU->fpu_owner->fpu_context_engaged=0;
 		spinlock_unlock(&CPU->fpu_owner->lock);
110,9 → 110,17
 
 	spinlock_lock(&THREAD->lock);
 	if (THREAD->fpu_context_exists) {
-		fpu_context_restore(&THREAD->saved_fpu_context);
+		fpu_context_restore(THREAD->saved_fpu_context);
 	} else {
-		fpu_init(&(THREAD->saved_fpu_context));
+		/* Allocate FPU context */
+		if (!THREAD->saved_fpu_context) {
+			/* Might sleep */
+			spinlock_unlock(&THREAD->lock);
+			THREAD->saved_fpu_context = slab_alloc(fpu_context_slab,
+							       0);
+			spinlock_lock(&THREAD->lock);
+		}
+		fpu_init();
 		THREAD->fpu_context_exists=1;
 	}
 	CPU->fpu_owner=THREAD;
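The interesting detail in this hunk is the lock dance around slab_alloc(): the allocator may sleep (hence the /* Might sleep */ comment), and sleeping while holding a spinlock is forbidden, so THREAD->lock is released for the duration of the allocation and re-taken afterwards. The scheduler can get away without rechecking the field because only THREAD itself ever allocates its own context. A general-purpose sketch of the same shape, modeled in user space with a pthread mutex standing in for the spinlock and malloc() for slab_alloc(); the recheck after relocking guards against a concurrent allocator, which the kernel path does not need. All names here are illustrative:

#include <pthread.h>
#include <stdlib.h>

typedef struct {
	pthread_mutex_t lock;
	void *ctx;              /* lazily allocated per-thread context */
} lthread_t;

void *ensure_ctx(lthread_t *t, size_t size)
{
	pthread_mutex_lock(&t->lock);
	if (!t->ctx) {
		/* The allocator might sleep, so drop the lock around it. */
		pthread_mutex_unlock(&t->lock);
		void *fresh = malloc(size);
		pthread_mutex_lock(&t->lock);
		if (!t->ctx)
			t->ctx = fresh;     /* we won the race (or there was none) */
		else
			free(fresh);        /* someone beat us; drop the spare */
	}
	void *ctx = t->ctx;
	pthread_mutex_unlock(&t->lock);
	return ctx;
}

int main(void)
{
	lthread_t t = { PTHREAD_MUTEX_INITIALIZER, NULL };
	void *ctx = ensure_ctx(&t, 512);
	free(ctx);
	return 0;
}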
274,9 → 282,9
 
 	if (THREAD) {
 		spinlock_lock(&THREAD->lock);
 #ifndef CONFIG_FPU_LAZY
-		fpu_context_save(&(THREAD->saved_fpu_context));
+		fpu_context_save(THREAD->saved_fpu_context);
 #endif
 		if (!context_save(&THREAD->saved_context)) {
 			/*
 			 * This is the place where threads leave scheduler();
421,14 → 429,14
 			 */
 			as_switch(as1, as2);
 		}
 		TASK = THREAD->task;
 	}
 
 	THREAD->state = Running;
 
 #ifdef SCHEDULER_VERBOSE
 	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
 #endif
 
 	/*
 	 * Some architectures provide late kernel PA2KA(identity)
546,9 → 554,9
 		 * Ready t on local CPU
 		 */
 		spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
 		printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
 #endif
 		t->flags |= X_STOLEN;
 		spinlock_unlock(&t->lock);
/kernel/trunk/generic/src/proc/thread.c
63,6 → 63,9
 __u32 last_tid = 0;
 
 static slab_cache_t *thread_slab;
+#ifdef ARCH_HAS_FPU
+slab_cache_t *fpu_context_slab;
+#endif
 
 
/** Thread wrapper
103,9 → 106,24
 	link_initialize(&t->th_link);
 	link_initialize(&t->threads_link);
+#ifdef ARCH_HAS_FPU
+# ifdef CONFIG_FPU_LAZY
+	t->saved_fpu_context = NULL;
+# else
+	t->saved_fpu_context = slab_alloc(fpu_context_slab,kmflags);
+	if (!t->saved_fpu_context)
+		return -1;
+# endif
+#endif
 
 	pfn = frame_alloc_rc(ONE_FRAME, FRAME_KA | kmflags,&status);
-	if (status)
+	if (status) {
+#ifdef ARCH_HAS_FPU
+		if (t->saved_fpu_context)
+			slab_free(fpu_context_slab,t->saved_fpu_context);
+#endif
 		return -1;
+	}
 	t->kstack = (__u8 *)PA2KA(PFN2ADDR(pfn));
 
 	return 0;
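With the constructor now owning two resources, its failure path has to unwind: if the kernel-stack frame cannot be allocated, the already-allocated FPU context must go back to its slab before the constructor reports failure, or the object would leak it. A toy illustration of that unwind order, with malloc()/free() standing in for slab_alloc()/slab_free() and frame_alloc_rc(); every name here is hypothetical:

#include <stdlib.h>

typedef struct {
	void *fpu_ctx;   /* stands in for t->saved_fpu_context */
	void *kstack;    /* stands in for the kernel-stack frame */
} tobj_t;

int tobj_ctor(tobj_t *t)
{
	t->fpu_ctx = malloc(64);       /* first resource ("slab_alloc") */
	if (!t->fpu_ctx)
		return -1;

	t->kstack = malloc(4096);      /* second resource ("frame_alloc_rc") */
	if (!t->kstack) {
		free(t->fpu_ctx);      /* unwind the earlier allocation */
		t->fpu_ctx = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	tobj_t t;
	if (tobj_ctor(&t) == 0) {
		free(t.kstack);
		free(t.fpu_ctx);
	}
	return 0;
}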
117,6 → 135,10
 	thread_t *t = (thread_t *)obj;
 
 	frame_free(ADDR2PFN(KA2PA(t->kstack)));
+#ifdef ARCH_HAS_FPU
+	if (t->saved_fpu_context)
+		slab_free(fpu_context_slab,t->saved_fpu_context);
+#endif
 	return 1; /* One page freed */
 }
 
132,6 → 154,12
 	thread_slab = slab_cache_create("thread_slab",
 					sizeof(thread_t),0,
 					thr_constructor, thr_destructor, 0);
+#ifdef ARCH_HAS_FPU
+	fpu_context_slab = slab_cache_create("fpu_slab",
+					     sizeof(fpu_context_t),
+					     FPU_CONTEXT_ALIGN,
+					     NULL, NULL, 0);
+#endif
 }
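The new fpu_context_slab is created once in thread_init() and then serves every per-thread FPU context, so FPU_CONTEXT_ALIGN is guaranteed by the cache itself rather than by each call site, and no constructor or destructor is registered for the objects. A toy user-space model of that create-once/allocate-many lifecycle, with aligned_alloc() standing in for the slab backend; all names in it are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy cache: fixes object size and alignment at creation time. */
typedef struct {
	const char *name;
	size_t size;     /* object size rounded up to the alignment */
	size_t align;
} toy_cache_t;

static toy_cache_t *toy_cache_create(const char *name, size_t size, size_t align)
{
	toy_cache_t *c = malloc(sizeof(*c));
	if (c) {
		c->name = name;
		c->align = align ? align : sizeof(void *);
		/* aligned_alloc() requires size to be a multiple of align */
		c->size = (size + c->align - 1) / c->align * c->align;
	}
	return c;
}

static void *toy_alloc(toy_cache_t *c)
{
	void *obj = aligned_alloc(c->align, c->size);
	if (obj)
		memset(obj, 0, c->size);   /* hand out zeroed objects */
	return obj;
}

static void toy_free(toy_cache_t *c, void *obj)
{
	(void) c;
	free(obj);
}

int main(void)
{
	/* Create once, like fpu_context_slab in thread_init()... */
	toy_cache_t *fpu_cache = toy_cache_create("fpu_slab", 512, 16);
	/* ...then allocate one aligned context per thread that needs it. */
	void *ctx = toy_alloc(fpu_cache);
	printf("%s object at %p\n", fpu_cache->name, ctx);
	toy_free(fpu_cache, ctx);
	free(fpu_cache);
	return 0;
}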