Subversion Repositories HelenOS-historic

Compare Revisions

Ignore whitespace Rev 905 → Rev 906

/kernel/trunk/generic/include/fpu_context.h
33,9 → 33,13
#include <arch/fpu_context.h>
#include <typedefs.h>
 
#if defined(CONFIG_FPU_LAZY) && !defined(ARCH_HAS_FPU)
# error "CONFIG_FPU_LAZY defined, but no ARCH_HAS_FPU"
#endif
 
/* FPU context manipulation (see kernel/trunk/generic/src/proc/scheduler.c).
 * fpu_init() takes no argument as of Rev 906: the per-thread context is
 * allocated from a slab cache and initialised in place by the caller. */
extern void fpu_context_save(fpu_context_t *);
extern void fpu_context_restore(fpu_context_t *);
extern void fpu_init(void);
extern void fpu_enable(void);
extern void fpu_disable(void);
 
/kernel/trunk/generic/include/proc/thread.h
39,6 → 39,7
#include <synch/rwlock.h>
#include <config.h>
#include <adt/list.h>
#include <mm/slab.h>
 
#define THREAD_STACK_SIZE STACK_SIZE
 
82,7 → 83,7
timeout_t sleep_timeout; /**< Timeout used for timeoutable sleeping. */
volatile int timeout_pending; /**< Flag signalling sleep timeout in progress. */
 
fpu_context_t saved_fpu_context;
fpu_context_t *saved_fpu_context;
int fpu_context_exists;
 
/*
135,4 → 136,8
extern void thread_print_list(void);
extern void thread_destroy(thread_t *t);
 
 
/* Fpu context slab cache */
extern slab_cache_t *fpu_context_slab;
 
#endif
/kernel/trunk/generic/src/proc/scheduler.c
63,20 → 63,20
void before_thread_runs(void)
{
before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
#ifdef CONFIG_FPU_LAZY
if(THREAD==CPU->fpu_owner)
fpu_enable();
else
fpu_disable();
#else
#else
fpu_enable();
if (THREAD->fpu_context_exists)
fpu_context_restore(&(THREAD->saved_fpu_context));
fpu_context_restore(THREAD->saved_fpu_context);
else {
fpu_init(&(THREAD->saved_fpu_context));
fpu_init();
THREAD->fpu_context_exists=1;
}
#endif
#endif
}
 
/** Take actions after THREAD had run.
102,7 → 102,7
/* Save old context */
if (CPU->fpu_owner != NULL) {
spinlock_lock(&CPU->fpu_owner->lock);
fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
fpu_context_save(CPU->fpu_owner->saved_fpu_context);
/* don't prevent migration */
CPU->fpu_owner->fpu_context_engaged=0;
spinlock_unlock(&CPU->fpu_owner->lock);
110,9 → 110,17
 
spinlock_lock(&THREAD->lock);
if (THREAD->fpu_context_exists) {
fpu_context_restore(&THREAD->saved_fpu_context);
fpu_context_restore(THREAD->saved_fpu_context);
} else {
fpu_init(&(THREAD->saved_fpu_context));
/* Allocate FPU context */
if (!THREAD->saved_fpu_context) {
/* Might sleep */
spinlock_unlock(&THREAD->lock);
THREAD->saved_fpu_context = slab_alloc(fpu_context_slab,
0);
spinlock_lock(&THREAD->lock);
}
fpu_init();
THREAD->fpu_context_exists=1;
}
CPU->fpu_owner=THREAD;
274,9 → 282,9
 
if (THREAD) {
spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
fpu_context_save(&(THREAD->saved_fpu_context));
#endif
#ifndef CONFIG_FPU_LAZY
fpu_context_save(THREAD->saved_fpu_context);
#endif
if (!context_save(&THREAD->saved_context)) {
/*
* This is the place where threads leave scheduler();
421,14 → 429,14
*/
as_switch(as1, as2);
}
TASK = THREAD->task;
TASK = THREAD->task;
}
 
THREAD->state = Running;
 
#ifdef SCHEDULER_VERBOSE
#ifdef SCHEDULER_VERBOSE
printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif
#endif
 
/*
* Some architectures provide late kernel PA2KA(identity)
546,9 → 554,9
* Ready t on local CPU
*/
spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
#ifdef KCPULB_VERBOSE
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
#endif
t->flags |= X_STOLEN;
spinlock_unlock(&t->lock);
/kernel/trunk/generic/src/proc/thread.c
63,6 → 63,9
__u32 last_tid = 0;
 
static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif
 
 
/** Thread wrapper
103,9 → 106,24
link_initialize(&t->th_link);
link_initialize(&t->threads_link);
#ifdef ARCH_HAS_FPU
# ifdef CONFIG_FPU_LAZY
t->saved_fpu_context = NULL;
# else
t->saved_fpu_context = slab_alloc(fpu_context_slab,kmflags);
if (!t->saved_fpu_context)
return -1;
# endif
#endif
 
pfn = frame_alloc_rc(ONE_FRAME, FRAME_KA | kmflags,&status);
if (status)
if (status) {
#ifdef ARCH_HAS_FPU
if (t->saved_fpu_context)
slab_free(fpu_context_slab,t->saved_fpu_context);
#endif
return -1;
}
t->kstack = (__u8 *)PA2KA(PFN2ADDR(pfn));
 
return 0;
117,6 → 135,10
thread_t *t = (thread_t *)obj;
 
frame_free(ADDR2PFN(KA2PA(t->kstack)));
#ifdef ARCH_HAS_FPU
if (t->saved_fpu_context)
slab_free(fpu_context_slab,t->saved_fpu_context);
#endif
return 1; /* One page freed */
}
 
132,6 → 154,12
thread_slab = slab_cache_create("thread_slab",
sizeof(thread_t),0,
thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
fpu_context_slab = slab_cache_create("fpu_slab",
sizeof(fpu_context_t),
FPU_CONTEXT_ALIGN,
NULL, NULL, 0);
#endif
}
 
 
/kernel/trunk/arch/amd64/src/fpu_context.c
34,10 → 34,6
/** Save FPU (mmx, sse) context using fxsave instruction */
void fpu_context_save(fpu_context_t *fctx)
{
/* Align on 16-byte boundary */
if (((__u64)fctx) & 0xf)
fctx = (fpu_context_t *)((((__u64)fctx) | 0xf) + 1);
 
__asm__ volatile (
"fxsave %0"
: "=m"(*fctx)
47,9 → 43,6
/** Restore FPU (mmx,sse) context using fxrstor instruction */
/** Restore FPU (mmx, sse) context using the fxrstor instruction.
 *
 * The context area must be 16-byte aligned; as of Rev 906 alignment is
 * guaranteed by the dedicated FPU slab cache (FPU_CONTEXT_ALIGN), so the
 * old in-place pointer realignment has been removed.
 *
 * NOTE(review): fxrstor only reads *fctx, so an input "m" constraint would
 * describe the operand more accurately than the output "=m" used here —
 * verify against the surrounding asm conventions before changing.
 */
void fpu_context_restore(fpu_context_t *fctx)
{
	__asm__ volatile (
		"fxrstor %0"
		: "=m"(*fctx)
		);
}
 
void fpu_init(fpu_context_t *fctx)
void fpu_init()
{
/* TODO: Zero all SSE, MMX etc. registers */
__asm__ volatile (
/kernel/trunk/arch/mips32/include/fpu_context.h
31,6 → 31,9
 
#include <arch/types.h>
 
#define ARCH_HAS_FPU
#define FPU_CONTEXT_ALIGN sizeof(__native)
 
struct fpu_context {
__native dregs[32];
__native cregs[32];
/kernel/trunk/arch/mips32/src/fpu_context.c
50,7 → 50,7
#endif
}
 
/** Initialise FPU state for a thread's first run (mips32).
 *
 * Declared as (void) to match the prototype in generic/include/fpu_context.h
 * (an empty parameter list would leave the function unprototyped in C).
 */
void fpu_init(void)
{
	/* TODO: Zero all registers */
}
/kernel/trunk/arch/ia32/include/fpu_context.h
31,9 → 31,12
 
#include <arch/types.h>
 
#define ARCH_HAS_FPU
#define FPU_CONTEXT_ALIGN 16
 
/** FPU/SSE register save area in FXSAVE/FXRSTOR format (ia32).
 *
 * Exactly 512 bytes; the required 16-byte alignment (FPU_CONTEXT_ALIGN) is
 * provided by the FPU slab cache as of Rev 906, so the old +16 padding for
 * manual realignment is no longer needed.
 */
struct fpu_context {
	__u8 fpu[512];		/* FXSAVE & FXRSTOR storage area */
};
 
 
/kernel/trunk/arch/ia32/src/fpu_context.c
48,7 → 48,7
);
}
 
void fpu_init(fpu_context_t *fctx)
void fpu_init()
{
__asm__ volatile (
"fninit;"