Subversion Repositories HelenOS-historic

Compare Revisions

Ignore whitespace Rev 308 → Rev 309

/SPARTAN/trunk/include/fpu_context.h
35,9 → 35,9
 
/* Save/restore the full FPU register state to/from *fctx (per-arch). */
extern void fpu_context_save(fpu_context_t *);
extern void fpu_context_restore(fpu_context_t *);
/* Lazy variants: presumably invoked from the device-not-available
 * fault path on first FPU use -- TODO confirm per architecture. */
extern void fpu_lazy_context_save(fpu_context_t *);
extern void fpu_lazy_context_restore(fpu_context_t *);
/* Reset the FPU to a clean initial state. */
extern void fpu_init(void);
/* Enable/disable FPU instruction execution (e.g. via CR0.TS on x86). */
extern void fpu_enable(void);
extern void fpu_disable(void);
 
 
#endif /*fpu_context_h*/
/SPARTAN/trunk/include/proc/scheduler.h
51,6 → 51,7
 
extern void scheduler_init(void);
 
/* FPU_LAZY builds only: called from the arch #NM/FPU-fault handler to
 * perform the deferred FPU context switch for the current thread. */
extern void scheduler_fpu_lazy_request(void);
extern void scheduler(void);
/* Per-CPU kernel thread entry point; presumably the CPU load balancer
 * -- confirm against thread creation site. */
extern void kcpulb(void *arg);
 
/SPARTAN/trunk/src/proc/scheduler.c
60,10 → 60,42
*/
/** Prepare processor state before a thread starts running.
 *
 * Runs the architecture-specific hook exactly once, then arranges FPU
 * state: under FPU_LAZY only enable/disable is toggled here (the real
 * register switch is deferred to the first FPU instruction, which
 * traps into scheduler_fpu_lazy_request()); otherwise the thread's FPU
 * context is restored now, or freshly initialized on first use.
 *
 * Fix: the flattened revision left a duplicate before_thread_runs_arch()
 * call and a stray unconditional fpu_context_restore() ahead of the
 * conditional logic; both stale lines are removed.
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&(THREAD->saved_fpu_context));
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}
 
#ifdef FPU_LAZY
/** Take ownership of this CPU's FPU for the current thread.
 *
 * Called on the first FPU instruction after a context switch (from the
 * arch FPU-fault handler, see nm_fault()).  Saves the previous owner's
 * registers and releases its migration pin, then restores the current
 * thread's context -- or initializes the FPU on the thread's first use
 * -- and records the new ownership.
 */
void scheduler_fpu_lazy_request(void)
{
fpu_enable();
/* Evict the previous owner's live register state, if any. */
if (CPU->fpu_owner != NULL) {
fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
/* don't prevent migration */
CPU->fpu_owner->fpu_context_engaged=0;
}
if (THREAD->fpu_context_exists)
fpu_context_restore(&THREAD->saved_fpu_context);
else {
/* First FPU use by this thread: start from a clean FPU state. */
fpu_init();
THREAD->fpu_context_exists=1;
}
/* Engaged flag pins the thread to this CPU while its state is live. */
CPU->fpu_owner=THREAD;
THREAD->fpu_context_engaged = 1;
}
#endif
 
/** Initialize scheduler
*
240,7 → 272,9
 
if (THREAD) {
spinlock_lock(&THREAD->lock);
#ifndef FPU_LAZY
fpu_context_save(&(THREAD->saved_fpu_context));
#endif
if (!context_save(&THREAD->saved_context)) {
/*
* This is the place where threads leave scheduler();
/SPARTAN/trunk/arch/ppc/src/fpu_context.c
37,13 → 37,3
/* No-op stub: FPU context restore is not implemented on ppc yet. */
void fpu_context_restore(fpu_context_t *fctx)
{
}
 
 
/* No-op stub: lazy FPU context save is not implemented on ppc yet. */
void fpu_lazy_context_save(fpu_context_t *fctx)
{
}
 
/* No-op stub: lazy FPU context restore is not implemented on ppc yet. */
void fpu_lazy_context_restore(fpu_context_t *fctx)
{
 
}
/SPARTAN/trunk/arch/ppc/src/dummy.s
32,10 → 32,16
/* Exported entry points; all are placeholder stubs on ppc for now. */
.global userspace
.global before_thread_runs_arch
.global dummy
.global fpu_init
.global fpu_enable
.global fpu_disable
 
/* Every label below falls through to the shared `dummy` stub below. */
before_thread_runs_arch:
userspace:
asm_delay_loop:
fpu_init:
fpu_enable:
fpu_disable:
 
dummy:
0:
/SPARTAN/trunk/arch/ia64/src/fpu_context.c
38,11 → 38,3
{
}
 
 
/* No-op stub: lazy FPU context save is not implemented on ia64 yet. */
void fpu_lazy_context_save(fpu_context_t *fctx)
{
}
 
/* No-op stub: lazy FPU context restore is not implemented on ia64 yet. */
void fpu_lazy_context_restore(fpu_context_t *fctx)
{
}
/SPARTAN/trunk/arch/ia64/src/dummy.s
42,6 → 42,9
/* Exported placeholder entry points (stubs defined further below). */
.global cpu_sleep
.global frame_arch_init
.global dummy
.global fpu_enable
.global fpu_disable
/* Fixed typo: was ".gloabl", which silently left fpu_init non-global
 * (unknown directives may be ignored), breaking external references. */
.global fpu_init
 
before_thread_runs_arch:
userspace:
56,6 → 59,9
/* Placeholder entry points: all fall through to the `dummy` return
 * stub defined below. */
cpu_priority_restore:
cpu_sleep:
frame_arch_init:
fpu_init:
fpu_enable:
fpu_disable:
 
/* Shared no-op stub: return to the caller via branch register b0. */
dummy:
br.ret.sptk.many b0
/SPARTAN/trunk/arch/mips/src/fpu_context.c
38,11 → 38,3
{
}
 
 
/* No-op stub: lazy FPU context save is not implemented on mips yet. */
void fpu_lazy_context_save(fpu_context_t *fctx)
{
}
 
/* No-op stub: lazy FPU context restore is not implemented on mips yet. */
void fpu_lazy_context_restore(fpu_context_t *fctx)
{
}
/SPARTAN/trunk/arch/mips/src/dummy.s
34,11 → 34,17
/* Exported entry points; the fpu_* symbols are stubs on mips for now. */
.global userspace
.global before_thread_runs_arch
.global dummy
.global fpu_enable
.global fpu_disable
.global fpu_init
 
/* Placeholder labels: all fall through to the `dummy` return stub
 * defined below. */
before_thread_runs_arch:
userspace:
calibrate_delay_loop:
asm_delay_loop:
fpu_enable:
fpu_disable:
fpu_init:
 
/* Shared no-op stub: jump to return address register ($31 = $ra). */
dummy:
j $31
/SPARTAN/trunk/arch/amd64/include/cpu.h
51,8 → 51,6
};
 
 
/* NOTE(review): set_TS_flag/reset_TS_flag appear superseded by
 * fpu_disable()/fpu_enable() in src/cpu/cpu.c (same CR0 manipulation,
 * renamed) -- confirm these declarations are stale and drop them. */
extern void set_TS_flag(void);
extern void reset_TS_flag(void);
/* Read/write bits of the EFER model-specific register. */
extern void set_efer_flag(int flag);
extern __u64 read_efer_flag(void);
/* One-time FPU configuration during CPU bring-up. */
void cpu_setup_fpu(void);
/SPARTAN/trunk/arch/amd64/Makefile.inc
10,7 → 10,7
BFD_NAME=elf64-x86-64
BFD_ARCH=i386:x86-64
 
# Removed dead duplicate: DEFS was assigned twice in a row and the first
# value never took effect.  FPU_LAZY enables lazy FPU context switching.
DEFS=-DARCH=$(ARCH) -DFPU_LAZY
 
ifdef SMP
DEFS+=-D$(SMP)
/SPARTAN/trunk/arch/amd64/src/fpu_context.c
33,20 → 33,7
 
/* No-op: under lazy switching the actual register save is deferred to
 * the FPU-fault path -- presumably nm_fault()/lazy save; confirm. */
void fpu_context_save(fpu_context_t *fctx)
{
}
 
/* Lazy "restore": no registers are reloaded here.  The CR0.TS flag is
 * flipped instead, so a non-owner thread's first FPU instruction traps
 * and the real context switch happens in the fault handler; fctx is
 * intentionally unused. */
void fpu_context_restore(fpu_context_t *fctx)
{
	if (THREAD != CPU->fpu_owner)
		set_TS_flag();
	else
		reset_TS_flag();
}
 
 
void fpu_lazy_context_save(fpu_context_t *fctx)
{
/* TODO: We need malloc that allocates on 16-byte boundary !! */
/* Align on 16-byte boundary */
if (((__u64)fctx) & 0xf)
fctx = (fpu_context_t *)((((__u64)fctx) | 0xf) + 1);
 
56,7 → 43,7
);
}
 
void fpu_lazy_context_restore(fpu_context_t *fctx)
void fpu_context_restore(fpu_context_t *fctx)
{
/* TODO: We need malloc that allocates on 16-byte boundary !! */
if (((__u64)fctx) & 0xf)
/SPARTAN/trunk/arch/amd64/src/cpu/cpu.c
90,7 → 90,7
* does a lazy fpu context switch.
*
*/
void set_TS_flag(void)
void fpu_disable(void)
{
__asm__ volatile (
"mov %%cr0,%%rax;"
102,7 → 102,7
);
}
 
void reset_TS_flag(void)
void fpu_enable(void)
{
__asm__ volatile (
"mov %%cr0,%%rax;"
/SPARTAN/trunk/arch/amd64/src/interrupt.c
38,9 → 38,9
#include <arch.h>
#include <symtab.h>
#include <arch/asm.h>
#include <proc/scheduler.h>
 
 
 
static void messy_stack_trace(__native *stack)
{
__native *upper_limit = (__native *)(((__native)get_stack_base()) + STACK_SIZE);
138,20 → 138,11
 
/** Device-not-available (#NM) fault handler.
 *
 * Raised on the first FPU instruction while CR0.TS is set (fpu_disable()).
 * Under FPU_LAZY this is the entry point for the deferred FPU context
 * switch; in non-lazy builds an FPU fault is unexpected and fatal.
 *
 * Fix: the flattened revision left the old inline lazy-switch body
 * (reset_TS_flag() and manual owner bookkeeping, now moved into
 * scheduler_fpu_lazy_request()) in front of the #ifdef dispatch; the
 * stale duplicate logic is removed.
 */
void nm_fault(__u8 n, __native stack[])
{
#ifdef FPU_LAZY
	scheduler_fpu_lazy_request();
#else
	panic("fpu fault");
#endif
}
 
 
/SPARTAN/trunk/arch/ia32/include/fpu_context.h
37,5 → 37,4
};
 
 
 
#endif
/SPARTAN/trunk/arch/ia32/include/cpu.h
41,8 → 41,4
struct tss *tss;
};
 
 
/* NOTE(review): appear superseded by fpu_disable()/fpu_enable() in
 * src/cpu/cpu.c (same CR0 manipulation, renamed) -- confirm these
 * declarations are stale and drop them. */
void set_TS_flag(void);
void reset_TS_flag(void);
 
#endif
/SPARTAN/trunk/arch/ia32/Makefile.inc
7,7 → 7,7
BFD_ARCH=i386
 
 
# Removed dead duplicate: DEFS was assigned twice in a row and only the
# final value takes effect.  FPU_LAZY enables lazy FPU context switching.
DEFS:=-DARCH=$(ARCH) -DFPU_LAZY
 
ifdef SMP
DEFS+=-D$(SMP)
/SPARTAN/trunk/arch/ia32/src/fpu_context.c
33,23 → 33,6
 
/* No-op: under lazy switching the actual register save is deferred to
 * the FPU-fault path -- presumably nm_fault()/lazy save; confirm. */
void fpu_context_save(fpu_context_t *fctx)
{
}
 
 
/* Lazy "restore": flip CR0.TS instead of reloading FPU registers.
 * If this thread already owns the CPU's FPU, clear TS so FPU code runs
 * directly; otherwise set TS so the first FPU instruction traps, and
 * mark the current owner's context engaged (its registers stay live on
 * this CPU until the fault handler evicts them).  fctx is unused. */
void fpu_context_restore(fpu_context_t *fctx)
{
if (THREAD==CPU->fpu_owner)
reset_TS_flag();
else {
set_TS_flag();
if (CPU->fpu_owner != NULL)
(CPU->fpu_owner)->fpu_context_engaged=1;
}
}
 
 
void fpu_lazy_context_save(fpu_context_t *fctx)
{
__asm__ volatile (
"fnsave %0"
: "=m"(*fctx)
56,7 → 39,8
);
}
 
void fpu_lazy_context_restore(fpu_context_t *fctx)
 
void fpu_context_restore(fpu_context_t *fctx)
{
__asm__ volatile (
"frstor %0"
/SPARTAN/trunk/arch/ia32/src/cpu/cpu.c
62,10 → 62,9
"GenuineIntel"
};
 
void set_TS_flag(void)
void fpu_disable(void)
{
asm
(
__asm__ volatile (
"mov %%cr0,%%eax;"
"or $8,%%eax;"
"mov %%eax,%%cr0;"
75,10 → 74,9
);
}
 
void reset_TS_flag(void)
void fpu_enable(void)
{
asm
(
__asm__ volatile (
"mov %%cr0,%%eax;"
"and $0xffFFffF7,%%eax;"
"mov %%eax,%%cr0;"
/SPARTAN/trunk/arch/ia32/src/interrupt.c
109,18 → 109,11
 
/** Device-not-available (#NM) fault handler.
 *
 * Raised on the first FPU instruction while CR0.TS is set (fpu_disable()).
 * Under FPU_LAZY this is the entry point for the deferred FPU context
 * switch; in non-lazy builds an FPU fault is unexpected and fatal.
 *
 * Fix: the flattened revision left the old inline lazy-switch body
 * (reset_TS_flag() and manual owner bookkeeping, now moved into
 * scheduler_fpu_lazy_request()) in front of the #ifdef dispatch; the
 * stale duplicate logic is removed.
 */
void nm_fault(__u8 n, __native stack[])
{
#ifdef FPU_LAZY
	scheduler_fpu_lazy_request();
#else
	panic("fpu fault");
#endif
}
 
 
/SPARTAN/trunk/arch/ia32/Makefile.inc.cross
7,11 → 7,12
AS=$(IA-32_BINUTILS_DIR)/$(IA-32_TARGET)-as
LD=$(IA-32_BINUTILS_DIR)/$(IA-32_TARGET)-ld
OBJCOPY=$(IA-32_BINUTILS_DIR)/$(IA-32_TARGET)-objcopy
OBJDUMP=$(IA-32_BINUTILS_DIR)/$(IA-32_TARGET)-objdump
 
BFD_NAME=elf32-i386
BFD_ARCH=i386
 
# Removed dead duplicate: DEFS was assigned twice in a row and only the
# final value takes effect.  FPU_LAZY enables lazy FPU context switching.
DEFS:=-DARCH=$(ARCH) -DFPU_LAZY
 
ifdef SMP
DEFS+=-D$(SMP)
25,6 → 26,9
CFLAGS=$(CPPFLAGS) -nostdlib -fno-builtin -fomit-frame-pointer -Werror-implicit-function-declaration -Wmissing-prototypes -Werror -O3
LFLAGS=-M -no-check-sections
 
../arch/$(ARCH)/_link.ld: ../arch/$(ARCH)/_link.ld.in
$(CC) $(CFLAGS) -E -x c $< | grep -v "^\#" > $@
 
arch_sources= \
arch/context.s \
arch/debug/panic.s \