/trunk/kernel/arch/amd64/src/fpu_context.c

39,9 → 39,9
void fpu_context_save(fpu_context_t *fctx)
{
	asm volatile (
-		"fxsave %0"
-		: "=m"(*fctx)
-		);
+		"fxsave %[fctx]\n"
+		: [fctx] "=m" (*fctx)
+	);
}

/** Restore FPU (mmx,sse) context using fxrstor instruction */

48,9 → 48,9
void fpu_context_restore(fpu_context_t *fctx)
{
	asm volatile (
-		"fxrstor %0"
-		: "=m"(*fctx)
-		);
+		"fxrstor %[fctx]\n"
+		: [fctx] "=m" (*fctx)
+	);
}
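
Both fxsave and fxrstor operate on a 512-byte memory image that must be 16-byte aligned; a misaligned operand raises #GP. A minimal sketch of a compatible save area (illustrative only, not the actual HelenOS fpu_context_t definition):

/* Illustrative sketch, not the HelenOS definition: whatever backs
 * fpu_context_t must provide 512 bytes on a 16-byte boundary. */
typedef struct {
	uint8_t fxsave_area[512];	/* x87 env, MXCSR, ST0-ST7/MM0-MM7, XMM0-XMM15 */
} __attribute__((aligned(16))) fxsave_area_t;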

void fpu_init()

57,7 → 57,7
{
	/* TODO: Zero all SSE, MMX etc. registers */
	asm volatile (
-		"fninit;"
+		"fninit\n"
	);
}
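
fninit resets only the x87 control, status and tag words; it leaves the XMM registers and MXCSR untouched, which is what the TODO above refers to. One possible way to do that zeroing is to fxrstor a neutral save area (a sketch only; the function name is made up and nothing like it is part of this changeset):

/* Hypothetical sketch: clear SSE/MMX state by loading a cleared image.
 * FCW = 0x037f and MXCSR = 0x1f80 are the architectural reset values. */
static void fpu_load_cleared_context(void)
{
	static uint8_t area[512] __attribute__((aligned(16)));

	*(uint16_t *) &area[0] = 0x037f;	/* FCW at offset 0 */
	*(uint32_t *) &area[24] = 0x1f80;	/* MXCSR at offset 24 */
	asm volatile (
		"fxrstor %[area]\n"
		:: [area] "m" (area)
	);
}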
/trunk/kernel/arch/amd64/src/cpu/cpu.c

77,21 → 77,19
void cpu_setup_fpu(void)
{
	asm volatile (
-		"movq %%cr0, %%rax;"
-		"btsq $1, %%rax;"	/* cr0.mp */
-		"btrq $2, %%rax;"	/* cr0.em */
-		"movq %%rax, %%cr0;"
-		"movq %%cr4, %%rax;"
-		"bts $9, %%rax;"	/* cr4.osfxsr */
-		"movq %%rax, %%cr4;"
-		:
-		:
-		:"%rax"
-	);
+		"movq %%cr0, %%rax\n"
+		"btsq $1, %%rax\n"	/* cr0.mp */
+		"btrq $2, %%rax\n"	/* cr0.em */
+		"movq %%rax, %%cr0\n"
+		"movq %%cr4, %%rax\n"
+		"bts $9, %%rax\n"	/* cr4.osfxsr */
+		"movq %%rax, %%cr4\n"
+		::: "%rax"
+	);
}
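
For reference, the bits manipulated above are CR0.MP (bit 1, makes WAIT/FWAIT honour CR0.TS), CR0.EM (bit 2, which must stay clear, otherwise x87 instructions raise #NM and SSE instructions raise #UD) and CR4.OSFXSR (bit 9, tells the CPU the OS uses fxsave/fxrstor and enables SSE). Spelled out with named constants, an equivalent setup could read as follows (the macro names are illustrative, not taken from the HelenOS headers):

/* Illustrative constants; not the names used by HelenOS. */
#define CR0_MP		(1 << 1)	/* monitor coprocessor */
#define CR0_EM		(1 << 2)	/* x87 emulation - must stay clear */
#define CR4_OSFXSR	(1 << 9)	/* OS uses fxsave/fxrstor, SSE enabled */

void cpu_setup_fpu(void)
{
	asm volatile (
		"movq %%cr0, %%rax\n"
		"orq %[cr0_set], %%rax\n"	/* set CR0.MP */
		"andq %[cr0_clear], %%rax\n"	/* clear CR0.EM */
		"movq %%rax, %%cr0\n"
		"movq %%cr4, %%rax\n"
		"orq %[cr4_set], %%rax\n"	/* set CR4.OSFXSR */
		"movq %%rax, %%cr4\n"
		:: [cr0_set] "i" (CR0_MP),
		   [cr0_clear] "i" (~CR0_EM),
		   [cr4_set] "i" (CR4_OSFXSR)
		: "%rax"
	);
}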

/** Set the TS flag to 1.
 *
 * If a thread accesses coprocessor, exception is run, which
 * does a lazy fpu context switch.

99,26 → 97,22
 */
void fpu_disable(void)
{
-	asm volatile (
-		"mov %%cr0,%%rax;"
-		"bts $3,%%rax;"
-		"mov %%rax,%%cr0;"
-		:
-		:
-		:"%rax"
-	);
+	asm volatile (
+		"mov %%cr0, %%rax\n"
+		"bts $3, %%rax\n"
+		"mov %%rax, %%cr0\n"
+		::: "%rax"
+	);
}

void fpu_enable(void)
{
-	asm volatile (
-		"mov %%cr0,%%rax;"
-		"btr $3,%%rax;"
-		"mov %%rax,%%cr0;"
-		:
-		:
-		:"%rax"
-	);
+	asm volatile (
+		"mov %%cr0, %%rax\n"
+		"btr $3, %%rax\n"
+		"mov %%rax, %%cr0\n"
+		::: "%rax"
+	);
}

void cpu_arch_init(void)
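
The lazy switch the comment above alludes to works like this: fpu_disable() sets CR0.TS (bit 3), typically when the scheduler switches to a thread that does not own the FPU state, and the first FPU/SSE instruction the new thread executes then raises #NM (device not available); the #NM handler clears TS and exchanges the context. A rough sketch, with fpu_owner, THREAD and the fpu-context fields as assumed names for the real per-CPU and per-thread bookkeeping (the actual HelenOS handler lives elsewhere in the tree):

/* Hypothetical #NM handler sketch - names are assumptions, not the
 * actual HelenOS implementation. */
extern thread_t *fpu_owner;	/* thread whose state is live in the FPU */
extern thread_t *THREAD;	/* currently running thread */

void nm_fault_handler(void)
{
	fpu_enable();	/* clear CR0.TS so FPU/SSE instructions run again */

	if (fpu_owner != NULL)
		fpu_context_save(&fpu_owner->fpu_context);

	if (THREAD->fpu_context_exists) {
		fpu_context_restore(&THREAD->fpu_context);
	} else {
		fpu_init();
		THREAD->fpu_context_exists = true;
	}

	fpu_owner = THREAD;	/* the current thread now owns the FPU state */
}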
/trunk/kernel/arch/amd64/src/amd64.c

72,15 → 72,13
 */
static void clean_IOPL_NT_flags(void)
{
-	asm (
+	asm volatile (
		"pushfq\n"
		"pop %%rax\n"
		"and $~(0x7000), %%rax\n"
		"pushq %%rax\n"
		"popfq\n"
-		:
-		:
-		: "%rax"
+		::: "%rax"
	);
}
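
The 0x7000 mask above covers exactly the flags the function name promises to clear: IOPL occupies RFLAGS bits 12-13 and NT is bit 14.

/* RFLAGS bit positions (architectural facts; the names are illustrative): */
#define RFLAGS_IOPL	(3 << 12)	/* 0x3000 - I/O privilege level */
#define RFLAGS_NT	(1 << 14)	/* 0x4000 - nested task flag */
/* RFLAGS_IOPL | RFLAGS_NT == 0x7000, the constant masked out above. */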

90,13 → 88,11
 */
static void clean_AM_flag(void)
{
-	asm (
+	asm volatile (
		"mov %%cr0, %%rax\n"
		"and $~(0x40000), %%rax\n"
		"mov %%rax, %%cr0\n"
-		:
-		:
-		: "%rax"
+		::: "%rax"
	);
}
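
Likewise, 0x40000 is CR0.AM (alignment mask, bit 18); with it cleared, the CPU performs no alignment checking even if a userspace thread sets EFLAGS.AC.

/* CR0.AM is bit 18 (name illustrative): */
#define CR0_AM	(1 << 18)	/* == 0x40000, the constant masked out above */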
/trunk/kernel/arch/amd64/src/userspace.c

26,7 → 26,7
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64
 * @{
 */
/** @file

47,36 → 47,33
 */
void userspace(uspace_arg_t *kernel_uarg)
{
-	ipl_t ipl;
-	ipl = interrupts_disable();
+	ipl_t ipl = interrupts_disable();

-	/* Clear CF,PF,AF,ZF,SF,DF,OF */
+	/* Clear CF, PF, AF, ZF, SF, DF, OF */
	ipl &= ~(0xcd4);

-	asm volatile (""
-		"pushq %0\n"
-		"pushq %1\n"
-		"pushq %2\n"
-		"pushq %3\n"
-		"pushq %4\n"
-		"movq %5, %%rax\n"
-		/* %rdi is defined to hold pcb_ptr - set it to 0 */
-		"xorq %%rdi, %%rdi\n"
-		"iretq\n"
-		: :
-		"i" (gdtselector(UDATA_DES) | PL_USER),
-		"r" (kernel_uarg->uspace_stack+THREAD_STACK_SIZE),
-		"r" (ipl),
-		"i" (gdtselector(UTEXT_DES) | PL_USER),
-		"r" (kernel_uarg->uspace_entry),
-		"r" (kernel_uarg->uspace_uarg)
-		: "rax"
-	);
+	asm volatile (
+		"pushq %[udata_des]\n"
+		"pushq %[stack_size]\n"
+		"pushq %[ipl]\n"
+		"pushq %[utext_des]\n"
+		"pushq %[entry]\n"
+		"movq %[uarg], %%rax\n"
+		/* %rdi is defined to hold pcb_ptr - set it to 0 */
+		"xorq %%rdi, %%rdi\n"
+		"iretq\n"
+		:: [udata_des] "i" (gdtselector(UDATA_DES) | PL_USER),
+		   [stack_size] "r" (kernel_uarg->uspace_stack + THREAD_STACK_SIZE),
+		   [ipl] "r" (ipl),
+		   [utext_des] "i" (gdtselector(UTEXT_DES) | PL_USER),
+		   [entry] "r" (kernel_uarg->uspace_entry),
+		   [uarg] "r" (kernel_uarg->uspace_uarg)
+		: "rax"
+	);

	/* Unreachable */
-	for(;;)
-		;
+	while (1);
}
/** @} |
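
For orientation, the five pushq instructions in userspace() build the frame that the final iretq consumes. iretq pops RIP, CS, RFLAGS, RSP and SS in that order, so the values are pushed in reverse; the resulting layout, as a descriptive comment only (nothing here is added by the changeset):

/*
 * Stack frame consumed by iretq in userspace(), pushed top-down:
 *
 *   SS     = gdtselector(UDATA_DES) | PL_USER      (pushed first)
 *   RSP    = kernel_uarg->uspace_stack + THREAD_STACK_SIZE
 *   RFLAGS = ipl with the bits in 0xcd4 cleared
 *   CS     = gdtselector(UTEXT_DES) | PL_USER
 *   RIP    = kernel_uarg->uspace_entry             (pushed last)
 *
 * On entry to userspace %rax carries kernel_uarg->uspace_uarg and
 * %rdi is zeroed, since it is defined to hold pcb_ptr.
 */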