/branches/dynload/kernel/arch/amd64/include/types.h
---
57,10 → 57,6

typedef uint64_t unative_t;
typedef int64_t native_t;

typedef volatile uint8_t ioport8_t;
typedef volatile uint16_t ioport16_t;
typedef volatile uint32_t ioport32_t;

typedef struct {
} fncptr_t;
/branches/dynload/kernel/arch/amd64/include/atomic.h
---
26,7 → 26,7

 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup amd64
 * @{
 */

/** @file

41,17 → 41,29

static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
    asm volatile ("lock incq %0\n" : "+m" (val->count));
    asm volatile (
        "lock incq %[count]\n"
        : [count] "+m" (val->count)
    );
#else
    asm volatile ("incq %0\n" : "+m" (val->count));
    asm volatile (
        "incq %[count]\n"
        : [count] "+m" (val->count)
    );
#endif /* CONFIG_SMP */
}

static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
    asm volatile ("lock decq %0\n" : "+m" (val->count));
    asm volatile (
        "lock decq %[count]\n"
        : [count] "+m" (val->count)
    );
#else
    asm volatile ("decq %0\n" : "+m" (val->count));
    asm volatile (
        "decq %[count]\n"
        : [count] "+m" (val->count)
    );
#endif /* CONFIG_SMP */
}

58,12 → 70,12

static inline long atomic_postinc(atomic_t *val)
{
    long r = 1;

    asm volatile (
        "lock xaddq %1, %0\n"
        : "+m" (val->count), "+r" (r)
        "lock xaddq %[r], %[count]\n"
        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;
}

72,23 → 84,23

    long r = -1;

    asm volatile (
        "lock xaddq %1, %0\n"
        : "+m" (val->count), "+r" (r)
        "lock xaddq %[r], %[count]\n"
        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;
}
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
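
lock xaddq writes the old value of the destination back into the source register, so atomic_postinc returns the value *before* the increment; the atomic_preinc macro then corrects by one to yield the post-increment value. A sketch of the instruction's semantics in plain C (illustration only, the real instruction performs this atomically):

/* Illustration only: the effect of "lock xaddq %[r], %[count]",
 * written out sequentially. */
static long xadd_model(long *mem, long reg)
{
    long old = *mem;    /* the register receives the old memory value */
    *mem = old + reg;   /* memory receives the sum */
    return old;
}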
static inline uint64_t test_and_set(atomic_t *val) {
    uint64_t v;

    asm volatile (
        "movq $1, %0\n"
        "xchgq %0, %1\n"
        : "=r" (v), "+m" (val->count)
        "movq $1, %[v]\n"
        "xchgq %[v], %[count]\n"
        : [v] "=r" (v), [count] "+m" (val->count)
    );

    return v;

99,7 → 111,7

static inline void atomic_lock_arch(atomic_t *val)
{
    uint64_t tmp;

    preemption_disable();
    asm volatile (
        "0:\n"

106,15 → 118,15

#ifdef CONFIG_HT
        "pause\n"
#endif
        "mov %0, %1\n"
        "testq %1, %1\n"
        "mov %[count], %[tmp]\n"
        "testq %[tmp], %[tmp]\n"
        "jnz 0b\n"       /* lightweight looping on locked spinlock */

        "incq %1\n"      /* now use the atomic operation */
        "xchgq %0, %1\n"
        "testq %1, %1\n"
        "incq %[tmp]\n"  /* now use the atomic operation */
        "xchgq %[count], %[tmp]\n"
        "testq %[tmp], %[tmp]\n"
        "jnz 0b\n"
        : "+m" (val->count), "=&r" (tmp)
        : [count] "+m" (val->count), [tmp] "=&r" (tmp)
    );
    /*
     * Prevent critical section code from bleeding out this way up.
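
atomic_lock_arch is a test-and-test-and-set spinlock: it spins on plain reads while the lock word is nonzero, and only attempts the atomic exchange once the lock looks free, which keeps the cache line mostly shared while waiting. The same pattern expressed in C, as a sketch built on the test_and_set above:

/* Sketch only: the logic atomic_lock_arch implements in asm. */
static void spinlock_sketch(atomic_t *lock)
{
    do {
        while (lock->count != 0) {
            /* busy-wait; a "pause" hint would go here on SMT systems */
        }
    } while (test_and_set(lock) != 0);
}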
/branches/dynload/kernel/arch/amd64/include/boot/boot.h
---
42,8 → 42,17

#define MULTIBOOT_HEADER_MAGIC 0x1BADB002
#define MULTIBOOT_HEADER_FLAGS 0x00010003
#define MULTIBOOT_LOADER_MAGIC 0x2BADB002

#ifndef __ASM__
#ifdef CONFIG_SMP
/* This is only a symbol so the type is dummy. Obtain the value using &. */
extern int _hardcoded_unmapped_size;
#endif /* CONFIG_SMP */
#endif /* __ASM__ */

#endif

/** @}
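
_hardcoded_unmapped_size uses the usual linker-symbol idiom: the symbol has no storage, its *address* encodes a value computed by the linker script, so code takes & and casts. This is exactly how arch_pre_main() later in this changeset consumes it. A minimal sketch, assuming a linker-script definition along these lines:

#include <stddef.h>

/* Assumed linker-script definition, for illustration:
 *   _hardcoded_unmapped_size = unmapped_end - unmapped_start;
 */
extern int _hardcoded_unmapped_size;

static size_t unmapped_size(void)
{
    /* The symbol has no storage; only its address carries the value. */
    return (size_t) &_hardcoded_unmapped_size;
}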
/branches/dynload/kernel/arch/amd64/include/arch.h
---
35,6 → 35,10

#ifndef KERN_amd64_ARCH_H_
#define KERN_amd64_ARCH_H_

#include <genarch/multiboot/multiboot.h>

extern void arch_pre_main(uint32_t, const multiboot_info_t *);

#endif

/** @}
/branches/dynload/kernel/arch/amd64/include/asm.h
---
26,7 → 26,7

 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup amd64
 * @{
 */

/** @file

36,6 → 36,8

#define KERN_amd64_ASM_H_

#include <config.h>
#include <arch/types.h>
#include <typedefs.h>

extern void asm_delay_loop(uint32_t t);
extern void asm_fake_loop(uint32_t t);

45,12 → 47,17

 * Return the base address of the current stack.
 * The stack is assumed to be STACK_SIZE bytes long.
 * The stack must start on a page boundary.
 *
 */
static inline uintptr_t get_stack_base(void)
{
    uintptr_t v;

    asm volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1)));
    asm volatile (
        "andq %%rsp, %[v]\n"
        : [v] "=r" (v)
        : "0" (~((uint64_t) STACK_SIZE - 1))
    );

    return v;
}
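
The mask trick works because kernel stacks are STACK_SIZE-aligned and STACK_SIZE is a power of two: clearing the low bits of any in-stack address yields the stack's base. A sketch of the arithmetic (the STACK_SIZE value here is an assumption):

#include <stdint.h>

#define STACK_SIZE 8192  /* assumption: a power of two, as required */

static uintptr_t stack_base_of(uintptr_t sp)
{
    /* Clearing the low log2(STACK_SIZE) bits of an in-stack address
     * yields the stack base, e.g. 0x7fff1234 -> 0x7fff0000 for 8 KiB. */
    return sp & ~((uintptr_t) STACK_SIZE - 1);
}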
72,12 → 79,18

 *
 * @param port Port to read from
 * @return Value read
 *
 */
static inline uint8_t pio_read_8(ioport8_t *port)
{
    uint8_t val;

    asm volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port));
    asm volatile (
        "inb %w[port], %b[val]\n"
        : [val] "=a" (val)
        : [port] "d" (port)
    );

    return val;
}

87,12 → 100,18

 *
 * @param port Port to read from
 * @return Value read
 *
 */
static inline uint16_t pio_read_16(ioport16_t *port)
{
    uint16_t val;

    asm volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port));
    asm volatile (
        "inw %w[port], %w[val]\n"
        : [val] "=a" (val)
        : [port] "d" (port)
    );

    return val;
}

102,12 → 121,18

 *
 * @param port Port to read from
 * @return Value read
 *
 */
static inline uint32_t pio_read_32(ioport32_t *port)
{
    uint32_t val;

    asm volatile ("inl %w1, %0 \n" : "=a" (val) : "d" (port));
    asm volatile (
        "inl %w[port], %[val]\n"
        : [val] "=a" (val)
        : [port] "d" (port)
    );

    return val;
}

117,10 → 142,14

 *
 * @param port Port to write to
 * @param val Value to write
 *
 */
static inline void pio_write_8(ioport8_t *port, uint8_t val)
{
    asm volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port));
    asm volatile (
        "outb %b[val], %w[port]\n"
        :: [val] "a" (val), [port] "d" (port)
    );
}

/** Word to port

129,10 → 158,14

 *
 * @param port Port to write to
 * @param val Value to write
 *
 */
static inline void pio_write_16(ioport16_t *port, uint16_t val)
{
    asm volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port));
    asm volatile (
        "outw %w[val], %w[port]\n"
        :: [val] "a" (val), [port] "d" (port)
    );
}

/** Double word to port

141,10 → 174,14

 *
 * @param port Port to write to
 * @param val Value to write
 *
 */
static inline void pio_write_32(ioport32_t *port, uint32_t val)
{
    asm volatile ("outl %0, %w1\n" : : "a" (val), "d" (port));
    asm volatile (
        "outl %[val], %w[port]\n"
        :: [val] "a" (val), [port] "d" (port)
    );
}
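
A usage sketch for the PIO accessors above, polling the i8042 keyboard controller found on PC hardware (status port 0x64, data port 0x60; status bit 0 means output buffer full):

/* Sketch only: busy-wait for a byte from the keyboard controller. */
static uint8_t kbd_read_byte(void)
{
    while ((pio_read_8((ioport8_t *) 0x64) & 0x01) == 0) {
        /* wait until a byte becomes available */
    }
    return pio_read_8((ioport8_t *) 0x60);
}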
/** Swap Hidden part of GS register with visible one */

159,15 → 196,18

 * value of EFLAGS.
 *
 * @return Old interrupt priority level.
 *
 */
static inline ipl_t interrupts_enable(void) {
    ipl_t v;

    __asm__ volatile (
    asm volatile (
        "pushfq\n"
        "popq %0\n"
        "popq %[v]\n"
        "sti\n"
        : "=r" (v)
        : [v] "=r" (v)
    );

    return v;
}

177,15 → 217,18

 * value of EFLAGS.
 *
 * @return Old interrupt priority level.
 *
 */
static inline ipl_t interrupts_disable(void) {
    ipl_t v;

    __asm__ volatile (
    asm volatile (
        "pushfq\n"
        "popq %0\n"
        "popq %[v]\n"
        "cli\n"
        : "=r" (v)
    );
        : [v] "=r" (v)
    );

    return v;
}

194,13 → 237,14

 * Restore EFLAGS.
 *
 * @param ipl Saved interrupt priority level.
 *
 */
static inline void interrupts_restore(ipl_t ipl) {
    __asm__ volatile (
        "pushq %0\n"
    asm volatile (
        "pushq %[ipl]\n"
        "popfq\n"
        : : "r" (ipl)
    );
        :: [ipl] "r" (ipl)
    );
}

/** Return interrupt priority level.

208,14 → 252,17

 * Return EFLAGS.
 *
 * @return Current interrupt priority level.
 *
 */
static inline ipl_t interrupts_read(void) {
    ipl_t v;

    __asm__ volatile (
    asm volatile (
        "pushfq\n"
        "popq %0\n"
        : "=r" (v)
        "popq %[v]\n"
        : [v] "=r" (v)
    );

    return v;
}
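
The ipl functions pair up into the usual save/restore pattern: disable returns the previous EFLAGS so arbitrary nesting works, and restore puts back whatever state the caller had rather than unconditionally re-enabling. A usage sketch:

/* Typical bracket around a critical section. */
static void critical_section_sketch(void)
{
    ipl_t ipl = interrupts_disable();
    /* ... code that must not be interrupted ... */
    interrupts_restore(ipl);
}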
222,21 → 269,25

/** Write to MSR */
static inline void write_msr(uint32_t msr, uint64_t value)
{
    __asm__ volatile (
        "wrmsr;" : : "c" (msr),
        "a" ((uint32_t)(value)),
        "d" ((uint32_t)(value >> 32))
    );
    asm volatile (
        "wrmsr\n"
        :: "c" (msr),
           "a" ((uint32_t) (value)),
           "d" ((uint32_t) (value >> 32))
    );
}

static inline unative_t read_msr(uint32_t msr)
{
    uint32_t ax, dx;

    __asm__ volatile (
        "rdmsr;" : "=a"(ax), "=d"(dx) : "c" (msr)
    );
    return ((uint64_t)dx << 32) | ax;
    asm volatile (
        "rdmsr\n"
        : "=a" (ax), "=d" (dx)
        : "c" (msr)
    );

    return ((uint64_t) dx << 32) | ax;
}
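
rdmsr and wrmsr move the 64-bit value through the EDX:EAX register pair, hence the split into two 32-bit halves above. A usage sketch reading and modifying the architectural EFER MSR, number 0xc0000080 (the constant name here is an assumption, not the kernel's own):

#define MSR_EFER 0xc0000080  /* hypothetical name; the MSR number is architectural */

static void enable_nx_sketch(void)
{
    /* EFER.NXE (bit 11) enables no-execute page protection,
     * as arch_pre_mm_init() does further below in this changeset. */
    write_msr(MSR_EFER, read_msr(MSR_EFER) | (1 << 11));
}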
243,29 → 294,29

/** Enable local APIC
 *
 * Enable local APIC in MSR.
 *
 */
static inline void enable_l_apic_in_msr()
{
    __asm__ volatile (
    asm volatile (
        "movl $0x1b, %%ecx\n"
        "rdmsr\n"
        "orl $(1<<11),%%eax\n"
        "orl $(1 << 11),%%eax\n"
        "orl $(0xfee00000),%%eax\n"
        "wrmsr\n"
        :
        :
        : "%eax","%ecx","%edx"
        ::: "%eax","%ecx","%edx"
    );
}
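
For clarity, the same operation expressed through the read_msr/write_msr helpers defined above. MSR 0x1b is IA32_APIC_BASE, bit 11 is its global-enable flag, and 0xfee00000 is the conventional local APIC base address:

/* Sketch: C equivalent of the asm block above. */
static void enable_l_apic_sketch(void)
{
    write_msr(0x1b, read_msr(0x1b) | (1 << 11) | 0xfee00000);
}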
static inline uintptr_t * get_ip()
{
    uintptr_t *ip;

    __asm__ volatile (
        "mov %%rip, %0"
        : "=r" (ip)
    );
    asm volatile (
        "mov %%rip, %[ip]"
        : [ip] "=r" (ip)
    );

    return ip;
}

272,59 → 323,84

/** Invalidate TLB Entry.
 *
 * @param addr Address on a page whose TLB entry is to be invalidated.
 *
 */
static inline void invlpg(uintptr_t addr)
{
    __asm__ volatile ("invlpg %0\n" :: "m" (*((unative_t *)addr)));
    asm volatile (
        "invlpg %[addr]\n"
        :: [addr] "m" (*((unative_t *) addr))
    );
}

/** Load GDTR register from memory.
 *
 * @param gdtr_reg Address of memory from where to load GDTR.
 *
 */
static inline void gdtr_load(struct ptr_16_64 *gdtr_reg)
{
    __asm__ volatile ("lgdtq %0\n" : : "m" (*gdtr_reg));
    asm volatile (
        "lgdtq %[gdtr_reg]\n"
        :: [gdtr_reg] "m" (*gdtr_reg)
    );
}

/** Store GDTR register to memory.
 *
 * @param gdtr_reg Address of memory to where to store GDTR.
 *
 */
static inline void gdtr_store(struct ptr_16_64 *gdtr_reg)
{
    __asm__ volatile ("sgdtq %0\n" : : "m" (*gdtr_reg));
    asm volatile (
        "sgdtq %[gdtr_reg]\n"
        :: [gdtr_reg] "m" (*gdtr_reg)
    );
}

/** Load IDTR register from memory.
 *
 * @param idtr_reg Address of memory from where to load IDTR.
 *
 */
static inline void idtr_load(struct ptr_16_64 *idtr_reg)
{
    __asm__ volatile ("lidtq %0\n" : : "m" (*idtr_reg));
    asm volatile (
        "lidtq %[idtr_reg]\n"
        :: [idtr_reg] "m" (*idtr_reg)
    );
}

/** Load TR from descriptor table.
 *
 * @param sel Selector specifying descriptor of TSS segment.
 *
 */
static inline void tr_load(uint16_t sel)
{
    __asm__ volatile ("ltr %0" : : "r" (sel));
    asm volatile (
        "ltr %[sel]"
        :: [sel] "r" (sel)
    );
}

#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
    { \
        unative_t res; \
        __asm__ volatile ("movq %%" #reg ", %0" : "=r" (res) ); \
        return res; \
    }
    { \
        unative_t res; \
        asm volatile ( \
            "movq %%" #reg ", %[res]" \
            : [res] "=r" (res) \
        ); \
        return res; \
    }

#define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
    { \
        __asm__ volatile ("movq %0, %%" #reg : : "r" (regn)); \
    }
    { \
        asm volatile ( \
            "movq %[regn], %%" #reg \
            :: [regn] "r" (regn) \
        ); \
    }

GEN_READ_REG(cr0)
GEN_READ_REG(cr2)
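
The token-pasting macros generate one accessor per control register. Expanded for illustration, GEN_READ_REG(cr0) produces:

static inline unative_t read_cr0(void)
{
    unative_t res;
    asm volatile (
        "movq %%cr0, %[res]"
        : [res] "=r" (res)
    );
    return res;
}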
/branches/dynload/kernel/arch/amd64/src/fpu_context.c
---
39,9 → 39,9

void fpu_context_save(fpu_context_t *fctx)
{
    asm volatile (
        "fxsave %0"
        : "=m" (*fctx)
    );
        "fxsave %[fctx]\n"
        : [fctx] "=m" (*fctx)
    );
}

/** Restore FPU (mmx,sse) context using fxrstor instruction */

48,9 → 48,9

void fpu_context_restore(fpu_context_t *fctx)
{
    asm volatile (
        "fxrstor %0"
        : "=m" (*fctx)
    );
        "fxrstor %[fctx]\n"
        : [fctx] "=m" (*fctx)
    );
}
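
Worth noting: fxsave and fxrstor operate on a 512-byte save area that the CPU requires to be 16-byte aligned, so whatever fpu_context_t is, it must guarantee that alignment. A sketch of such a declaration (the attribute and layout are assumptions about the real definition, not taken from this changeset):

#include <stdint.h>

/* Sketch only: a 512-byte FXSAVE area, aligned as the instructions require. */
typedef struct {
    uint8_t fxsave_area[512];
} __attribute__((aligned(16))) fpu_context_sketch_t;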
void fpu_init()

57,7 → 57,7

{
    /* TODO: Zero all SSE, MMX etc. registers */
    asm volatile (
        "fninit;"
        "fninit\n"
    );
}
/branches/dynload/kernel/arch/amd64/src/cpu/cpu.c
---
77,21 → 77,19

void cpu_setup_fpu(void)
{
    asm volatile (
        "movq %%cr0, %%rax;"
        "btsq $1, %%rax;"   /* cr0.mp */
        "btrq $2, %%rax;"   /* cr0.em */
        "movq %%rax, %%cr0;"
        "movq %%cr4, %%rax;"
        "bts $9, %%rax;"    /* cr4.osfxsr */
        "movq %%rax, %%cr4;"
        :
        :
        : "%rax"
    );
        "movq %%cr0, %%rax\n"
        "btsq $1, %%rax\n"  /* cr0.mp */
        "btrq $2, %%rax\n"  /* cr0.em */
        "movq %%rax, %%cr0\n"
        "movq %%cr4, %%rax\n"
        "bts $9, %%rax\n"   /* cr4.osfxsr */
        "movq %%rax, %%cr4\n"
        ::: "%rax"
    );
}
/** Set the TS flag to 1.
 *
 * If a thread accesses the coprocessor, an exception is raised which
 * performs a lazy FPU context switch.

99,26 → 97,22

 */
void fpu_disable(void)
{
    asm volatile (
        "mov %%cr0,%%rax;"
        "bts $3,%%rax;"
        "mov %%rax,%%cr0;"
        :
        :
        : "%rax"
    );
    asm volatile (
        "mov %%cr0, %%rax\n"
        "bts $3, %%rax\n"
        "mov %%rax, %%cr0\n"
        ::: "%rax"
    );
}

void fpu_enable(void)
{
    asm volatile (
        "mov %%cr0,%%rax;"
        "btr $3,%%rax;"
        "mov %%rax,%%cr0;"
        :
        :
        : "%rax"
    );
    asm volatile (
        "mov %%cr0, %%rax\n"
        "btr $3, %%rax\n"
        "mov %%rax, %%cr0\n"
        ::: "%rax"
    );
}
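
Bit 3 of CR0 is the TS (task switched) flag, which is what makes lazy FPU switching work: the scheduler sets TS on a context switch, the first FPU instruction executed afterwards raises the #NM (device-not-available) exception, and the handler swaps FPU contexts and clears TS. A sketch of the handler logic (thread_t, CURRENT_THREAD and last_fpu_owner are assumed names, not the kernel's own):

typedef struct thread {
    fpu_context_t *fctx;
} thread_t;

extern thread_t *CURRENT_THREAD;
extern thread_t *last_fpu_owner;

/* Sketch only: lazy switch performed by the #NM trap handler. */
static void nm_trap_sketch(void)
{
    fpu_enable();  /* clear CR0.TS so FPU instructions work again */
    if (last_fpu_owner && last_fpu_owner != CURRENT_THREAD)
        fpu_context_save(last_fpu_owner->fctx);
    fpu_context_restore(CURRENT_THREAD->fctx);
    last_fpu_owner = CURRENT_THREAD;
}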
void cpu_arch_init(void)

/branches/dynload/kernel/arch/amd64/src/amd64.c
---
39,6 → 39,7

#include <config.h>
#include <proc/thread.h>
#include <genarch/multiboot/multiboot.h>
#include <genarch/drivers/legacy/ia32/io.h>
#include <genarch/drivers/ega/ega.h>
#include <arch/drivers/vesa.h>

45,6 → 46,7

#include <genarch/kbd/i8042.h>
#include <arch/drivers/i8254.h>
#include <arch/drivers/i8259.h>
#include <arch/boot/boot.h>

#ifdef CONFIG_SMP
#include <arch/smp/apic.h>

65,7 → 67,6

#include <ddi/device.h>
#include <sysinfo/sysinfo.h>

/** Disable I/O on non-privileged levels
 *
 * Clean IOPL(12,13) and NT(14) flags in EFLAGS register

72,15 → 73,13

 */
static void clean_IOPL_NT_flags(void)
{
    asm (
    asm volatile (
        "pushfq\n"
        "pop %%rax\n"
        "and $~(0x7000), %%rax\n"
        "pushq %%rax\n"
        "popfq\n"
        :
        :
        : "%rax"
        ::: "%rax"
    );
}
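
The 0x7000 mask covers exactly IOPL (bits 12-13) and NT (bit 14) of RFLAGS; clearing IOPL forces I/O permission checks on non-privileged code. A sketch of the bit arithmetic:

#include <stdint.h>

#define RFLAGS_IOPL  (0x3 << 12)  /* bits 12-13: I/O privilege level */
#define RFLAGS_NT    (0x1 << 14)  /* bit 14: nested task flag */

/* 0x3000 | 0x4000 == 0x7000, the mask cleared by the asm above. */
static uint64_t clean_iopl_nt(uint64_t rflags)
{
    return rflags & ~(uint64_t) (RFLAGS_IOPL | RFLAGS_NT);
}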
90,16 → 89,31

 */
static void clean_AM_flag(void)
{
    asm (
    asm volatile (
        "mov %%cr0, %%rax\n"
        "and $~(0x40000), %%rax\n"
        "mov %%rax, %%cr0\n"
        :
        :
        : "%rax"
        ::: "%rax"
    );
}

/** Perform amd64-specific initialization before main_bsp() is called.
 *
 * @param signature Should contain the multiboot signature.
 * @param mi Pointer to the multiboot information structure.
 */
void arch_pre_main(uint32_t signature, const multiboot_info_t *mi)
{
    /* Parse multiboot information obtained from the bootloader. */
    multiboot_info_parse(signature, mi);

#ifdef CONFIG_SMP
    /* Copy AP bootstrap routines below 1 MB. */
    memcpy((void *) AP_BOOT_OFFSET, (void *) BOOT_OFFSET,
        (size_t) &_hardcoded_unmapped_size);
#endif
}

void arch_pre_mm_init(void)
{
    /* Enable no-execute pages */

185,6 → 199,10

    sysinfo_set_item_val("kbd", NULL, true);
    sysinfo_set_item_val("kbd.devno", NULL, devno);
    sysinfo_set_item_val("kbd.inr", NULL, IRQ_KBD);
    sysinfo_set_item_val("kbd.address.physical", NULL,
        (uintptr_t) I8042_BASE);
    sysinfo_set_item_val("kbd.address.kernel", NULL,
        (uintptr_t) I8042_BASE);
}

void calibrate_delay_loop(void)
/branches/dynload/kernel/arch/amd64/src/boot/boot.S
---
175,119 → 175,18

.code64
start64:
    movq $(PA2KA(START_STACK)), %rsp
    movl grub_eax, %eax
    movl grub_ebx, %ebx

    # arch_pre_main(grub_eax, grub_ebx)
    xorq %rdi, %rdi
    movl grub_eax, %edi
    xorq %rsi, %rsi
    movl grub_ebx, %esi
    call arch_pre_main

    call main_bsp

    # Not reached.

    cmpl $MULTIBOOT_LOADER_MAGIC, %eax  # compare GRUB signature
    je valid_boot

    xorl %ecx, %ecx                     # no memory size or map available
    movl %ecx, e820counter
    jmp invalid_boot

valid_boot:
    movl (%ebx), %eax                   # ebx = physical address of struct multiboot_info
    bt $3, %eax                         # mbi->flags[3] (mods_count, mods_addr valid)
    jc mods_valid

    xorq %rcx, %rcx
    movq %rcx, init
    jmp mods_end

mods_valid:
    xorq %rcx, %rcx
    movl 20(%ebx), %ecx                 # mbi->mods_count
    movq %rcx, init

    cmpl $0, %ecx
    je mods_end

    movl 24(%ebx), %esi                 # mbi->mods_addr
    movq $init, %rdi

mods_loop:
    xorq %rdx, %rdx
    movl 0(%esi), %edx                  # mods->mod_start
    movq $0xffff800000000000, %r10
    addq %r10, %rdx
    movq %rdx, 8(%rdi)

    xorq %rdx, %rdx
    movl 4(%esi), %edx
    subl 0(%esi), %edx                  # mods->mod_end - mods->mod_start
    movq %rdx, 16(%rdi)

    addl $16, %esi
    addq $48, %rdi

    loop mods_loop

mods_end:
    bt $6, %eax                         # mbi->flags[6] (mmap_length, mmap_addr valid)
    jc mmap_valid

    xorl %edx, %edx
    jmp mmap_invalid

mmap_valid:
    movl 44(%ebx), %ecx                 # mbi->mmap_length
    movl 48(%ebx), %esi                 # mbi->mmap_addr
    movq $e820table, %rdi
    xorl %edx, %edx

mmap_loop:
    cmpl $0, %ecx
    jle mmap_end

    movl 4(%esi), %eax                  # mmap->base_addr_low
    movl %eax, (%rdi)
    movl 8(%esi), %eax                  # mmap->base_addr_high
    movl %eax, 4(%rdi)
    movl 12(%esi), %eax                 # mmap->length_low
    movl %eax, 8(%rdi)
    movl 16(%esi), %eax                 # mmap->length_high
    movl %eax, 12(%rdi)
    movl 20(%esi), %eax                 # mmap->type
    movl %eax, 16(%rdi)

    movl (%esi), %eax                   # mmap->size
    addl $0x4, %eax
    addl %eax, %esi
    subl %eax, %ecx
    addq $MEMMAP_E820_RECORD_SIZE, %rdi
    incl %edx
    jmp mmap_loop

mmap_end:
mmap_invalid:
    movl %edx, e820counter

invalid_boot:

#ifdef CONFIG_SMP
    # copy AP bootstrap routines below 1 MB
    movq $BOOT_OFFSET, %rsi
    movq $AP_BOOT_OFFSET, %rdi
    movq $_hardcoded_unmapped_size, %rcx
    rep movsb
#endif

    call main_bsp                       # never returns

    cli
    hlt
/branches/dynload/kernel/arch/amd64/src/userspace.c
---
26,7 → 26,7

 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup amd64
 * @{
 */

/** @file

47,36 → 47,33

 */
void userspace(uspace_arg_t *kernel_uarg)
{
    ipl_t ipl;
    ipl_t ipl = interrupts_disable();

    ipl = interrupts_disable();

    /* Clear CF,PF,AF,ZF,SF,DF,OF */
    /* Clear CF, PF, AF, ZF, SF, DF, OF */
    ipl &= ~(0xcd4);

    asm volatile (""
        "pushq %0\n"
        "pushq %1\n"
        "pushq %2\n"
        "pushq %3\n"
        "pushq %4\n"
        "movq %5, %%rax\n"
        /* %rdi is defined to hold pcb_ptr - set it to 0 */
        "xorq %%rdi, %%rdi\n"
        "iretq\n"
        : :
        "i" (gdtselector(UDATA_DES) | PL_USER),
        "r" (kernel_uarg->uspace_stack + THREAD_STACK_SIZE),
        "r" (ipl),
        "i" (gdtselector(UTEXT_DES) | PL_USER),
        "r" (kernel_uarg->uspace_entry),
        "r" (kernel_uarg->uspace_uarg)
        : "rax"
    );
    asm volatile (
        "pushq %[udata_des]\n"
        "pushq %[stack_size]\n"
        "pushq %[ipl]\n"
        "pushq %[utext_des]\n"
        "pushq %[entry]\n"
        "movq %[uarg], %%rax\n"
        /* %rdi is defined to hold pcb_ptr - set it to 0 */
        "xorq %%rdi, %%rdi\n"
        "iretq\n"
        :: [udata_des] "i" (gdtselector(UDATA_DES) | PL_USER),
           [stack_size] "r" (kernel_uarg->uspace_stack + THREAD_STACK_SIZE),
           [ipl] "r" (ipl),
           [utext_des] "i" (gdtselector(UTEXT_DES) | PL_USER),
           [entry] "r" (kernel_uarg->uspace_entry),
           [uarg] "r" (kernel_uarg->uspace_uarg)
        : "rax"
    );

    /* Unreachable */
    for(;;)
        ;
    while (1);
}
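
The five pushq instructions build exactly the frame that iretq pops when changing privilege level: SS, RSP, RFLAGS, CS, RIP. Laid out as a struct for illustration (lowest address first, i.e. in reverse push order; the type name is hypothetical):

#include <stdint.h>

/* Illustration: the stack frame consumed by iretq above. */
typedef struct {
    uint64_t rip;     /* kernel_uarg->uspace_entry */
    uint64_t cs;      /* gdtselector(UTEXT_DES) | PL_USER */
    uint64_t rflags;  /* sanitized ipl */
    uint64_t rsp;     /* uspace_stack + THREAD_STACK_SIZE */
    uint64_t ss;      /* gdtselector(UDATA_DES) | PL_USER */
} iretq_frame_sketch_t;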
/** @}