Subversion Repositories HelenOS

Compare Revisions

Rev 4016 → Rev 4017

/trunk/kernel/arch/amd64/include/atomic.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64
* @{
*/
/** @file
41,17 → 41,29
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock incq %0\n" : "+m" (val->count));
asm volatile (
"lock incq %[count]\n"
: [count] "+m" (val->count)
);
#else
asm volatile ("incq %0\n" : "+m" (val->count));
asm volatile (
"incq %[count]\n"
: [count] "+m" (val->count)
);
#endif /* CONFIG_SMP */
}
 
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock decq %0\n" : "+m" (val->count));
asm volatile (
"lock decq %[count]\n"
: [count] "+m" (val->count)
);
#else
asm volatile ("decq %0\n" : "+m" (val->count));
asm volatile (
"decq %[count]\n"
: [count] "+m" (val->count)
);
#endif /* CONFIG_SMP */
}
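
The change running through this whole revision is the move from GCC's positional inline-asm operands (%0, %1) to named operands (%[name]). A minimal self-contained sketch of the syntax, not part of the patch, with demo_inc as an illustrative name:

#include <stdint.h>

/* The bracketed [val] in the constraint list binds the C lvalue to a
 * symbolic name; the template then refers to it as %[val]. */
static inline void demo_inc(volatile uint64_t *p)
{
	asm volatile (
		"lock incq %[val]\n"
		: [val] "+m" (*p)
	);
}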
 
58,12 → 70,12
static inline long atomic_postinc(atomic_t *val)
{
long r = 1;
 
asm volatile (
"lock xaddq %1, %0\n"
: "+m" (val->count), "+r" (r)
"lock xaddq %[r], %[count]\n"
: [count] "+m" (val->count), [r] "+r" (r)
);
 
return r;
}
 
72,23 → 84,23
long r = -1;
asm volatile (
"lock xaddq %1, %0\n"
: "+m" (val->count), "+r" (r)
"lock xaddq %[r], %[count]\n"
: [count] "+m" (val->count), [r] "+r" (r)
);
return r;
}
 
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
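
Why the post/pre split above works: lock xadd exchanges the register with memory while adding, leaving the old memory value in the register, so atomic_postinc() returns the count before the increment and the macros derive the pre-increment result from it. A hedged equivalent in portable C11 atomics, for illustration only (not HelenOS code):

#include <stdatomic.h>

long demo_postinc(atomic_long *val)
{
	return atomic_fetch_add(val, 1);      /* old value, like xadd */
}

long demo_preinc(atomic_long *val)
{
	return atomic_fetch_add(val, 1) + 1;  /* new value */
}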
 
static inline uint64_t test_and_set(atomic_t *val) {
uint64_t v;
asm volatile (
"movq $1, %0\n"
"xchgq %0, %1\n"
: "=r" (v), "+m" (val->count)
"movq $1, %[v]\n"
"xchgq %[v], %[count]\n"
: [v] "=r" (v), [count] "+m" (val->count)
);
return v;
99,7 → 111,7
static inline void atomic_lock_arch(atomic_t *val)
{
uint64_t tmp;
 
preemption_disable();
asm volatile (
"0:\n"
106,15 → 118,15
#ifdef CONFIG_HT
"pause\n"
#endif
"mov %0, %1\n"
"testq %1, %1\n"
"mov %[count], %[tmp]\n"
"testq %[tmp], %[tmp]\n"
"jnz 0b\n" /* lightweight looping on locked spinlock */
"incq %1\n" /* now use the atomic operation */
"xchgq %0, %1\n"
"testq %1, %1\n"
"incq %[tmp]\n" /* now use the atomic operation */
"xchgq %[count], %[tmp]\n"
"testq %[tmp], %[tmp]\n"
"jnz 0b\n"
: "+m" (val->count), "=&r" (tmp)
: [count] "+m" (val->count), [tmp] "=&r" (tmp)
);
/*
* Prevent critical section code from bleeding out this way up.
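
The loop rewritten here is a test-and-test-and-set spinlock: it spins on plain loads (cheap and cache-local, with pause easing hyper-threaded siblings under CONFIG_HT) and attempts the locked xchgq only once the lock looks free. A rough C rendering of the same control flow, where spin_hint() is a hypothetical stand-in for the pause instruction:

preemption_disable();
do {
	while (val->count != 0)
		spin_hint();              /* hypothetical pause wrapper */
} while (test_and_set(val) != 0);         /* xchgq; retry on contention */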
/trunk/kernel/arch/amd64/include/asm.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64
* @{
*/
/** @file
45,12 → 45,17
* Return the base address of the current stack.
* The stack is assumed to be STACK_SIZE bytes long.
* The stack must start on a page boundary.
*
*/
static inline uintptr_t get_stack_base(void)
{
uintptr_t v;
asm volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1)));
asm volatile (
"andq %%rsp, %[v]\n"
: [v] "=r" (v)
: "0" (~((uint64_t) STACK_SIZE-1))
);
return v;
}
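
The "0" input constraint aliases the input to operand 0, so %[v] already holds the mask when the andq executes. Because the stack is STACK_SIZE bytes long and STACK_SIZE-aligned, clearing the low bits of any in-stack %rsp yields the base. A worked example with an illustrative STACK_SIZE of 0x2000:

/* rsp           = 0xffffffff80007f48
 * ~(0x2000 - 1) = 0xffffffffffffe000
 * rsp & mask    = 0xffffffff80006000   (stack base) */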
72,12 → 77,18
*
* @param port Port to read from
* @return Value read
*
*/
static inline uint8_t pio_read_8(ioport8_t *port)
{
uint8_t val;
 
asm volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port));
asm volatile (
"inb %w[port], %b[val]\n"
: [val] "=a" (val)
: [port] "d" (port)
);
return val;
}
 
87,12 → 98,18
*
* @param port Port to read from
* @return Value read
*
*/
static inline uint16_t pio_read_16(ioport16_t *port)
{
uint16_t val;
asm volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port));
asm volatile (
"inw %w[port], %w[val]\n"
: [val] "=a" (val)
: [port] "d" (port)
);
return val;
}
 
102,12 → 119,18
*
* @param port Port to read from
* @return Value read
*
*/
static inline uint32_t pio_read_32(ioport32_t *port)
{
uint32_t val;
asm volatile ("inl %w1, %0 \n" : "=a" (val) : "d" (port));
asm volatile (
"inl %w[port], %[val]\n"
: [val] "=a" (val)
: [port] "d" (port)
);
return val;
}
 
117,10 → 140,14
*
* @param port Port to write to
* @param val Value to write
*
*/
static inline void pio_write_8(ioport8_t *port, uint8_t val)
{
asm volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port));
asm volatile (
"outb %b[val], %w[port]\n"
:: [val] "a" (val), [port] "d" (port)
);
}
 
/** Word to port
129,10 → 156,14
*
* @param port Port to write to
* @param val Value to write
*
*/
static inline void pio_write_16(ioport16_t *port, uint16_t val)
{
asm volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port));
asm volatile (
"outw %w[val], %w[port]\n"
:: [val] "a" (val), [port] "d" (port)
);
}
 
/** Double word to port
141,10 → 172,14
*
* @param port Port to write to
* @param val Value to write
*
*/
static inline void pio_write_32(ioport32_t *port, uint32_t val)
{
asm volatile ("outl %0, %w1\n" : : "a" (val), "d" (port));
asm volatile (
"outl %[val], %w[port]\n"
:: [val] "a" (val), [port] "d" (port)
);
}
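
A hedged usage sketch for these accessors, polling the legacy PC keyboard controller; port 0x64 and the command value are standard ISA details used purely for illustration:

ioport8_t *kbd = (ioport8_t *) 0x64;

uint8_t status = pio_read_8(kbd);     /* inb from port 0x64 */
if (status & 0x01)                    /* output buffer full */
	pio_write_8(kbd, 0xad);       /* e.g. disable-keyboard command */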
 
/** Swap Hidden part of GS register with visible one */
159,15 → 194,18
* value of EFLAGS.
*
* @return Old interrupt priority level.
*
*/
static inline ipl_t interrupts_enable(void) {
ipl_t v;
- __asm__ volatile (
+ asm volatile (
"pushfq\n"
- "popq %0\n"
+ "popq %[v]\n"
"sti\n"
- : "=r" (v)
+ : [v] "=r" (v)
);
return v;
}
 
177,15 → 215,18
* value of EFLAGS.
*
* @return Old interrupt priority level.
*
*/
static inline ipl_t interrupts_disable(void) {
ipl_t v;
- __asm__ volatile (
+ asm volatile (
"pushfq\n"
- "popq %0\n"
+ "popq %[v]\n"
"cli\n"
- : "=r" (v)
- );
+ : [v] "=r" (v)
+ );
return v;
}
 
194,13 → 235,14
* Restore EFLAGS.
*
* @param ipl Saved interrupt priority level.
*
*/
static inline void interrupts_restore(ipl_t ipl) {
- __asm__ volatile (
- "pushq %0\n"
+ asm volatile (
+ "pushq %[ipl]\n"
"popfq\n"
- : : "r" (ipl)
- );
+ :: [ipl] "r" (ipl)
+ );
}
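
These primitives compose into the usual critical-section idiom; a sketch:

ipl_t ipl = interrupts_disable();
/* ... code that must not be interrupted ... */
interrupts_restore(ipl);    /* re-enables only if previously enabled */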
 
/** Return interrupt priority level.
208,14 → 250,17
* Return EFLAGS.
*
* @return Current interrupt priority level.
*
*/
static inline ipl_t interrupts_read(void) {
ipl_t v;
- __asm__ volatile (
+ asm volatile (
"pushfq\n"
- "popq %0\n"
- : "=r" (v)
+ "popq %[v]\n"
+ : [v] "=r" (v)
);
return v;
}
 
222,21 → 267,25
/** Write to MSR */
static inline void write_msr(uint32_t msr, uint64_t value)
{
- __asm__ volatile (
- "wrmsr;" : : "c" (msr),
- "a" ((uint32_t)(value)),
- "d" ((uint32_t)(value >> 32))
- );
+ asm volatile (
+ "wrmsr\n"
+ :: "c" (msr),
+ "a" ((uint32_t) (value)),
+ "d" ((uint32_t) (value >> 32))
+ );
}
 
static inline unative_t read_msr(uint32_t msr)
{
uint32_t ax, dx;
 
- __asm__ volatile (
- "rdmsr;" : "=a"(ax), "=d"(dx) : "c" (msr)
- );
- return ((uint64_t)dx << 32) | ax;
+ asm volatile (
+ "rdmsr\n"
+ : "=a" (ax), "=d" (dx)
+ : "c" (msr)
+ );
+ return ((uint64_t) dx << 32) | ax;
}
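
A usage sketch tying the pair together: MSR 0x1b is IA32_APIC_BASE, the same register enable_l_apic_in_msr() below rewrites, and bit 11 is its global-enable flag:

uint64_t apic_base = read_msr(0x1b);        /* IA32_APIC_BASE */
write_msr(0x1b, apic_base | (1 << 11));     /* set the enable bit */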
 
 
243,29 → 292,29
/** Enable local APIC
*
* Enable local APIC in MSR.
*
*/
static inline void enable_l_apic_in_msr()
{
- __asm__ volatile (
+ asm volatile (
"movl $0x1b, %%ecx\n"
"rdmsr\n"
- "orl $(1<<11),%%eax\n"
+ "orl $(1 << 11),%%eax\n"
"orl $(0xfee00000),%%eax\n"
"wrmsr\n"
- :
- :
- :"%eax","%ecx","%edx"
- );
+ ::: "%eax","%ecx","%edx"
+ );
}
 
static inline uintptr_t * get_ip()
{
uintptr_t *ip;
 
- __asm__ volatile (
- "mov %%rip, %0"
- : "=r" (ip)
- );
+ asm volatile (
+ "mov %%rip, %[ip]"
+ : [ip] "=r" (ip)
+ );
return ip;
}
 
272,59 → 321,84
/** Invalidate TLB Entry.
*
* @param addr Address on a page whose TLB entry is to be invalidated.
*
*/
static inline void invlpg(uintptr_t addr)
{
__asm__ volatile ("invlpg %0\n" :: "m" (*((unative_t *)addr)));
asm volatile (
"invlpg %[addr]\n"
:: [addr] "m" (*((unative_t *) addr))
);
}
 
/** Load GDTR register from memory.
*
* @param gdtr_reg Address of memory from where to load GDTR.
*
*/
static inline void gdtr_load(struct ptr_16_64 *gdtr_reg)
{
__asm__ volatile ("lgdtq %0\n" : : "m" (*gdtr_reg));
asm volatile (
"lgdtq %[gdtr_reg]\n"
:: [gdtr_reg] "m" (*gdtr_reg)
);
}
 
/** Store GDTR register to memory.
*
* @param gdtr_reg Address of memory to where to store GDTR.
*
*/
static inline void gdtr_store(struct ptr_16_64 *gdtr_reg)
{
__asm__ volatile ("sgdtq %0\n" : : "m" (*gdtr_reg));
asm volatile (
"sgdtq %[gdtr_reg]\n"
:: [gdtr_reg] "m" (*gdtr_reg)
);
}
 
/** Load IDTR register from memory.
*
* @param idtr_reg Address of memory from where to load IDTR.
*
*/
static inline void idtr_load(struct ptr_16_64 *idtr_reg)
{
__asm__ volatile ("lidtq %0\n" : : "m" (*idtr_reg));
asm volatile (
"lidtq %[idtr_reg]\n"
:: [idtr_reg] "m" (*idtr_reg));
}
 
/** Load TR from descriptor table.
*
* @param sel Selector specifying descriptor of TSS segment.
*
*/
static inline void tr_load(uint16_t sel)
{
__asm__ volatile ("ltr %0" : : "r" (sel));
asm volatile (
"ltr %[sel]"
:: [sel] "r" (sel)
);
}
 
#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
- { \
- unative_t res; \
- __asm__ volatile ("movq %%" #reg ", %0" : "=r" (res) ); \
- return res; \
- }
+ { \
+ unative_t res; \
+ asm volatile ( \
+ "movq %%" #reg ", %[res]" \
+ : [res] "=r" (res) \
+ ); \
+ return res; \
+ }
 
#define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
- { \
- __asm__ volatile ("movq %0, %%" #reg : : "r" (regn)); \
- }
+ { \
+ asm volatile ( \
+ "movq %[regn], %%" #reg \
+ :: [regn] "r" (regn) \
+ ); \
+ }
 
GEN_READ_REG(cr0)
GEN_READ_REG(cr2)
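
Each GEN_READ_REG(reg) line emits an inline read_<reg>() accessor; GEN_READ_REG(cr2), for instance, expands to roughly:

static inline unative_t read_cr2(void)
{
	unative_t res;
	asm volatile (
		"movq %%cr2, %[res]"
		: [res] "=r" (res)
	);
	return res;
}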
/trunk/kernel/arch/amd64/src/fpu_context.c
39,9 → 39,9
void fpu_context_save(fpu_context_t *fctx)
{
asm volatile (
"fxsave %0"
: "=m"(*fctx)
);
"fxsave %[fctx]\n"
: [fctx] "=m" (*fctx)
);
}
 
/** Restore FPU (mmx,sse) context using fxrstor instruction */
48,9 → 48,9
void fpu_context_restore(fpu_context_t *fctx)
{
asm volatile (
"fxrstor %0"
: "=m"(*fctx)
);
"fxrstor %[fctx]\n"
: [fctx] "=m" (*fctx)
);
}
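
Worth remembering when allocating contexts: fxsave and fxrstor fault unless their 512-byte save area is 16-byte aligned, so fpu_context_t storage must honor that. A hedged declaration sketch:

static fpu_context_t fctx __attribute__((aligned(16)));

fpu_context_save(&fctx);      /* fxsave into the aligned buffer */
fpu_context_restore(&fctx);   /* fxrstor from it */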
 
void fpu_init()
57,7 → 57,7
{
/* TODO: Zero all SSE, MMX etc. registers */
asm volatile (
"fninit;"
"fninit\n"
);
}
 
/trunk/kernel/arch/amd64/src/cpu/cpu.c
77,21 → 77,19
void cpu_setup_fpu(void)
{
asm volatile (
"movq %%cr0, %%rax;"
"btsq $1, %%rax;" /* cr0.mp */
"btrq $2, %%rax;" /* cr0.em */
"movq %%rax, %%cr0;"
 
"movq %%cr4, %%rax;"
"bts $9, %%rax;" /* cr4.osfxsr */
"movq %%rax, %%cr4;"
:
:
:"%rax"
);
"movq %%cr0, %%rax\n"
"btsq $1, %%rax\n" /* cr0.mp */
"btrq $2, %%rax\n" /* cr0.em */
"movq %%rax, %%cr0\n"
"movq %%cr4, %%rax\n"
"bts $9, %%rax\n" /* cr4.osfxsr */
"movq %%rax, %%cr4\n"
::: "%rax"
);
}
 
/** Set the TS flag to 1.
*
* If a thread accesses the coprocessor, an exception is raised, and
* its handler performs a lazy FPU context switch.
99,26 → 97,22
*/
void fpu_disable(void)
{
- asm volatile (
- "mov %%cr0,%%rax;"
- "bts $3,%%rax;"
- "mov %%rax,%%cr0;"
- :
- :
- :"%rax"
- );
+ asm volatile (
+ "mov %%cr0, %%rax\n"
+ "bts $3, %%rax\n"
+ "mov %%rax, %%cr0\n"
+ ::: "%rax"
+ );
}
 
void fpu_enable(void)
{
- asm volatile (
- "mov %%cr0,%%rax;"
- "btr $3,%%rax;"
- "mov %%rax,%%cr0;"
- :
- :
- :"%rax"
- );
+ asm volatile (
+ "mov %%cr0, %%rax\n"
+ "btr $3, %%rax\n"
+ "mov %%rax, %%cr0\n"
+ ::: "%rax"
+ );
}
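
Together these drive the lazy switch described above: the scheduler calls fpu_disable() (CR0.TS set), the next FPU instruction raises #NM (device-not-available), and its handler swaps contexts. A control-flow sketch; nm_handler, owner_ctx and current_ctx are illustrative names only:

void nm_handler(void)
{
	fpu_enable();                       /* clear CR0.TS */
	fpu_context_save(owner_ctx);        /* park the previous owner */
	fpu_context_restore(current_ctx);   /* install the new thread's */
}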
 
void cpu_arch_init(void)
/trunk/kernel/arch/amd64/src/amd64.c
72,15 → 72,13
*/
static void clean_IOPL_NT_flags(void)
{
- asm (
+ asm volatile (
"pushfq\n"
"pop %%rax\n"
"and $~(0x7000), %%rax\n"
"pushq %%rax\n"
"popfq\n"
- :
- :
- : "%rax"
+ ::: "%rax"
);
}
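
For reference, the 0x7000 mask covers exactly the two fields named by the function:

/* 0x7000 = 0111 0000 0000 0000b: RFLAGS bits 12-13 (IOPL), bit 14 (NT) */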
 
90,13 → 88,11
*/
static void clean_AM_flag(void)
{
- asm (
+ asm volatile (
"mov %%cr0, %%rax\n"
"and $~(0x40000), %%rax\n"
"mov %%rax, %%cr0\n"
- :
- :
- : "%rax"
+ ::: "%rax"
);
}
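
Likewise, 0x40000 is bit 18 of CR0, the Alignment Mask flag; with AM clear, unaligned accesses never trap regardless of RFLAGS.AC:

/* 0x40000 = 1 << 18: CR0.AM, cleared here */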
 
/trunk/kernel/arch/amd64/src/userspace.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64
* @{
*/
/** @file
47,36 → 47,33
*/
void userspace(uspace_arg_t *kernel_uarg)
{
- ipl_t ipl;
- ipl = interrupts_disable();
+ ipl_t ipl = interrupts_disable();
 
- /* Clear CF,PF,AF,ZF,SF,DF,OF */
+ /* Clear CF, PF, AF, ZF, SF, DF, OF */
ipl &= ~(0xcd4);
 
asm volatile (""
"pushq %0\n"
"pushq %1\n"
"pushq %2\n"
"pushq %3\n"
"pushq %4\n"
"movq %5, %%rax\n"
/* %rdi is defined to hold pcb_ptr - set it to 0 */
"xorq %%rdi, %%rdi\n"
"iretq\n"
: :
"i" (gdtselector(UDATA_DES) | PL_USER),
"r" (kernel_uarg->uspace_stack+THREAD_STACK_SIZE),
"r" (ipl),
"i" (gdtselector(UTEXT_DES) | PL_USER),
"r" (kernel_uarg->uspace_entry),
"r" (kernel_uarg->uspace_uarg)
: "rax"
);
asm volatile (
"pushq %[udata_des]\n"
"pushq %[stack_size]\n"
"pushq %[ipl]\n"
"pushq %[utext_des]\n"
"pushq %[entry]\n"
"movq %[uarg], %%rax\n"
/* %rdi is defined to hold pcb_ptr - set it to 0 */
"xorq %%rdi, %%rdi\n"
"iretq\n"
:: [udata_des] "i" (gdtselector(UDATA_DES) | PL_USER),
[stack_size] "r" (kernel_uarg->uspace_stack + THREAD_STACK_SIZE),
[ipl] "r" (ipl),
[utext_des] "i" (gdtselector(UTEXT_DES) | PL_USER),
[entry] "r" (kernel_uarg->uspace_entry),
[uarg] "r" (kernel_uarg->uspace_uarg)
: "rax"
);
/* Unreachable */
- for(;;)
- ;
+ while (1);
}
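
The five pushes build exactly the frame that iretq consumes, top of stack first: RIP (uspace_entry), CS (UTEXT_DES | PL_USER), RFLAGS (the sanitized ipl), RSP (top of the user stack), SS (UDATA_DES | PL_USER). As a stack-layout sketch:

/* top -> [ RIP    = entry      ]  popped first by iretq
 *        [ CS     = utext_des  ]
 *        [ RFLAGS = ipl        ]
 *        [ RSP    = stack top  ]
 *        [ SS     = udata_des  ]  popped last */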
 
/** @}