/kernel/trunk/kernel.config |
---|
74,7 → 74,7 |
! [CONFIG_DEBUG=y&(ARCH=amd64|ARCH=ia32)] CONFIG_DEBUG_AS_WATCHPOINT (y/n) |
# Save all interrupt registers |
! [CONFIG_DEBUG=y&(ARCH=amd64|ARCH=mips32)] CONFIG_DEBUG_ALLREGS (y/n) |
! [CONFIG_DEBUG=y&(ARCH=amd64|ARCH=mips32|ARCH=ia32)] CONFIG_DEBUG_ALLREGS (y/n) |
## Run-time configuration directives |
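The bracketed expression is a build-time dependency: CONFIG_DEBUG_ALLREGS is offered only when CONFIG_DEBUG=y and ARCH matches one of the listed values; this hunk extends the option to ia32. The option reaches the C sources as an ordinary preprocessor symbol; a minimal sketch of the consuming pattern (structure assumed, symbol as in this changeset):

    #ifdef CONFIG_DEBUG_ALLREGS
            /* save and dump every general-purpose register */
    #else
            /* save and dump only the registers the fast path keeps */
    #endif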
/kernel/trunk/generic/include/synch/spinlock.h |
---|
33,6 → 33,7 |
#include <typedefs.h> |
#include <preemption.h> |
#include <arch/atomic.h> |
#include <debug.h> |
#ifdef CONFIG_SMP |
struct spinlock { |
66,12 → 67,36 |
#endif |
extern void spinlock_initialize(spinlock_t *sl, char *name); |
extern void spinlock_lock(spinlock_t *sl); |
extern int spinlock_trylock(spinlock_t *sl); |
extern void spinlock_unlock(spinlock_t *sl); |
extern void spinlock_lock_debug(spinlock_t *sl); |
#ifdef CONFIG_DEBUG_SPINLOCK |
# define spinlock_lock(x) spinlock_lock_debug(x) |
#else |
# define spinlock_lock(x) atomic_lock_arch(&(x)->val) |
#endif |
/** Unlock spinlock |
* |
* Unlock spinlock. |
* |
* @param sl Pointer to spinlock_t structure. |
*/ |
static inline void spinlock_unlock(spinlock_t *sl) |
{ |
ASSERT(atomic_get(&sl->val) != 0); |
/* |
* Prevent critical section code from bleeding out this way down. |
*/ |
CS_LEAVE_BARRIER(); |
atomic_set(&sl->val,0); |
preemption_enable(); |
} |
#else |
/* On UP systems, spinlocks are effectively left out. */ |
#define SPINLOCK_DECLARE(name) |
#define SPINLOCK_INITIALIZE(name) |
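The net effect of the header changes: spinlock_lock() is now a macro that resolves either to the instrumented spinlock_lock_debug() (with CONFIG_DEBUG_SPINLOCK) or directly to the architecture's atomic_lock_arch() fast path, and spinlock_unlock() becomes a static inline. Call sites are unaffected; a minimal usage sketch (SMP build assumed, example_lock is illustrative):

    SPINLOCK_INITIALIZE(example_lock);

    void example(void)
    {
            spinlock_lock(&example_lock);    /* debug or fast path, chosen at build time */
            /* ... critical section ... */
            spinlock_unlock(&example_lock);  /* static inline from the header */
    }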
/kernel/trunk/generic/include/syscall/syscall.h |
---|
55,6 → 55,8 |
typedef __native (*syshandler_t)(); |
extern syshandler_t syscall_table[SYSCALL_END]; |
extern __native syscall_handler(__native a1, __native a2, __native a3, |
__native a4, __native id); |
#endif |
/kernel/trunk/generic/src/synch/spinlock.c |
---|
51,7 → 51,6 |
#endif |
} |
#ifdef CONFIG_DEBUG_SPINLOCK |
/** Lock spinlock |
* |
* Lock spinlock. |
60,7 → 59,8 |
* |
* @param sl Pointer to spinlock_t structure. |
*/ |
void spinlock_lock(spinlock_t *sl) |
#ifdef CONFIG_DEBUG_SPINLOCK |
void spinlock_lock_debug(spinlock_t *sl) |
{ |
count_t i = 0; |
char *symbol; |
87,32 → 87,7 |
* Prevent critical section code from bleeding out this way up. |
*/ |
CS_ENTER_BARRIER(); |
} |
#else |
/** Lock spinlock |
* |
* Lock spinlock. |
* |
* @param sl Pointer to spinlock_t structure. |
*/ |
void spinlock_lock(spinlock_t *sl) |
{ |
preemption_disable(); |
/* |
* Each architecture has its own efficient/recommended |
* implementation of spinlock. |
*/ |
spinlock_arch(&sl->val); |
/* |
* Prevent critical section code from bleeding out this way up. |
*/ |
CS_ENTER_BARRIER(); |
} |
#endif |
/** Lock spinlock conditionally |
143,23 → 118,4 |
return rc; |
} |
/** Unlock spinlock |
* |
* Unlock spinlock. |
* |
* @param sl Pointer to spinlock_t structure. |
*/ |
void spinlock_unlock(spinlock_t *sl) |
{ |
ASSERT(atomic_get(&sl->val) != 0); |
/* |
* Prevent critical section code from bleeding out this way down. |
*/ |
CS_LEAVE_BARRIER(); |
atomic_set(&sl->val,0); |
preemption_enable(); |
} |
#endif |
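Only spinlock_trylock() keeps a useful body in this file; the non-debug spinlock_lock() and spinlock_unlock() now live in the header. A sketch of the conditional-locking pattern trylock enables, assuming the convention here that a nonzero return means the lock was taken:

    if (spinlock_trylock(&example_lock)) {
            /* ... critical section ... */
            spinlock_unlock(&example_lock);
    } else {
            /* lock is busy; back off instead of spinning */
    }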
/kernel/trunk/generic/src/lib/func.c |
---|
54,7 → 54,7 |
rundebugger = true; |
} |
#else |
atomic_set(haltstate, 1); |
atomic_set(&haltstate, 1); |
#endif |
interrupts_disable(); |
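The added & is the whole fix: atomic_set() takes an atomic_t *, so the address of haltstate must be passed, not the object itself. In context (a sketch; the surrounding function is abbreviated):

    extern atomic_t haltstate;

    void halt(void)
    {
            atomic_set(&haltstate, 1);  /* atomic_set(atomic_t *, ...) needs the address */
            interrupts_disable();
            /* ... */
    }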
/kernel/trunk/generic/src/syscall/syscall.c |
---|
62,6 → 62,16 |
return as_remap(AS, (__address) address, size, 0); |
} |
/** Dispatch system call */ |
__native syscall_handler(__native a1, __native a2, __native a3, |
__native a4, __native id) |
{ |
if (id < SYSCALL_END) |
return syscall_table[id](a1,a2,a3,a4); |
else |
panic("Undefined syscall %d", id); |
} |
syshandler_t syscall_table[SYSCALL_END] = { |
sys_io, |
sys_thread_create, |
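With dispatch in generic code, each port only delivers the five __native arguments and the common table does the rest. Adding a syscall means providing a handler with the syshandler_t shape and appending it to syscall_table[] before SYSCALL_END; a sketch with a hypothetical handler:

    static __native sys_example(__native a1, __native a2, __native a3, __native a4)
    {
            return a1 + a2;  /* illustrative body only */
    }
    /* ... then list sys_example in syscall_table[] and extend the syscall
       enumeration accordingly ... */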
/kernel/trunk/arch/amd64/include/syscall.h |
---|
31,8 → 31,6 |
#include <arch/types.h> |
extern __native syscall_handler(__native a1,__native a2, __native a3, |
__native a4, __native id); |
extern void syscall_setup_cpu(void); |
#endif |
/kernel/trunk/arch/amd64/include/atomic.h |
---|
30,6 → 30,8 |
#define __amd64_ATOMIC_H__ |
#include <arch/types.h> |
#include <arch/barrier.h> |
#include <preemption.h> |
typedef struct { volatile __u64 count; } atomic_t; |
101,6 → 103,31 |
} |
extern void spinlock_arch(volatile int *val); |
/** AMD64 specific fast spinlock */ |
static inline void atomic_lock_arch(atomic_t *val) |
{ |
__u64 tmp; |
preemption_disable(); |
__asm__ volatile ( |
"0:;" |
#ifdef CONFIG_HT |
"pause;" /* Pentium 4's HT love this instruction */ |
#endif |
"mov %0, %1;" |
"testq %1, %1;" |
"jnz 0b;" /* Leightweight looping on locked spinlock */ |
"incq %1;" /* now use the atomic operation */ |
"xchgq %0, %1;" |
"testq %1, %1;" |
"jnz 0b;" |
: "=m"(val->count),"=r"(tmp) |
); |
/* |
* Prevent critical section code from bleeding out this way up. |
*/ |
CS_ENTER_BARRIER(); |
} |
#endif |
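The loop is a test-and-test-and-set lock: spin on a plain load while the word is nonzero (cheap, stays in the local cache), then try to claim the lock with the atomic xchgq, and resume spinning if another CPU won the race. The same logic in portable C11 atomics, purely for comparison (not part of the kernel):

    #include <stdatomic.h>

    static inline void lock_sketch(atomic_ulong *val)
    {
            for (;;) {
                    while (atomic_load_explicit(val, memory_order_relaxed) != 0)
                            ;  /* lightweight spin on a plain read */
                    if (atomic_exchange_explicit(val, 1, memory_order_acquire) == 0)
                            break;  /* the exchange claimed the lock */
            }
    }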
/kernel/trunk/arch/amd64/src/syscall.c |
---|
60,13 → 60,3 |
*/ |
write_msr(AMD_MSR_SFMASK, 0x200); |
} |
/** Dispatch system call */ |
__native syscall_handler(__native a1, __native a2, __native a3, |
__native a4, __native id) |
{ |
if (id < SYSCALL_END) |
return syscall_table[id](a1,a2,a3,a4); |
else |
panic("Undefined syscall %d", id); |
} |
/kernel/trunk/arch/amd64/src/interrupt.c |
---|
54,18 → 54,14 |
printf("%%rip: %Q (%s)\n",istate->rip, symbol); |
printf("ERROR_WORD=%Q\n", istate->error_word); |
printf("%%rcs=%Q, flags=%Q, %%cr0=%Q\n", istate->cs, istate->rflags,read_cr0()); |
printf("%%rax=%Q, %%rbx=%Q, %%rcx=%Q\n",istate->rax,istate->rbx,istate->rcx); |
printf("%%rdx=%Q, %%rsi=%Q, %%rdi=%Q\n",istate->rdx,istate->rsi,istate->rdi); |
printf("%%r8 =%Q, %%r9 =%Q, %%r10=%Q\n",istate->r8,istate->r9,istate->r10); |
printf("%%r11=%Q, %%r12=%Q, %%r13=%Q\n",istate->r11,istate->r12,istate->r13); |
printf("%%r14=%Q, %%r15=%Q, %%rsp=%Q\n",istate->r14,istate->r15,&istate->stack[0]); |
printf("%%rbp=%Q\n",istate->rbp); |
/* |
printf("stack: %Q, %Q, %Q\n", x[5], x[6], x[7]); |
printf(" %Q, %Q, %Q\n", x[8], x[9], x[10]); |
printf(" %Q, %Q, %Q\n", x[11], x[12], x[13]); |
printf(" %Q, %Q, %Q\n", x[14], x[15], x[16]); |
*/ |
printf("%%rax=%Q, %%rcx=%Q, %%rdx=%Q\n",istate->rax,istate->rcx,istate->rdx); |
printf("%%rsi=%Q, %%rdi=%Q, %%r8 =%Q\n",istate->rsi,istate->rdi,istate->r8); |
printf("%%r9 =%Q, %%r10 =%Q, %%r11=%Q\n",istate->r9,istate->r10,istate->r11); |
#ifdef CONFIG_DEBUG_ALLREGS |
printf("%%r12=%Q, %%r13=%Q, %%r14=%Q\n",istate->r12,istate->r13,istate->r14); |
printf("%%r15=%Q, %%rbx=%Q, %%rbp=%Q\n",istate->r15,istate->rbx,&istate->rbp); |
#endif |
printf("%%rsp=%Q\n",&istate->stack[0]); |
} |
/* |
/kernel/trunk/arch/mips32/include/exception.h |
---|
98,6 → 98,5 |
extern void exception_entry(void); |
extern void cache_error_entry(void); |
extern void exception_init(void); |
extern __native syscall_handler(__native a0, __native a1, __native a2, |
__native a3, __native sysnum); |
#endif |
/kernel/trunk/arch/mips32/src/exception.c |
---|
40,7 → 40,6 |
#include <func.h> |
#include <console/kconsole.h> |
#include <arch/debugger.h> |
#include <syscall/syscall.h> |
static char * exctable[] = { |
"Interrupt","TLB Modified","TLB Invalid","TLB Invalid Store", |
129,14 → 128,6 |
exc_dispatch(i+INT_OFFSET, istate); |
} |
__native syscall_handler(__native a0, __native a1, __native a2, |
__native a3, __native sysnum) |
{ |
if (sysnum < SYSCALL_END) |
return syscall_table[sysnum](a0,a1,a2,a3); |
panic("Undefined syscall %d", sysnum); |
} |
/** Handle syscall userspace call */ |
static void syscall_exception(int n, istate_t *istate) |
{ |
/kernel/trunk/arch/mips32/src/start.S |
---|
215,7 → 215,7 |
andi $k1, $k1, 0x1f # cp0_exc_cause() part 2 |
sub $k1, 8 # 8=SYSCALL |
beqz $k1, uspace_shortcut |
beqz $k1, syscall_shortcut |
add $k1, 8 # Revert $k1 back to correct exc number |
REGISTERS_STORE_AND_EXC_RESET $k0 |
228,7 → 228,6 |
REGISTERS_LOAD $sp |
# The $sp is automatically restored to former value |
eret |
nop |
# The MIPS o32 ABI makes the caller reserve 16 bytes of stack for the register arguments $a0-$a3 |
#define SS_ARG4 16 |
235,7 → 234,7 |
#define SS_SP 20 |
#define SS_STATUS 24 |
#define SS_EPC 28 |
uspace_shortcut: |
syscall_shortcut: |
# We have plenty of stack space free for our own use |
sw $sp, SS_SP($k0) |
move $sp, $k0 |
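The SS_* offsets begin at 16 because the o32 convention noted above reserves the first 16 bytes of the argument area for $a0-$a3; the fifth C argument is read from just above it. The C-level view (prototype as declared in syscall.h):

    /* a1-a4 arrive in registers; id, the fifth argument, arrives on the stack */
    extern __native syscall_handler(__native a1, __native a2, __native a3,
        __native a4, __native id);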
/kernel/trunk/arch/ia32/include/interrupt.h |
---|
63,14 → 63,19 |
#define VECTOR_DEBUG_IPI (IVT_FREEBASE+2) |
struct istate { |
__u32 eax; |
__u32 ecx; |
__u32 edx; |
__u32 esi; |
__u32 edi; |
__u32 esi; |
__u32 ebp; |
__u32 esp; |
__u32 ebx; |
__u32 edx; |
__u32 ecx; |
__u32 eax; |
__u32 gs; |
__u32 fs; |
__u32 es; |
__u32 ds; |
__u32 error_word; |
__u32 eip; |
__u32 cs; |
/kernel/trunk/arch/ia32/include/atomic.h |
---|
30,6 → 30,8 |
#define __ia32_ATOMIC_H__ |
#include <arch/types.h> |
#include <arch/barrier.h> |
#include <preemption.h> |
typedef struct { volatile __u32 count; } atomic_t; |
100,7 → 102,31 |
return v; |
} |
/** Ia32 specific fast spinlock */ |
static inline void atomic_lock_arch(atomic_t *val) |
{ |
__u32 tmp; |
extern void spinlock_arch(volatile int *val); |
preemption_disable(); |
__asm__ volatile ( |
"0:;" |
#ifdef CONFIG_HT |
"pause;" /* Pentium 4's HT love this instruction */ |
#endif |
"mov %0, %1;" |
"testl %1, %1;" |
"jnz 0b;" /* Leightweight looping on locked spinlock */ |
"incl %1;" /* now use the atomic operation */ |
"xchgl %0, %1;" |
"testl %1, %1;" |
"jnz 0b;" |
: "=m"(val->count),"=r"(tmp) |
); |
/* |
* Prevent critical section code from bleeding out this way up. |
*/ |
CS_ENTER_BARRIER(); |
} |
#endif |
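This is the ia32 twin of the amd64 routine above and, with CONFIG_DEBUG_SPINLOCK unset, it is what the spinlock_lock() macro from spinlock.h expands to. A sketch of the expansion (lock is illustrative):

    spinlock_lock(&lock);
    /* becomes, after macro expansion: */
    atomic_lock_arch(&(&lock)->val);  /* i.e. atomic_lock_arch(&lock.val) */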
/kernel/trunk/arch/ia32/src/asm.S |
---|
68,6 → 68,15 |
pop %eax |
ret |
# Clear the Nested Task (NT) flag in EFLAGS |
# clobbers %ecx |
.macro CLEAR_NT_FLAG |
pushfl |
pop %ecx |
and $0xffffbfff,%ecx |
push %ecx |
popfl |
.endm |
## Declare interrupt handlers |
# |
77,8 → 86,40 |
# The handlers setup data segment registers |
# and call exc_dispatch(). |
# |
#define INTERRUPT_ALIGN 64 |
.macro handler i n |
.ifeq \i-0x30 # Syscall handler |
push %ds |
push %es |
push %fs |
push %gs |
# Push arguments on stack |
push %edi |
push %esi |
push %edx |
push %ecx |
push %eax |
# load the kernel data segment selector (0x10) into %ds and %es |
movw $16,%ax |
movw %ax,%ds |
movw %ax,%es |
sti |
call syscall_handler # syscall_handler(eax, ecx, edx, esi, edi) |
cli |
addl $20, %esp # clean-up of parameters |
pop %gs |
pop %fs |
pop %es |
pop %ds |
CLEAR_NT_FLAG |
iret |
.else |
/* |
* This macro distinguishes between two versions of ia32 exceptions. |
* One version has an error word and the other does not. |
85,16 → 126,11 |
* The latter version fakes the error word on the stack so that the |
* handlers and istate_t can be the same for both types. |
*/ |
.iflt \i-32 |
.if (1 << \i) & ERROR_WORD_INTERRUPT_LIST |
/* |
* Version with error word. |
* Just take space equal to subl $4, %esp. |
*/ |
nop |
nop |
nop |
/* |
* With error word, do nothing |
*/ |
.else |
/* |
* Version without error word, |
106,41 → 142,59 |
* Version without error word, |
*/ |
subl $4, %esp |
.endif |
pusha |
movl %esp, %ebp |
.endif |
push %ds |
push %es |
push %fs |
push %gs |
#ifdef CONFIG_DEBUG_ALLREGS |
push %ebx |
push %ebp |
push %edi |
push %esi |
#else |
sub $16, %esp # no save; just keep the istate layout |
#endif |
push %edx |
push %ecx |
push %eax |
# load the kernel data segment selector (0x10) into %ds and %es |
movw $16,%ax |
movw %ax,%ds |
movw %ax,%es |
pushl %ebp |
pushl $(\i) |
call exc_dispatch |
addl $8,%esp |
pushl %esp # *istate |
pushl $(\i) # intnum |
call exc_dispatch # exc_dispatch(intnum, istate) |
addl $8,%esp # Clear arguments from stack |
CLEAR_NT_FLAG # Modifies %ecx |
pop %eax |
pop %ecx |
pop %edx |
#ifdef CONFIG_DEBUG_ALLREGS |
pop %esi |
pop %edi |
pop %ebp |
pop %ebx |
#else |
add $16, %esp # skip the four unsaved slots |
#endif |
pop %gs |
pop %fs |
pop %es |
pop %ds |
# Clear Nested Task flag. |
pushfl |
pop %eax |
and $0xffffbfff,%eax |
push %eax |
popfl |
popa |
addl $4,%esp # Skip error word, no matter whether real or fake. |
iret |
.endif |
.align INTERRUPT_ALIGN |
.if (\n-\i)-1 |
handler "(\i+1)",\n |
.endif |
148,12 → 202,10 |
# keep in sync with pm.h !!! |
IDT_ITEMS=64 |
.align INTERRUPT_ALIGN |
interrupt_handlers: |
h_start: |
handler 0 64 |
# handler 64 128 |
# handler 128 192 |
# handler 192 256 |
handler 0 IDT_ITEMS |
h_end: |
.data |
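The exception path's push sequence is exactly what gives istate_t its new layout in interrupt.h: %eax is pushed last, so it sits at the lowest address, and the four registers inside the CONFIG_DEBUG_ALLREGS block are either saved or merely skipped with sub $16, %esp so the offsets stay stable. A sketch of the correspondence (field order equals push order reversed):

    /* Layout after the exception-path pushes, lowest address first: */
    struct istate_sketch {
            __u32 eax, ecx, edx;       /* always saved, eax pushed last */
            __u32 esi, edi, ebp, ebx;  /* valid only with CONFIG_DEBUG_ALLREGS;
                                          otherwise sub $16, %esp skips them */
            __u32 gs, fs, es, ds;
            __u32 error_word, eip, cs, eflags;
    };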
/kernel/trunk/arch/ia32/src/interrupt.c |
---|
64,8 → 64,10 |
printf("%%eip: %X (%s)\n",istate->eip,symbol); |
printf("ERROR_WORD=%X\n", istate->error_word); |
printf("%%cs=%X,flags=%X\n", istate->cs, istate->eflags); |
printf("%%eax=%X, %%ebx=%X, %%ecx=%X, %%edx=%X\n", istate->eax,istate->ebx,istate->ecx,istate->edx); |
printf("%%esi=%X, %%edi=%X, %%ebp=%X, %%esp=%X\n", istate->esi,istate->edi,istate->ebp,istate->esp); |
printf("%%eax=%X, %%ecx=%X, %%edx=%X, %%esp=%X\n", istate->eax,istate->ecx,istate->edx,&istate->stack[0]); |
#ifdef CONFIG_DEBUG_ALLREGS |
printf("%%esi=%X, %%edi=%X, %%ebp=%X, %%ebx=%X\n", istate->esi,istate->edi,istate->ebp,istate->ebx); |
#endif |
printf("stack: %X, %X, %X, %X\n", istate->stack[0], istate->stack[1], istate->stack[2], istate->stack[3]); |
printf(" %X, %X, %X, %X\n", istate->stack[4], istate->stack[5], istate->stack[6], istate->stack[7]); |
} |
125,12 → 127,7 |
void syscall(int n, istate_t *istate) |
{ |
interrupts_enable(); |
if (istate->esi < SYSCALL_END) |
istate->eax = syscall_table[istate->esi](istate->eax, istate->ebx, istate->ecx, istate->edx); |
else |
panic("Undefined syscall %d", istate->esi); |
interrupts_disable(); |
panic("Obsolete syscall handler."); |
} |
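The panic is safe because vector 0x30 never reaches exc_dispatch() any more: the .ifeq \i-0x30 branch of the handler macro in asm.S calls syscall_handler() directly. A hypothetical user-side stub matching that register assignment (illustrative only):

    static inline __native __syscall(__native a1, __native a2, __native a3,
        __native a4, __native id)
    {
            __native ret;
            __asm__ volatile ("int $0x30"
                : "=a" (ret)
                : "a" (a1), "c" (a2), "d" (a3), "S" (a4), "D" (id));
            return ret;
    }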
void tlb_shootdown_ipi(int n, istate_t *istate) |