/kernel/trunk/contrib/conf/gxemul.sh |
---|
1,4 → 1,4 |
#!/bin/sh |
# Uspace addresses outside of normal memory (kernel has std. 8 or 16MB) |
# we place the pages at 24M |
gxemul -E testmips -X 0x81800000:init kernel.bin |
gxemul $@ -E testmips -X 0x81800000:init kernel.bin |
/kernel/trunk/kernel.config |
---|
74,7 → 74,7 |
! [CONFIG_DEBUG=y&(ARCH=amd64|ARCH=ia32)] CONFIG_DEBUG_AS_WATCHPOINT (y/n) |
# Save all interrupt registers |
! [CONFIG_DEBUG=y&(ARCH=amd64|ARCH=mips)] CONFIG_DEBUG_ALLREGS (y/n) |
! [CONFIG_DEBUG=y&(ARCH=amd64|ARCH=mips32)] CONFIG_DEBUG_ALLREGS (y/n) |
## Run-time configuration directives |
/kernel/trunk/arch/mips32/include/exception.h |
---|
98,4 → 98,6 |
extern void exception_entry(void); |
extern void cache_error_entry(void); |
extern void exception_init(void); |
extern __native syscall_handler(__native a0, __native a1, __native a2, |
__native a3, __native sysnum); |
#endif |
/kernel/trunk/arch/mips32/include/thread.h |
---|
31,6 → 31,6 |
#include <arch/exception.h> |
#define ARCH_THREAD_DATA istate_t *istate |
#define ARCH_THREAD_DATA |
#endif |
/kernel/trunk/arch/mips32/include/cp0.h |
---|
58,46 → 58,58 |
#define cp0_mask_int(it) cp0_status_write(cp0_status_read() & ~(1<<(cp0_status_im_shift+(it)))) |
#define cp0_unmask_int(it) cp0_status_write(cp0_status_read() | (1<<(cp0_status_im_shift+(it)))) |
extern __u32 cp0_index_read(void); |
extern void cp0_index_write(__u32 val); |
#define GEN_READ_CP0(nm,reg) static inline __u32 cp0_ ##nm##_read(void) \ |
{ \ |
__u32 retval; \ |
asm("mfc0 %0, $" #reg : "=r"(retval)); \ |
return retval; \ |
} |
extern __u32 cp0_random_read(void); |
#define GEN_WRITE_CP0(nm,reg) static inline void cp0_ ##nm##_write(__u32 val) \ |
{ \ |
asm("mtc0 %0, $" #reg : : "r"(val) ); \ |
} |
extern __u32 cp0_entry_lo0_read(void); |
extern void cp0_entry_lo0_write(__u32 val); |
GEN_READ_CP0(index, 0); |
GEN_WRITE_CP0(index, 0); |
extern __u32 cp0_entry_lo1_read(void); |
extern void cp0_entry_lo1_write(__u32 val); |
GEN_READ_CP0(random, 1); |
extern __u32 cp0_context_read(void); |
extern void cp0_context_write(__u32 val); |
GEN_READ_CP0(entry_lo0, 2); |
GEN_WRITE_CP0(entry_lo0, 2); |
extern __u32 cp0_pagemask_read(void); |
extern void cp0_pagemask_write(__u32 val); |
GEN_READ_CP0(entry_lo1, 3); |
GEN_WRITE_CP0(entry_lo1, 3); |
extern __u32 cp0_wired_read(void); |
extern void cp0_wired_write(__u32 val); |
GEN_READ_CP0(context, 4); |
GEN_WRITE_CP0(context, 4); |
extern __u32 cp0_badvaddr_read(void); |
GEN_READ_CP0(pagemask, 5); |
GEN_WRITE_CP0(pagemask, 5); |
extern __u32 cp0_count_read(void); |
extern void cp0_count_write(__u32 val); |
GEN_READ_CP0(wired, 6); |
GEN_WRITE_CP0(wired, 6); |
extern __u32 cp0_entry_hi_read(void); |
extern void cp0_entry_hi_write(__u32 val); |
GEN_READ_CP0(badvaddr, 8); |
extern __u32 cp0_compare_read(void); |
extern void cp0_compare_write(__u32 val); |
GEN_READ_CP0(count, 9); |
GEN_WRITE_CP0(count, 9); |
extern __u32 cp0_status_read(void); |
extern void cp0_status_write(__u32 val); |
GEN_READ_CP0(entry_hi, 10); |
GEN_WRITE_CP0(entry_hi, 10); |
extern __u32 cp0_cause_read(void); |
extern void cp0_cause_write(__u32 val); |
GEN_READ_CP0(compare, 11); |
GEN_WRITE_CP0(compare, 11); |
extern __u32 cp0_epc_read(void); |
extern void cp0_epc_write(__u32 val); |
GEN_READ_CP0(status, 12); |
GEN_WRITE_CP0(status, 12); |
extern __u32 cp0_prid_read(void); |
GEN_READ_CP0(cause, 13); |
GEN_WRITE_CP0(cause, 13); |
GEN_READ_CP0(epc, 14); |
GEN_WRITE_CP0(epc, 14); |
GEN_READ_CP0(prid, 15); |
#endif |
/kernel/trunk/arch/mips32/src/exception.c |
---|
129,60 → 129,18 |
exc_dispatch(i+INT_OFFSET, istate); |
} |
#include <debug.h> |
/** Handle syscall userspace call */ |
static void syscall_exception(int n, istate_t *istate) |
__native syscall_handler(__native a0, __native a1, __native a2, |
__native a3, __native sysnum) |
{ |
interrupts_enable(); |
if (istate->t0 < SYSCALL_END) |
istate->v0 = syscall_table[istate->t0](istate->a0, |
istate->a1, |
istate->a2, |
istate->a3); |
else |
panic("Undefined syscall %d", istate->a3); |
istate->epc += 4; |
interrupts_disable(); |
if (sysnum < SYSCALL_END) |
return syscall_table[sysnum](a0,a1,a2,a3); |
panic("Undefined syscall %d", sysnum); |
} |
void exception(istate_t *istate) |
/** Handle syscall userspace call */ |
static void syscall_exception(int n, istate_t *istate) |
{ |
int cause; |
int excno; |
ASSERT(CPU != NULL); |
/* |
* NOTE ON OPERATION ORDERING |
* |
* On entry, interrupts_disable() must be called before |
* exception bit is cleared. |
*/ |
interrupts_disable(); |
cp0_status_write(cp0_status_read() & ~ (cp0_status_exl_exception_bit | |
cp0_status_um_bit)); |
/* Save istate so that the threads can access it */ |
/* If THREAD->istate is set, this is nested exception, |
* do not rewrite it |
*/ |
if (THREAD && !THREAD->istate) |
THREAD->istate = istate; |
cause = cp0_cause_read(); |
excno = cp0_cause_excno(cause); |
/* Dispatch exception */ |
exc_dispatch(excno, istate); |
/* Set to NULL, so that we can still support nested |
* exceptions |
* TODO: We should probably set EXL bit before this command to prevent |
* nesting. On the other hand, if some exception occurs between |
* here and ERET, it won't set anything on the istate anyway. |
*/ |
if (THREAD) |
THREAD->istate = NULL; |
panic("Syscall is handled through shortcut"); |
} |
void exception_init(void) |
/kernel/trunk/arch/mips32/src/fpu_context.c |
---|
36,8 → 36,6 |
{ |
#ifdef ARCH_HAS_FPU |
cp0_status_write(cp0_status_read() & ~cp0_status_fpu_bit); |
if (THREAD && THREAD->istate) |
THREAD->istate->status &= ~cp0_status_fpu_bit; |
#endif |
} |
45,8 → 43,6 |
{ |
#ifdef ARCH_HAS_FPU |
cp0_status_write(cp0_status_read() | cp0_status_fpu_bit); |
if (THREAD && THREAD->istate) |
THREAD->istate->status |= cp0_status_fpu_bit; |
#endif |
} |
/kernel/trunk/arch/mips32/src/asm.S |
---|
46,77 → 46,6 |
.set noreorder |
.set nomacro |
.global cp0_index_read |
.global cp0_index_write |
.global cp0_random_read |
.global cp0_entry_lo0_read |
.global cp0_entry_lo0_write |
.global cp0_entry_lo1_read |
.global cp0_entry_lo1_write |
.global cp0_context_read |
.global cp0_context_write |
.global cp0_pagemask_read |
.global cp0_pagemask_write |
.global cp0_wired_read |
.global cp0_wired_write |
.global cp0_badvaddr_read |
.global cp0_count_read |
.global cp0_count_write |
.global cp0_entry_hi_read |
.global cp0_entry_hi_write |
.global cp0_compare_read |
.global cp0_compare_write |
.global cp0_status_read |
.global cp0_status_write |
.global cp0_cause_read |
.global cp0_cause_write |
.global cp0_epc_read |
.global cp0_epc_write |
.global cp0_prid_read |
cp0_index_read: cp0_read $0 |
cp0_index_write: cp0_write $0 |
cp0_random_read: cp0_read $1 |
cp0_entry_lo0_read: cp0_read $2 |
cp0_entry_lo0_write: cp0_write $2 |
cp0_entry_lo1_read: cp0_read $3 |
cp0_entry_lo1_write: cp0_write $3 |
cp0_context_read: cp0_read $4 |
cp0_context_write: cp0_write $4 |
cp0_pagemask_read: cp0_read $5 |
cp0_pagemask_write: cp0_write $5 |
cp0_wired_read: cp0_read $6 |
cp0_wired_write: cp0_write $6 |
cp0_badvaddr_read: cp0_read $8 |
cp0_count_read: cp0_read $9 |
cp0_count_write: cp0_write $9 |
cp0_entry_hi_read: cp0_read $10 |
cp0_entry_hi_write: cp0_write $10 |
cp0_compare_read: cp0_read $11 |
cp0_compare_write: cp0_write $11 |
cp0_status_read: cp0_read $12 |
cp0_status_write: cp0_write $12 |
cp0_cause_read: cp0_read $13 |
cp0_cause_write: cp0_write $13 |
cp0_epc_read: cp0_read $14 |
cp0_epc_write: cp0_write $14 |
cp0_prid_read: cp0_read $15 |
.global cpu_halt |
cpu_halt: |
j cpu_halt |
/kernel/trunk/arch/mips32/src/start.S |
---|
43,9 → 43,13 |
.global exception_entry |
.global userspace_asm |
# Which status bits are thread-local |
#define REG_SAVE_MASK 0x1f # KSU(UM), EXL, ERL, IE |
# Save registers to space defined by \r |
# We will change $at on the way |
.macro REGISTERS_STORE r |
# We will change status: Disable ERL,EXL,UM,IE |
# These changes will be automatically reversed in REGISTER_LOAD |
.macro REGISTERS_STORE_AND_EXC_RESET r |
sw $at,EOFFSET_AT(\r) |
sw $v0,EOFFSET_V0(\r) |
sw $v1,EOFFSET_V1(\r) |
85,13 → 89,30 |
sw $ra,EOFFSET_RA(\r) |
sw $sp,EOFFSET_SP(\r) |
mfc0 $at, $status |
sw $at,EOFFSET_STATUS(\r) |
mfc0 $at, $epc |
sw $at,EOFFSET_EPC(\r) |
mfc0 $t0, $status |
mfc0 $t1, $epc |
and $t2, $t0, REG_SAVE_MASK # Save only KSU,EXL,ERL,IE |
li $t3, ~(0x1f) |
and $t0, $t0, $t3 # Clear KSU,EXL,ERL,IE |
sw $t2,EOFFSET_STATUS(\r) |
sw $t1,EOFFSET_EPC(\r) |
mtc0 $t0, $status |
.endm |
.macro REGISTERS_LOAD r |
# Update only UM,EXL,ERL,IE from status, the rest |
# is controlled by OS and not bound to task |
mfc0 $t0, $status |
lw $t1,EOFFSET_STATUS(\r) |
li $t2, ~REG_SAVE_MASK # Mask UM,EXL,ERL,IE |
and $t0, $t0, $t2 |
or $t0, $t0, $t1 # Copy UM,EXL,ERL,IE from saved status |
mtc0 $t0, $status |
lw $v0,EOFFSET_V0(\r) |
lw $v1,EOFFSET_V1(\r) |
lw $a0,EOFFSET_A0(\r) |
128,8 → 149,6 |
lw $at,EOFFSET_HI(\r) |
mthi $at |
lw $at,EOFFSET_STATUS(\r) |
mtc0 $at, $status |
lw $at,EOFFSET_EPC(\r) |
mtc0 $at, $epc |
188,23 → 207,81 |
exception_handler: |
KERNEL_STACK_TO_K0 |
mfc0 $k1, $cause |
sub $k0, REGISTER_SPACE |
REGISTERS_STORE $k0 |
add $sp, $k0, 0 |
add $a0, $sp, 0 |
jal exception /* exception(register_space) */ |
nop |
sra $k1, $k1, 0x2 # cp0_exc_cause() part 1 |
andi $k1, $k1, 0x1f # cp0_exc_cause() part 2 |
sub $k1, 8 # 8=SYSCALL |
beqz $k1, uspace_shortcut |
add $k1, 8 # Revert $k1 back to correct exc number |
REGISTERS_STORE_AND_EXC_RESET $k0 |
move $sp, $k0 |
move $a1, $sp |
jal exc_dispatch # exc_dispatch(excno, register_space) |
move $a0, $k1 |
REGISTERS_LOAD $sp |
# The $sp is automatically restored to former value |
eret |
nop |
# it seems that mips reserves some space on stack for varfuncs??? |
#define SS_ARG4 16 |
#define SS_SP 20 |
#define SS_STATUS 24 |
#define SS_EPC 28 |
#define SS_RA 32 |
uspace_shortcut: |
# We have a lot of space on the stack, with free use |
sw $sp, SS_SP($k0) |
move $sp, $k0 |
sw $ra, SS_RA($k0) |
mfc0 $t1, $epc |
mfc0 $t0, $status |
sw $t1,SS_EPC($sp) # Save EPC |
and $t2, $t0, REG_SAVE_MASK # Save only KSU,EXL,ERL,IE |
li $t3, ~(0x1f) |
and $t0, $t0, $t3 # Clear KSU,EXL,ERL |
ori $t0, $t0, 0x1 # Set IE |
sw $t2,SS_STATUS($sp) |
mtc0 $t0, $status |
jal syscall_handler |
sw $v0, SS_ARG4($sp) # save v0 - arg4 to stack |
# Restore RA |
lw $ra, SS_RA($sp) |
# restore epc+4 |
lw $t0,SS_EPC($sp) |
addi $t0, $t0, 4 |
mtc0 $t0, $epc |
# restore status |
mfc0 $t0, $status |
lw $t1,SS_STATUS($sp) |
li $t2, ~REG_SAVE_MASK # Mask UM,EXL,ERL,IE |
and $t0, $t0, $t2 |
or $t0, $t0, $t1 # Copy UM,EXL,ERL,IE from saved status |
mtc0 $t0, $status |
lw $sp,SS_SP($sp) # restore sp |
eret |
tlb_refill_handler: |
KERNEL_STACK_TO_K0 |
sub $k0, REGISTER_SPACE |
REGISTERS_STORE $k0 |
REGISTERS_STORE_AND_EXC_RESET $k0 |
add $sp, $k0, 0 |
add $a0, $sp, 0 |
214,12 → 291,11 |
REGISTERS_LOAD $sp |
eret |
nop |
cache_error_handler: |
KERNEL_STACK_TO_K0 |
sub $sp, REGISTER_SPACE |
REGISTERS_STORE $sp |
REGISTERS_STORE_AND_EXC_RESET $sp |
add $sp, $k0, 0 |
jal cache_error |
228,11 → 304,9 |
REGISTERS_LOAD $sp |
eret |
nop |
userspace_asm: |
add $sp, $a0, 0 |
add $v0, $a1, 0 |
eret |
nop |