/kernel/trunk/arch/mips32/include/cp0.h |
---|
49,9 → 49,8 |
/* |
* Magic value for use in msim. |
* On an AMD Duron 800MHz, this roughly corresponds to one microsecond (us). |
*/ |
#define cp0_compare_value 10000 |
#define cp0_compare_value 100000 |
#define cp0_mask_all_int() cp0_status_write(cp0_status_read() & ~(cp0_status_im_mask)) |
#define cp0_unmask_all_int() cp0_status_write(cp0_status_read() | cp0_status_im_mask) |
/kernel/trunk/arch/mips32/include/barrier.h |
---|
35,8 → 35,8 |
#define CS_ENTER_BARRIER() __asm__ volatile ("" ::: "memory") |
#define CS_LEAVE_BARRIER() __asm__ volatile ("" ::: "memory") |
#define memory_barrier() |
#define read_barrier() |
#define write_barrier() |
#define memory_barrier() __asm__ volatile ("" ::: "memory") |
#define read_barrier() __asm__ volatile ("" ::: "memory") |
#define write_barrier() __asm__ volatile ("" ::: "memory") |
#endif |
/kernel/trunk/arch/mips32/src/mips32.c |
---|
80,8 → 80,6 |
/* Initialize dispatch table */ |
exception_init(); |
interrupt_init(); |
arc_init(); |
/* Copy the exception vectors to the right places */ |
89,6 → 87,7 |
memcpy(NORM_EXC, (char *)exception_entry, EXCEPTION_JUMP_SIZE); |
memcpy(CACHE_EXC, (char *)cache_error_entry, EXCEPTION_JUMP_SIZE); |
interrupt_init(); |
/* |
* Switch to BEV normal level so that exception vectors point to the kernel. |
* Clear the error level. |
99,16 → 98,12 |
* Mask all interrupts |
*/ |
cp0_mask_all_int(); |
/* |
* Unmask hardware clock interrupt. |
*/ |
cp0_unmask_int(TIMER_IRQ); |
/* |
* Start hardware clock. |
*/ |
cp0_compare_write(cp0_compare_value + cp0_count_read()); |
console_init(); |
debugger_init(); |
} |
/kernel/trunk/arch/mips32/src/interrupt.c |
---|
76,9 → 76,26 |
return cp0_status_read(); |
} |
/* TODO: This is SMP unsafe!!! */ |
static unsigned long nextcount; |
/** Start hardware clock */ |
static void timer_start(void) |
{ |
nextcount = cp0_compare_value + cp0_count_read(); |
cp0_compare_write(nextcount); |
} |
static void timer_exception(int n, istate_t *istate) |
{ |
cp0_compare_write(cp0_count_read() + cp0_compare_value); |
unsigned long drift; |
drift = cp0_count_read() - nextcount; |
while (drift > cp0_compare_value) { |
drift -= cp0_compare_value; |
CPU->missed_clock_ticks++; |
} |
nextcount = cp0_count_read() + cp0_compare_value - drift; |
cp0_compare_write(nextcount); |
clock(); |
} |
100,6 → 117,7 |
int_register(TIMER_IRQ, "timer", timer_exception); |
int_register(0, "swint0", swint0); |
int_register(1, "swint1", swint1); |
timer_start(); |
} |
static void ipc_int(int n, istate_t *istate) |
/kernel/trunk/arch/ia32/include/barrier.h |
---|
29,8 → 29,6 |
#ifndef __ia32_BARRIER_H__ |
#define __ia32_BARRIER_H__ |
#include <arch/types.h> |
/* |
* NOTE: |
* No barriers for critical section (i.e. spinlock) on IA-32 are needed: |
60,7 → 58,7 |
# ifdef CONFIG_WEAK_MEMORY |
# define write_barrier() __asm__ volatile ("sfence\n" ::: "memory") |
# else |
# define write_barrier() |
# define write_barrier() __asm__ volatile( "" ::: "memory"); |
# endif |
#elif CONFIG_FENCES_P3 |
# define memory_barrier() cpuid_serialization() |
68,7 → 66,7 |
# ifdef CONFIG_WEAK_MEMORY |
# define write_barrier() __asm__ volatile ("sfence\n" ::: "memory") |
# else |
# define write_barrier() |
# define write_barrier() __asm__ volatile( "" ::: "memory"); |
# endif |
#else |
# define memory_barrier() cpuid_serialization() |
76,7 → 74,7 |
# ifdef CONFIG_WEAK_MEMORY |
# define write_barrier() cpuid_serialization() |
# else |
# define write_barrier() |
# define write_barrier() __asm__ volatile( "" ::: "memory"); |
# endif |
#endif |