/kernel/trunk/generic/include/time/clock.h |
---|
32,5 → 32,6 |
#define HZ 100 |
extern void clock(void); |
extern void clock_counter_init(void); |
#endif |
/kernel/trunk/generic/src/main/main.c |
---|
212,6 → 212,7 |
cpu_init(); |
calibrate_delay_loop(); |
clock_counter_init(); |
timeout_init(); |
scheduler_init(); |
task_init(); |
/kernel/trunk/generic/src/time/clock.c |
---|
48,7 → 48,69 |
#include <adt/list.h> |
#include <atomic.h> |
#include <proc/thread.h> |
#include <sysinfo/sysinfo.h> |
#include <arch/barrier.h> |
/* Publicly exported time counters.
 *
 * One page holding this structure is allocated by clock_counter_init()
 * and its frame address is published via sysinfo ("clock.faddr"), so
 * readers outside the kernel proper can map and poll it.
 */
struct ptime {
	__native seconds;	/* whole seconds since boot */
	__native useconds;	/* microsecond part of the current second */
	__native useconds2;	/* mirror of useconds, written last with a
				 * barrier in between — presumably lets a
				 * lock-free reader detect a torn read by
				 * comparing it with useconds; confirm
				 * against the reader side. */
};
struct ptime *public_time;
/* Variable holding fragment of second, so that we would update
 * seconds correctly; accumulates 1000000/HZ microseconds per tick
 * (only ever touched on CPU 0, see clock_update_counters()).
 */
static __native secfrag = 0;
/** Initialize realtime clock counter |
* |
* The applications (and sometimes kernel) need to access accurate |
* information about realtime data. We allocate 1 page with these |
* data and update it periodically. |
* |
* |
*/ |
void clock_counter_init(void) |
{ |
void *faddr; |
faddr = (void *)PFN2ADDR(frame_alloc(0, FRAME_ATOMIC)); |
if (!faddr) |
panic("Cannot allocate page for clock"); |
public_time = (struct ptime *)PA2KA(faddr); |
/* TODO: We would need some arch dependent settings here */ |
public_time->seconds = 0; |
public_time->useconds = 0; |
sysinfo_set_item_val("clock.faddr", NULL, (__native)faddr); |
} |
/** Update public counters
 *
 * Advances public_time by one tick (1000000/HZ microseconds).
 * Update it only on first processor, so the counters move exactly
 * once per global clock tick.
 *
 * Write order matters: useconds, barrier, seconds, barrier,
 * useconds2, barrier. Since useconds2 is written last, a reader
 * can presumably detect an in-progress update by comparing
 * useconds with useconds2 — TODO confirm against the reader side.
 *
 * TODO: Do we really need so many write barriers?
 */
static void clock_update_counters(void)
{
	if (CPU->id == 0) {
		/* HZ = 100 divides 1000000 exactly, so secfrag hits
		 * exactly 1000000 at each second boundary. */
		secfrag += 1000000/HZ;
		if (secfrag >= 1000000) {
			public_time->useconds = 0;
			write_barrier();
			public_time->seconds++;
			secfrag = 0;
		} else
			public_time->useconds += 1000000/HZ;
		write_barrier();
		/* Publish the mirror copy last. */
		public_time->useconds2 = public_time->useconds;
		write_barrier();
	}
}
/** Clock routine |
* |
* Clock routine executed from clock interrupt handler |
69,6 → 131,7 |
* run all expired timeouts as you visit them. |
*/ |
for (i = 0; i <= CPU->missed_clock_ticks; i++) { |
clock_update_counters(); |
spinlock_lock(&CPU->timeoutlock); |
while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) { |
h = list_get_instance(l, timeout_t, link); |
/kernel/trunk/generic/src/ipc/sysipc.c |
---|
151,6 → 151,7 |
if (!IPC_GET_RETVAL(answer->data)) { |
ipl_t ipl; |
as_t *as; |
int rc; |
ipl = interrupts_disable(); |
spinlock_lock(&answer->sender->lock); |
158,8 → 159,9 |
spinlock_unlock(&answer->sender->lock); |
interrupts_restore(ipl); |
return as_area_share(AS, IPC_GET_ARG1(answer->data), IPC_GET_ARG2(*olddata), |
rc = as_area_share(AS, IPC_GET_ARG1(answer->data), IPC_GET_ARG2(*olddata), |
as, IPC_GET_ARG1(*olddata), IPC_GET_ARG3(*olddata)); |
IPC_SET_RETVAL(answer->data, rc); |
} |
} |
return 0; |
/kernel/trunk/arch/mips32/include/cp0.h |
---|
49,9 → 49,8 |
/* |
* Magic value for use in msim. |
* On an AMD Duron 800MHz host, this roughly corresponds to one microsecond.
*/ |
#define cp0_compare_value 10000 |
#define cp0_compare_value 100000 |
#define cp0_mask_all_int() cp0_status_write(cp0_status_read() & ~(cp0_status_im_mask)) |
#define cp0_unmask_all_int() cp0_status_write(cp0_status_read() | cp0_status_im_mask) |
/kernel/trunk/arch/mips32/include/barrier.h |
---|
35,8 → 35,8 |
#define CS_ENTER_BARRIER() __asm__ volatile ("" ::: "memory") |
#define CS_LEAVE_BARRIER() __asm__ volatile ("" ::: "memory") |
#define memory_barrier() |
#define read_barrier() |
#define write_barrier() |
#define memory_barrier() __asm__ volatile ("" ::: "memory") |
#define read_barrier() __asm__ volatile ("" ::: "memory") |
#define write_barrier() __asm__ volatile ("" ::: "memory") |
#endif |
/kernel/trunk/arch/mips32/src/mips32.c |
---|
80,8 → 80,6 |
/* Initialize dispatch table */ |
exception_init(); |
interrupt_init(); |
arc_init(); |
/* Copy the exception vectors to the right places */ |
89,6 → 87,7 |
memcpy(NORM_EXC, (char *)exception_entry, EXCEPTION_JUMP_SIZE); |
memcpy(CACHE_EXC, (char *)cache_error_entry, EXCEPTION_JUMP_SIZE); |
interrupt_init(); |
/* |
* Switch to BEV normal level so that exception vectors point to the kernel. |
* Clear the error level. |
99,16 → 98,12 |
* Mask all interrupts |
*/ |
cp0_mask_all_int(); |
/* |
* Unmask hardware clock interrupt. |
*/ |
cp0_unmask_int(TIMER_IRQ); |
/* |
* Start hardware clock. |
*/ |
cp0_compare_write(cp0_compare_value + cp0_count_read()); |
console_init(); |
debugger_init(); |
} |
/kernel/trunk/arch/mips32/src/interrupt.c |
---|
76,9 → 76,26 |
return cp0_status_read(); |
} |
/* TODO: This is SMP unsafe!!! */ |
static unsigned long nextcount; |
/** Start hardware clock */ |
static void timer_start(void) |
{ |
nextcount = cp0_compare_value + cp0_count_read(); |
cp0_compare_write(nextcount); |
} |
/** Hardware clock interrupt handler.
 *
 * Measures how far past the scheduled deadline (nextcount) the CP0
 * count register has run; every full period of lateness is recorded
 * in CPU->missed_clock_ticks so clock() can catch up. The next
 * deadline is then set one period ahead, compensated by the residual
 * drift, and clock() is invoked for this tick.
 *
 * Fix: drop the leftover unconditional
 * cp0_compare_write(cp0_count_read() + cp0_compare_value) at the top —
 * it preceded the declaration of drift (invalid before C99) and its
 * reprogramming of the compare register defeated the drift
 * compensation performed below.
 *
 * @param n      Interrupt number (unused here).
 * @param istate Saved interrupted state (unused here).
 */
static void timer_exception(int n, istate_t *istate)
{
	unsigned long drift;

	drift = cp0_count_read() - nextcount;
	while (drift > cp0_compare_value) {
		drift -= cp0_compare_value;
		CPU->missed_clock_ticks++;
	}
	nextcount = cp0_count_read() + cp0_compare_value - drift;
	cp0_compare_write(nextcount);
	clock();
}
100,6 → 117,7 |
int_register(TIMER_IRQ, "timer", timer_exception); |
int_register(0, "swint0", swint0); |
int_register(1, "swint1", swint1); |
timer_start(); |
} |
static void ipc_int(int n, istate_t *istate) |
/kernel/trunk/arch/ia32/include/barrier.h |
---|
29,8 → 29,6 |
#ifndef __ia32_BARRIER_H__ |
#define __ia32_BARRIER_H__ |
#include <arch/types.h> |
/* |
* NOTE: |
* No barriers for critical section (i.e. spinlock) on IA-32 are needed: |
60,7 → 58,7 |
# ifdef CONFIG_WEAK_MEMORY |
# define write_barrier() __asm__ volatile ("sfence\n" ::: "memory") |
# else |
# define write_barrier() |
# define write_barrier() __asm__ volatile( "" ::: "memory"); |
# endif |
#elif CONFIG_FENCES_P3 |
# define memory_barrier() cpuid_serialization() |
68,7 → 66,7 |
# ifdef CONFIG_WEAK_MEMORY |
# define write_barrier() __asm__ volatile ("sfence\n" ::: "memory") |
# else |
# define write_barrier() |
# define write_barrier() __asm__ volatile( "" ::: "memory"); |
# endif |
#else |
# define memory_barrier() cpuid_serialization() |
76,7 → 74,7 |
# ifdef CONFIG_WEAK_MEMORY |
# define write_barrier() cpuid_serialization() |
# else |
# define write_barrier() |
# define write_barrier() __asm__ volatile( "" ::: "memory"); |
# endif |
#endif |