/* ===== File: tags/0.2.0/kernel/generic/src/time/clock.c ===== */
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file clock.c |
* @brief High-level clock interrupt handler. |
* |
* This file contains the clock() function which is the source |
* of preemption. It is also responsible for executing expired |
* timeouts. |
*/ |
#include <time/clock.h> |
#include <time/timeout.h> |
#include <arch/types.h> |
#include <config.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <func.h> |
#include <proc/scheduler.h> |
#include <cpu.h> |
#include <arch.h> |
#include <adt/list.h> |
#include <atomic.h> |
#include <proc/thread.h> |
#include <sysinfo/sysinfo.h> |
#include <arch/barrier.h> |
/* Pointers to public variables with time */
struct ptime {
	__native seconds1;	/* first copy of the seconds counter */
	__native useconds;	/* microsecond fraction of the current second */
	__native seconds2;	/* second copy; a consistent reader snapshot has seconds1 == seconds2 */
};

/*
 * Kernel-mapped view of the page published to readers via the
 * "clock.faddr" sysinfo item (see clock_counter_init()).  Updated
 * only by CPU 0 in clock_update_counters().
 */
struct ptime *public_time;

/* Variable holding fragment of second, so that we would update
 * seconds correctly
 */
static __native secfrag = 0;
/** Initialize realtime clock counter |
* |
* The applications (and sometimes kernel) need to access accurate |
* information about realtime data. We allocate 1 page with these |
* data and update it periodically. |
* |
* |
*/ |
void clock_counter_init(void) |
{ |
void *faddr; |
faddr = (void *)PFN2ADDR(frame_alloc(0, FRAME_ATOMIC)); |
if (!faddr) |
panic("Cannot allocate page for clock"); |
public_time = (struct ptime *)PA2KA(faddr); |
/* TODO: We would need some arch dependent settings here */ |
public_time->seconds1 = 0; |
public_time->seconds2 = 0; |
public_time->useconds = 0; |
sysinfo_set_item_val("clock.faddr", NULL, (__native)faddr); |
} |
/** Update public counters
 *
 * Update it only on first processor
 * TODO: Do we really need so many write barriers?
 *
 * Writer side of a seqlock-like protocol: on a second rollover,
 * seconds1 is bumped first and seconds2 last, with write barriers
 * in between, so a reader that sees seconds1 == seconds2 observes a
 * consistent (seconds, useconds) pair.
 */
static void clock_update_counters(void)
{
	if (CPU->id == 0) {
		/* Each tick advances time by 1000000/HZ microseconds. */
		secfrag += 1000000/HZ;
		if (secfrag >= 1000000) {
			/* A full second has elapsed; keep the remainder. */
			secfrag -= 1000000;
			public_time->seconds1++;
			write_barrier();
			/* Reset the microsecond counter to the leftover fraction. */
			public_time->useconds = secfrag;
			write_barrier();
			/* Publishing seconds2 == seconds1 re-validates the snapshot. */
			public_time->seconds2 = public_time->seconds1;
		} else
			public_time->useconds += 1000000/HZ;
	}
}
/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
	link_t *l;
	timeout_t *h;
	timeout_handler_t f;
	void *arg;
	count_t missed_clock_ticks = CPU->missed_clock_ticks;
	int i;

	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 *
	 * Process the current tick plus every tick this CPU missed,
	 * hence "<=" rather than "<".
	 */
	for (i = 0; i <= missed_clock_ticks; i++) {
		clock_update_counters();
		spinlock_lock(&CPU->timeoutlock);
		while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
			h = list_get_instance(l, timeout_t, link);
			spinlock_lock(&h->lock);
			/*
			 * The list is delta-encoded: each entry's ticks count is
			 * relative to its predecessor.  If the head entry still has
			 * ticks left, decrement it and stop — nothing further in the
			 * list can be due yet.
			 */
			if (h->ticks-- != 0) {
				spinlock_unlock(&h->lock);
				break;
			}
			list_remove(l);
			/* Copy handler and argument out before releasing h->lock. */
			f = h->handler;
			arg = h->arg;
			timeout_reinitialize(h);
			spinlock_unlock(&h->lock);
			spinlock_unlock(&CPU->timeoutlock);

			/* Run the handler with no timeout locks held. */
			f(arg);

			spinlock_lock(&CPU->timeoutlock);
		}
		spinlock_unlock(&CPU->timeoutlock);
	}
	CPU->missed_clock_ticks = 0;

	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 */

	if (THREAD) {
		__u64 ticks;

		spinlock_lock(&CPU->lock);
		CPU->needs_relink += 1 + missed_clock_ticks;
		spinlock_unlock(&CPU->lock);

		spinlock_lock(&THREAD->lock);
		/* Charge the current thread for all ticks processed this pass. */
		if ((ticks = THREAD->ticks)) {
			if (ticks >= 1 + missed_clock_ticks)
				THREAD->ticks -= 1 + missed_clock_ticks;
			else
				THREAD->ticks = 0;
		}
		spinlock_unlock(&THREAD->lock);

		/* Quantum exhausted before this pass: preempt unless disabled. */
		if (!ticks && !PREEMPTION_DISABLED) {
			scheduler();
		}
	}
}
/* ===== File: tags/0.2.0/kernel/generic/src/time/delay.c ===== */
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file delay.c |
* @brief Active delay function. |
*/ |
#include <time/delay.h> |
#include <arch/types.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <arch.h> |
/** Active delay |
* |
* Delay the execution for the given number |
* of microseconds (or slightly more). The delay |
* is implemented as CPU calibrated active loop. |
* |
* @param usec Number of microseconds to sleep. |
*/ |
void delay(__u32 usec) |
{ |
ipl_t ipl; |
/* |
* The delay loop is calibrated for each and every |
* CPU in the system. Therefore it is necessary to |
* call interrupts_disable() before calling the |
* asm_delay_loop(). |
*/ |
ipl = interrupts_disable(); |
asm_delay_loop(usec * CPU->delay_loop_const); |
interrupts_restore(ipl); |
} |
/* ===== File: tags/0.2.0/kernel/generic/src/time/timeout.c ===== */
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file timeout.c |
* @brief Timeout management functions. |
*/ |
#include <time/timeout.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <config.h> |
#include <panic.h> |
#include <synch/spinlock.h> |
#include <func.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <arch.h> |
/** Initialize timeouts
 *
 * Initialize kernel timeouts.
 *
 * Sets up the current CPU's timeout queue lock and its (initially
 * empty) list of active timeouts.  Expected to run once per CPU.
 */
void timeout_init(void)
{
	spinlock_initialize(&CPU->timeoutlock, "timeout_lock");
	list_initialize(&CPU->timeout_active_head);
}
/** Reinitialize timeout |
* |
* Initialize all members except the lock. |
* |
* @param t Timeout to be initialized. |
* |
*/ |
void timeout_reinitialize(timeout_t *t) |
{ |
t->cpu = NULL; |
t->ticks = 0; |
t->handler = NULL; |
t->arg = NULL; |
link_initialize(&t->link); |
} |
/** Initialize timeout
 *
 * Initialize all members including the lock.
 *
 * Full first-time initialization: creates the per-timeout spinlock and
 * then clears the remaining members via timeout_reinitialize().
 *
 * @param t Timeout to be initialized.
 *
 */
void timeout_initialize(timeout_t *t)
{
	spinlock_initialize(&t->lock, "timeout_t_lock");
	timeout_reinitialize(t);
}
/** Register timeout
 *
 * Insert timeout handler f (with argument arg)
 * to timeout list and make it execute in
 * time microseconds (or slightly more).
 *
 * The active list is delta-encoded: each entry's ticks value is
 * relative to the sum of its predecessors' ticks.  Insertion therefore
 * walks the list accumulating that sum to find the right slot.
 *
 * Lock order: CPU->timeoutlock is taken before any timeout_t lock,
 * matching the order used by the clock interrupt path.
 *
 * @param t Timeout structure; must not be currently registered
 *          (t->cpu == NULL), otherwise the kernel panics.
 * @param time Number of usec in the future to execute
 * the handler.
 * @param f Timeout handler function.
 * @param arg Timeout handler argument.
 *
 */
void timeout_register(timeout_t *t, __u64 time, timeout_handler_t f, void *arg)
{
	timeout_t *hlp = NULL;
	link_t *l, *m;
	ipl_t ipl;
	__u64 sum;

	ipl = interrupts_disable();
	spinlock_lock(&CPU->timeoutlock);
	spinlock_lock(&t->lock);

	if (t->cpu)
		panic("t->cpu != 0");

	t->cpu = CPU;
	t->ticks = us2ticks(time);

	t->handler = f;
	t->arg = arg;

	/*
	 * Insert t into the active timeouts list according to t->ticks.
	 */
	sum = 0;
	l = CPU->timeout_active_head.next;
	while (l != &CPU->timeout_active_head) {
		hlp = list_get_instance(l, timeout_t, link);
		spinlock_lock(&hlp->lock);
		/* Stop at the first entry whose absolute expiry is later than t's. */
		if (t->ticks < sum + hlp->ticks) {
			spinlock_unlock(&hlp->lock);
			break;
		}
		sum += hlp->ticks;
		spinlock_unlock(&hlp->lock);
		l = l->next;
	}
	m = l->prev;
	list_prepend(&t->link, m);	/* avoid using l->prev */

	/*
	 * Adjust t->ticks according to ticks accumulated in h's predecessors.
	 */
	t->ticks -= sum;

	/*
	 * Decrease ticks of t's immediate succesor by t->ticks.
	 *
	 * (hlp still points at the successor here: it is the entry the
	 * walk broke on; the guard excludes the list head.)
	 */
	if (l != &CPU->timeout_active_head) {
		spinlock_lock(&hlp->lock);
		hlp->ticks -= t->ticks;
		spinlock_unlock(&hlp->lock);
	}

	spinlock_unlock(&t->lock);
	spinlock_unlock(&CPU->timeoutlock);
	interrupts_restore(ipl);
}
/** Unregister timeout
 *
 * Remove timeout from timeout list.
 *
 * Locking note: the rest of the code takes cpu->timeoutlock before a
 * timeout's own lock, while this function must start from t->lock (it
 * needs t->cpu to know which queue lock to take).  To avoid deadlock,
 * the queue lock is only trylock'd; on failure everything is dropped
 * and the whole sequence is retried.
 *
 * @param t Timeout to unregister.
 *
 * @return true on success (t was pending and has been removed),
 *         false on failure (t was not registered / already fired).
 */
bool timeout_unregister(timeout_t *t)
{
	timeout_t *hlp;
	link_t *l;
	ipl_t ipl;

grab_locks:
	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	if (!t->cpu) {
		/* Not registered (or its handler already ran). */
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		return false;
	}
	if (!spinlock_trylock(&t->cpu->timeoutlock)) {
		/* Reverse lock order would deadlock; back off and retry. */
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		goto grab_locks;
	}

	/*
	 * Now we know for sure that t hasn't been activated yet
	 * and is lurking in t->cpu->timeout_active_head queue.
	 */

	l = t->link.next;
	if (l != &t->cpu->timeout_active_head) {
		/*
		 * Preserve the delta encoding: fold t's remaining ticks
		 * into its successor before removing t.
		 */
		hlp = list_get_instance(l, timeout_t, link);
		spinlock_lock(&hlp->lock);
		hlp->ticks += t->ticks;
		spinlock_unlock(&hlp->lock);
	}

	list_remove(&t->link);
	spinlock_unlock(&t->cpu->timeoutlock);

	timeout_reinitialize(t);
	spinlock_unlock(&t->lock);

	interrupts_restore(ipl);
	return true;
}