56,11 → 56,8 |
#include <arch/barrier.h> |
#include <mm/frame.h> |
#include <ddi/ddi.h> |
#if defined CONFIG_TIMEOUT_AVL_TREE || defined CONFIG_TIMEOUT_EXTAVL_TREE |
#include <arch/asm.h> |
#include <arch/types.h> |
#include <panic.h> |
#endif |
|
|
/* Pointer to the variable holding system uptime. */ |
uptime_t *uptime; |
|
151,7 → 148,6 |
* To avoid lock ordering problems, |
* run all expired timeouts as you visit them. |
*/ |
|
|
for (; i <= last_clock_tick; i++) { |
clock_update_counters(); |
219,6 → 215,97 |
} |
} |
|
#elif defined CONFIG_TIMEOUT_FAVL_TREE |
|
/** Clock routine |
 * |
 * Clock routine executed from clock interrupt handler |
 * (assuming interrupts_disable()'d). Runs expired timeouts |
 * and preemptive scheduling. |
 * |
 */ |
void clock(void) |
{ |
	timeout_t *h; |
	timeout_handler_t f; |
	void *arg; |
	/* Number of clock ticks that went unserviced before this invocation. */ |
	count_t missed_clock_ticks = CPU->missed_clock_ticks; |
	/* i starts at the tree's base tick; node keys are compared against it below. */ |
	uint64_t i = CPU->timeout_active_tree.base; |
	uint64_t last_clock_tick = i + missed_clock_ticks; |
	favltree_node_t *expnode; |
 |
	/* |
	 * To avoid lock ordering problems, |
	 * run all expired timeouts as you visit them. |
	 */ |
 |
	/* Process the current tick plus every missed one. */ |
	for (; i <= last_clock_tick; i++) { |
		clock_update_counters(); |
		spinlock_lock(&CPU->timeoutlock); |
 |
		/* |
		 * Check whether the first timeout (the one with the smallest key |
		 * in the tree) has timed out. If so, perform its callback function |
		 * and try the next timeout (several timeouts may share the same key). |
		 * Function favltree_find_min works in constant time. |
		 */ |
		while ((expnode = favltree_find_min(&CPU->timeout_active_tree)) != NULL) { |
			h = favltree_get_instance(expnode,timeout_t,node); |
			/* Lock the timeout before reading its fields. */ |
			spinlock_lock(&h->lock); |
			if (expnode->key != i) { |
				/* |
				 * Base is increased every for cycle. |
				 * NOTE(review): base is advanced only while a not-yet-expired |
				 * timeout remains in the tree -- confirm the empty-tree case |
				 * is compensated for elsewhere (e.g. at timeout registration). |
				 */ |
				(CPU->timeout_active_tree.base)++; |
				spinlock_unlock(&h->lock); |
				break; |
			} |
 |
			/* |
			 * Delete minimal key from the tree and repair tree structure in |
			 * logarithmic time. |
			 */ |
			favltree_delete_min(&CPU->timeout_active_tree); |
 |
			/* |
			 * Copy the handler and its argument, then drop both locks so |
			 * the callback runs with no spinlocks held. |
			 */ |
			f = h->handler; |
			arg = h->arg; |
			timeout_reinitialize(h); |
			spinlock_unlock(&h->lock); |
			spinlock_unlock(&CPU->timeoutlock); |
 |
			f(arg); |
 |
			/* Re-acquire before probing the tree for the next expired node. */ |
			spinlock_lock(&CPU->timeoutlock); |
		} |
		spinlock_unlock(&CPU->timeoutlock); |
	} |
 |
	CPU->missed_clock_ticks = 0; |
 |
	/* |
	 * Do CPU usage accounting and find out whether to preempt THREAD. |
	 */ |
	if (THREAD) { |
		uint64_t ticks; |
 |
		spinlock_lock(&CPU->lock); |
		/* Account for the current tick plus any missed ones. */ |
		CPU->needs_relink += 1 + missed_clock_ticks; |
		spinlock_unlock(&CPU->lock); |
 |
		spinlock_lock(&THREAD->lock); |
		/* Charge the thread's remaining quantum, clamping at zero. */ |
		if ((ticks = THREAD->ticks)) { |
			if (ticks >= 1 + missed_clock_ticks) |
				THREAD->ticks -= 1 + missed_clock_ticks; |
			else |
				THREAD->ticks = 0; |
		} |
		spinlock_unlock(&THREAD->lock); |
 |
		/* Quantum exhausted: reschedule unless preemption is disabled. */ |
		if (!ticks && !PREEMPTION_DISABLED) { |
			scheduler(); |
		} |
	} |
} |
|
#elif defined CONFIG_TIMEOUT_EXTAVL_TREE |
|
/** Clock routine |