Diff between Rev 2450 and Rev 2461
@@ Rev 2450: line 54 / Rev 2461: line 54 @@
 #include <proc/thread.h>
 #include <sysinfo/sysinfo.h>
 #include <arch/barrier.h>
 #include <mm/frame.h>
 #include <ddi/ddi.h>
-#if defined CONFIG_TIMEOUT_AVL_TREE || defined CONFIG_TIMEOUT_EXTAVL_TREE
-#include <arch/asm.h>
-#include <arch/types.h>
-#include <panic.h>
-#endif
+
+
 /* Pointer to variable with uptime */
 uptime_t *uptime;
 
 /** Physical memory area of the real time clock */
 static parea_t clock_parea;
@@ Rev 2450: line 149 / Rev 2461: line 146 @@
 
 	/*
 	 * To avoid lock ordering problems,
 	 * run all expired timeouts as you visit them.
 	 */
-
 
 	for (; i <= last_clock_tick; i++) {
 		clock_update_counters();
 		spinlock_lock(&CPU->timeoutlock);
 
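The comment above refers to the way expired timeouts are handled further down: each handler is invoked only after both the per-timeout lock and the per-CPU timeout lock have been released, presumably so that a handler which itself needs those locks (for example, to re-register a timeout) cannot deadlock against the clock path. The next hunk adds exactly this pattern around every handler call; excerpted here for emphasis:

	spinlock_unlock(&h->lock);           /* release the timeout's own lock */
	spinlock_unlock(&CPU->timeoutlock);  /* release the per-CPU timeout lock */
	f(arg);                              /* run the handler with no timeout locks held */
	spinlock_lock(&CPU->timeoutlock);    /* reacquire before examining the next timeout */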
@@ Rev 2450: line 181 / Rev 2461: line 177 @@
 
 			f = h->handler;
 			arg = h->arg;
 			timeout_reinitialize(h);
 			spinlock_unlock(&h->lock);
+			spinlock_unlock(&CPU->timeoutlock);
+
+			f(arg);
+
+			spinlock_lock(&CPU->timeoutlock);
+		}
+		spinlock_unlock(&CPU->timeoutlock);
+	}
+
+	CPU->missed_clock_ticks = 0;
+
+	/*
+	 * Do CPU usage accounting and find out whether to preempt THREAD.
+	 */
+	if (THREAD) {
+		uint64_t ticks;
+
+		spinlock_lock(&CPU->lock);
+		CPU->needs_relink += 1 + missed_clock_ticks;
+		spinlock_unlock(&CPU->lock);
+
+		spinlock_lock(&THREAD->lock);
+		if ((ticks = THREAD->ticks)) {
+			if (ticks >= 1 + missed_clock_ticks)
+				THREAD->ticks -= 1 + missed_clock_ticks;
+			else
+				THREAD->ticks = 0;
+		}
+		spinlock_unlock(&THREAD->lock);
+
+		if (!ticks && !PREEMPTION_DISABLED) {
+			scheduler();
+		}
+	}
+}
+
+#elif defined CONFIG_TIMEOUT_FAVL_TREE
+
+/** Clock routine
+ *
+ * Clock routine executed from clock interrupt handler
+ * (assuming interrupts_disable()'d). Runs expired timeouts
+ * and preemptive scheduling.
+ *
+ */
+void clock(void)
+{
+	timeout_t *h;
+	timeout_handler_t f;
+	void *arg;
+	count_t missed_clock_ticks = CPU->missed_clock_ticks;
+	uint64_t i = CPU->timeout_active_tree.base;
+	uint64_t last_clock_tick = i + missed_clock_ticks;
+	favltree_node_t *expnode;
+
+	/*
+	 * To avoid lock ordering problems,
+	 * run all expired timeouts as you visit them.
+	 */
+
+	for (; i <= last_clock_tick; i++) {
+		clock_update_counters();
+		spinlock_lock(&CPU->timeoutlock);
+
+		/*
+		 * Check whether the first timeout (the one with the smallest key in
+		 * the tree) has expired. If so, run its callback and try the next
+		 * timeout (several timeouts can share the same expiration time).
+		 * favltree_find_min() works in constant time.
+		 */
+		while ((expnode = favltree_find_min(&CPU->timeout_active_tree)) != NULL) {
+			h = favltree_get_instance(expnode, timeout_t, node);
+			spinlock_lock(&h->lock);
+			if (expnode->key != i) {
+				/*
+				 * Base is increased in every iteration of the for cycle.
+				 */
+				(CPU->timeout_active_tree.base)++;
+				spinlock_unlock(&h->lock);
+				break;
+			}
+
+			/*
+			 * Delete the minimal key from the tree and repair the tree
+			 * structure in logarithmic time.
+			 */
+			favltree_delete_min(&CPU->timeout_active_tree);
+
+			f = h->handler;
+			arg = h->arg;
+			timeout_reinitialize(h);
+			spinlock_unlock(&h->lock);
 			spinlock_unlock(&CPU->timeoutlock);
 
 			f(arg);
 
 			spinlock_lock(&CPU->timeoutlock);
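The added FAVL variant keeps timeouts keyed by absolute tick number and uses the tree's base field as the current tick: favltree_find_min() yields the earliest timeout in constant time, entries whose key equals the current tick are removed with favltree_delete_min() and their handlers run, and base advances once no further entry matches. The following user-space sketch models that expiry loop with a sorted array standing in for the FAVL tree; it is illustrative only (the names timeout_queue_t, queue_find_min(), queue_delete_min() and tick() are invented for this sketch, locking is omitted, and the bookkeeping of base is simplified relative to the kernel code).

/*
 * Standalone user-space model (not kernel code) of the expiry loop added in
 * this revision: timeouts carry absolute tick keys, the structure's base
 * field tracks the current tick, and each tick pops entries whose key equals
 * the current tick before advancing base.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef void (*timeout_handler_t)(void *arg);

typedef struct {
	uint64_t key;              /* absolute tick at which the timeout fires */
	timeout_handler_t handler;
	void *arg;
} timeout_t;

typedef struct {
	uint64_t base;             /* current tick represented by the structure */
	size_t count;
	timeout_t entries[16];     /* kept sorted by key; stand-in for the tree */
} timeout_queue_t;

/* "find_min": the smallest key is simply the first entry of the sorted array. */
static timeout_t *queue_find_min(timeout_queue_t *q)
{
	return q->count ? &q->entries[0] : NULL;
}

/* "delete_min": drop the first entry; the real tree rebalances in O(log n). */
static void queue_delete_min(timeout_queue_t *q)
{
	for (size_t i = 1; i < q->count; i++)
		q->entries[i - 1] = q->entries[i];
	q->count--;
}

/* One clock tick worth of work, mirroring the structure of the added loop. */
static void tick(timeout_queue_t *q)
{
	timeout_t *min;

	while ((min = queue_find_min(q)) != NULL) {
		if (min->key != q->base) {
			/* Nothing else expires this tick; advance to the next one. */
			q->base++;
			return;
		}
		timeout_handler_t f = min->handler;
		void *arg = min->arg;
		queue_delete_min(q);
		/* In the kernel, the locks are dropped here before calling f(). */
		f(arg);
	}
	q->base++;
}

static void report(void *arg)
{
	printf("timeout \"%s\" fired\n", (const char *) arg);
}

int main(void)
{
	timeout_queue_t q = {
		.base = 0,
		.count = 3,
		.entries = {
			{ .key = 1, .handler = report, .arg = "first"  },
			{ .key = 1, .handler = report, .arg = "second" },  /* same tick */
			{ .key = 3, .handler = report, .arg = "third"  },
		},
	};

	for (int t = 0; t < 5; t++)
		tick(&q);

	return 0;
}

Running the sketch fires the two key-1 timeouts during the same tick and the key-3 timeout two ticks later, which is the behaviour the while loop's key != i test is there to produce.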