Rev 2450
| Rev | Author | Line No. | Line |
|---|---|---|---|
| 1 | jermar | 1 | /* |
| 2336 | mencl | 2 | * Copyright (C) 2001-2004 Jakub Jermar |
| 2450 | mencl | 3 | * Copyright (C) 2007 Vojtech Mencl |
| 1 | jermar | 4 | * All rights reserved. |
| | | 5 | * |
| | | 6 | * Redistribution and use in source and binary forms, with or without |
| | | 7 | * modification, are permitted provided that the following conditions |
| | | 8 | * are met: |
| | | 9 | * |
| | | 10 | * - Redistributions of source code must retain the above copyright |
| | | 11 | * notice, this list of conditions and the following disclaimer. |
| | | 12 | * - Redistributions in binary form must reproduce the above copyright |
| | | 13 | * notice, this list of conditions and the following disclaimer in the |
| | | 14 | * documentation and/or other materials provided with the distribution. |
| | | 15 | * - The name of the author may not be used to endorse or promote products |
| | | 16 | * derived from this software without specific prior written permission. |
| | | 17 | * |
| | | 18 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| | | 19 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| | | 20 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| | | 21 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| | | 22 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| | | 23 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| | | 24 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| | | 25 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| | | 26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| | | 27 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| | | 28 | */ |
| | | 29 | |
| 1731 | jermar | 30 | /** @addtogroup time |
| 1702 | cejka | 31 | * @{ |
| | | 32 | */ |
| | | 33 | |
| 1264 | jermar | 34 | /** |
| 1702 | cejka | 35 | * @file |
| 1264 | jermar | 36 | * @brief High-level clock interrupt handler. |
| | | 37 | * |
| | | 38 | * This file contains the clock() function which is the source |
| | | 39 | * of preemption. It is also responsible for executing expired |
| | | 40 | * timeouts. |
| | | 41 | */ |
| | | 42 | |
| 1 | jermar | 43 | #include <time/clock.h> |
| | | 44 | #include <time/timeout.h> |
| | | 45 | #include <config.h> |
| | | 46 | #include <synch/spinlock.h> |
| | | 47 | #include <synch/waitq.h> |
| | | 48 | #include <func.h> |
| | | 49 | #include <proc/scheduler.h> |
| | | 50 | #include <cpu.h> |
| | | 51 | #include <arch.h> |
| 788 | jermar | 52 | #include <adt/list.h> |
| 1104 | jermar | 53 | #include <atomic.h> |
| 391 | jermar | 54 | #include <proc/thread.h> |
| 1434 | palkovsky | 55 | #include <sysinfo/sysinfo.h> |
| | | 56 | #include <arch/barrier.h> |
| 2015 | jermar | 57 | #include <mm/frame.h> |
| | | 58 | #include <ddi/ddi.h> |
| 2461 | mencl | 59 | |
| | | 60 | |
| 2307 | hudecek | 61 | /* Pointer to variable with uptime */ |
| | | 62 | uptime_t *uptime; |
| | | 63 | |
| | | 64 | /** Physical memory area of the real time clock */ |
| 2015 | jermar | 65 | static parea_t clock_parea; |
| | | 66 | |
| 1434 | palkovsky | 67 | /* Variable holding the fraction of a second, so that seconds |
| | | 68 | * are updated correctly. |
| | | 69 | */ |
| 1780 | jermar | 70 | static unative_t secfrag = 0; |
| 1434 | palkovsky | 71 | |
| | | 72 | /** Initialize realtime clock counter |
| | | 73 | * |
| | | 74 | * Applications (and sometimes the kernel) need access to accurate |
| | | 75 | * realtime information. We allocate one page for this data and |
| | | 76 | * update it periodically. |
| | | 77 | */ |
| | | 78 | void clock_counter_init(void) |
| | | 79 | { |
| | | 80 | void *faddr; |
| | | 81 | |
| 2015 | jermar | 82 | faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC); |
| 1434 | palkovsky | 83 | if (!faddr) |
| | | 84 | panic("Cannot allocate page for clock"); |
| | | 85 | |
| 2307 | hudecek | 86 | uptime = (uptime_t *) PA2KA(faddr); |
| | | 87 | |
| | | 88 | uptime->seconds1 = 0; |
| | | 89 | uptime->seconds2 = 0; |
| | | 90 | uptime->useconds = 0; |
| 1434 | palkovsky | 91 | |
| 2015 | jermar | 92 | clock_parea.pbase = (uintptr_t) faddr; |
| 2307 | hudecek | 93 | clock_parea.vbase = (uintptr_t) uptime; |
| 2015 | jermar | 94 | clock_parea.frames = 1; |
| | | 95 | clock_parea.cacheable = true; |
| | | 96 | ddi_parea_register(&clock_parea); |
| | | 97 | |
| | | 98 | /* |
| | | 99 | * Prepare information for the userspace so that it can successfully |
| | | 100 | * physmem_map() the clock_parea. |
| | | 101 | */ |
| | | 102 | sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true); |
| | | 103 | sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr); |
| 1434 | palkovsky | 104 | } |
| | | 105 | |
| | | 106 | |
| | | 107 | /** Update public counters |
| | | 108 | * |
| | | 109 | * Updated only on the first processor. |
| | | 110 | * TODO: Do we really need so many write barriers? |
| | | 111 | */ |
| | | 112 | static void clock_update_counters(void) |
| | | 113 | { |
| | | 114 | if (CPU->id == 0) { |
| 2307 | hudecek | 115 | secfrag += 1000000 / HZ; |
| 1434 | palkovsky | 116 | if (secfrag >= 1000000) { |
| 1438 | palkovsky | 117 | secfrag -= 1000000; |
| 2307 | hudecek | 118 | uptime->seconds1++; |
| 1434 | palkovsky | 119 | write_barrier(); |
| 2307 | hudecek | 120 | uptime->useconds = secfrag; |
| 1438 | palkovsky | 121 | write_barrier(); |
| 2307 | hudecek | 122 | uptime->seconds2 = uptime->seconds1; |
| 1434 | palkovsky | 123 | } else |
| 2307 | hudecek | 124 | uptime->useconds += 1000000 / HZ; |
| 1434 | palkovsky | 125 | } |
| | | 126 | } |
| | | 127 | |
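The seconds1/seconds2 pair updated above, together with the write_barrier() calls, lets a reader of the page exported by clock_counter_init() detect a torn update: seconds1 is bumped before useconds is written and seconds2 only afterwards, so when the two seconds fields agree, the useconds value read in between belongs to that same second. A minimal reader-side sketch follows; it assumes the uptime page has already been mapped (e.g. via physmem_map() using the clock.faddr sysinfo item) and that a matching read_barrier() is available. The helper name and the unative_t field widths are illustrative assumptions, not part of this file.

```c
/* Hypothetical consistent reader of the exported uptime page (sketch only). */
static void uptime_read(volatile uptime_t *up, unative_t *sec, unative_t *usec)
{
	unative_t s1, s2;

	do {
		s2 = up->seconds2;	/* written last by clock_update_counters() */
		read_barrier();
		*usec = up->useconds;
		read_barrier();
		s1 = up->seconds1;	/* written first by clock_update_counters() */
	} while (s1 != s2);		/* an update raced with us; try again */

	*sec = s1;
}
```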
| 2421 | mencl | 128 | #if defined CONFIG_TIMEOUT_AVL_TREE |
| 2336 | mencl | 129 | |
| 107 | decky | 130 | /** Clock routine |
| | | 131 | * |
| | | 132 | * Clock routine executed from clock interrupt handler |
| 413 | jermar | 133 | * (assuming interrupts_disable()'d). Runs expired timeouts |
| 107 | decky | 134 | * and preemptive scheduling. |
| | | 135 | * |
| 1 | jermar | 136 | */ |
| | | 137 | void clock(void) |
| | | 138 | { |
| 2336 | mencl | 139 | timeout_t *h; |
| | | 140 | timeout_handler_t f; |
| | | 141 | void *arg; |
| | | 142 | count_t missed_clock_ticks = CPU->missed_clock_ticks; |
| 2450 | mencl | 143 | uint64_t i = CPU->timeout_active_tree.base; |
| | | 144 | uint64_t last_clock_tick = i + missed_clock_ticks; |
| 2416 | mencl | 145 | avltree_node_t *expnode; |
| 2421 | mencl | 146 | |
| | | 147 | /* |
| | | 148 | * To avoid lock ordering problems, |
| | | 149 | * run all expired timeouts as you visit them. |
| | | 150 | */ |
| 2450 | mencl | 151 | |
| | | 152 | for (; i <= last_clock_tick; i++) { |
| 2421 | mencl | 153 | clock_update_counters(); |
| | | 154 | spinlock_lock(&CPU->timeoutlock); |
| | | 155 | |
| | | 156 | /* |
| | | 157 | * Check whether the first timeout (the one with the smallest key in the tree) has expired. |
| | | 158 | * If so, run its callback and try the next timeout (more timeouts can share the same expiration time). |
| | | 159 | */ |
| | | 160 | while ((expnode = avltree_find_min(&CPU->timeout_active_tree)) != NULL) { |
| | | 161 | h = avltree_get_instance(expnode, timeout_t, node); |
| | | 162 | spinlock_lock(&h->lock); |
| 2450 | mencl | 163 | if (expnode->key != i) { |
| | | 164 | /* |
| | | 165 | * Base is increased in every iteration of the for cycle. |
| | | 166 | */ |
| | | 167 | (CPU->timeout_active_tree.base)++; |
| 2421 | mencl | 168 | spinlock_unlock(&h->lock); |
| | | 169 | break; |
| | | 170 | } |
| | | 171 | |
| | | 172 | /* |
| | | 173 | * Delete minimal key from the tree and repair tree structure in |
| | | 174 | * logarithmic time. |
| | | 175 | */ |
| | | 176 | avltree_delete_min(&CPU->timeout_active_tree); |
| | | 177 | |
| | | 178 | f = h->handler; |
| | | 179 | arg = h->arg; |
| | | 180 | timeout_reinitialize(h); |
| | | 181 | spinlock_unlock(&h->lock); |
| | | 182 | spinlock_unlock(&CPU->timeoutlock); |
| | | 183 | |
| | | 184 | f(arg); |
| | | 185 | |
| | | 186 | spinlock_lock(&CPU->timeoutlock); |
| | | 187 | } |
| | | 188 | spinlock_unlock(&CPU->timeoutlock); |
| | | 189 | } |
| | | 190 | |
| | | 191 | CPU->missed_clock_ticks = 0; |
| | | 192 | |
| | | 193 | /* |
| | | 194 | * Do CPU usage accounting and find out whether to preempt THREAD. |
| | | 195 | */ |
| | | 196 | if (THREAD) { |
| | | 197 | uint64_t ticks; |
| | | 198 | |
| | | 199 | spinlock_lock(&CPU->lock); |
| | | 200 | CPU->needs_relink += 1 + missed_clock_ticks; |
| | | 201 | spinlock_unlock(&CPU->lock); |
| | | 202 | |
| | | 203 | spinlock_lock(&THREAD->lock); |
| | | 204 | if ((ticks = THREAD->ticks)) { |
| | | 205 | if (ticks >= 1 + missed_clock_ticks) |
| | | 206 | THREAD->ticks -= 1 + missed_clock_ticks; |
| | | 207 | else |
| | | 208 | THREAD->ticks = 0; |
| | | 209 | } |
| | | 210 | spinlock_unlock(&THREAD->lock); |
| | | 211 | |
| | | 212 | if (!ticks && !PREEMPTION_DISABLED) { |
| | | 213 | scheduler(); |
| | | 214 | } |
| | | 215 | } |
| | | 216 | } |
| | | 217 | |
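For context on where these expirations come from: callers arm a timeout_t through the interface in time/timeout.h, and the handler then runs from the loop above, on the CPU that owns the timeout, with interrupts disabled and with neither h->lock nor CPU->timeoutlock held at the point of the call. A minimal usage sketch, assuming the usual timeout_initialize()/timeout_register()/timeout_unregister() calls (time argument in microseconds); the demo names are hypothetical and not part of this file:

```c
/* Hypothetical example (not part of clock.c): arm a one-shot timeout
 * whose handler is invoked from the loop in clock() above. */
static atomic_t demo_fired;

static void demo_timeout_handler(void *arg)
{
	/* Runs from clock() with interrupts disabled and no timeout
	 * locks held; keep the work short. */
	atomic_inc((atomic_t *) arg);
}

static void demo_arm_timeout(void)
{
	timeout_t t;

	timeout_initialize(&t);
	/* Request expiration in roughly 10 ms; timeout_register()
	 * computes the key/ticks that clock() compares against. */
	timeout_register(&t, 10000, demo_timeout_handler, &demo_fired);

	/* ... if the event becomes irrelevant before it fires: */
	timeout_unregister(&t);
}
```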
| 2461 | mencl | 218 | #elif defined CONFIG_TIMEOUT_FAVL_TREE |
| | | 219 | |
| | | 220 | /** Clock routine |
| | | 221 | * |
| | | 222 | * Clock routine executed from clock interrupt handler |
| | | 223 | * (assuming interrupts_disable()'d). Runs expired timeouts |
| | | 224 | * and preemptive scheduling. |
| | | 225 | * |
| | | 226 | */ |
| | | 227 | void clock(void) |
| | | 228 | { |
| | | 229 | timeout_t *h; |
| | | 230 | timeout_handler_t f; |
| | | 231 | void *arg; |
| | | 232 | count_t missed_clock_ticks = CPU->missed_clock_ticks; |
| | | 233 | uint64_t i = CPU->timeout_active_tree.base; |
| | | 234 | uint64_t last_clock_tick = i + missed_clock_ticks; |
| | | 235 | favltree_node_t *expnode; |
| | | 236 | |
| | | 237 | /* |
| | | 238 | * To avoid lock ordering problems, |
| | | 239 | * run all expired timeouts as you visit them. |
| | | 240 | */ |
| | | 241 | |
| | | 242 | for (; i <= last_clock_tick; i++) { |
| | | 243 | clock_update_counters(); |
| | | 244 | spinlock_lock(&CPU->timeoutlock); |
| | | 245 | |
| | | 246 | /* |
| | | 247 | * Check whether the first timeout (the one with the smallest key in the tree) has expired. |
| | | 248 | * If so, run its callback and try the next timeout (more timeouts can share the same expiration time). |
| | | 249 | * Function favltree_find_min works in constant time. |
| | | 250 | */ |
| | | 251 | while ((expnode = favltree_find_min(&CPU->timeout_active_tree)) != NULL) { |
| | | 252 | h = favltree_get_instance(expnode, timeout_t, node); |
| | | 253 | spinlock_lock(&h->lock); |
| | | 254 | if (expnode->key != i) { |
| | | 255 | /* |
| | | 256 | * Base is increased in every iteration of the for cycle. |
| | | 257 | */ |
| | | 258 | (CPU->timeout_active_tree.base)++; |
| | | 259 | spinlock_unlock(&h->lock); |
| | | 260 | break; |
| | | 261 | } |
| | | 262 | |
| | | 263 | /* |
| | | 264 | * Delete minimal key from the tree and repair tree structure in |
| | | 265 | * logarithmic time. |
| | | 266 | */ |
| | | 267 | favltree_delete_min(&CPU->timeout_active_tree); |
| | | 268 | |
| | | 269 | f = h->handler; |
| | | 270 | arg = h->arg; |
| | | 271 | timeout_reinitialize(h); |
| | | 272 | spinlock_unlock(&h->lock); |
| | | 273 | spinlock_unlock(&CPU->timeoutlock); |
| | | 274 | |
| | | 275 | f(arg); |
| | | 276 | |
| | | 277 | spinlock_lock(&CPU->timeoutlock); |
| | | 278 | } |
| | | 279 | spinlock_unlock(&CPU->timeoutlock); |
| | | 280 | } |
| | | 281 | |
| | | 282 | CPU->missed_clock_ticks = 0; |
| | | 283 | |
| | | 284 | /* |
| | | 285 | * Do CPU usage accounting and find out whether to preempt THREAD. |
| | | 286 | */ |
| | | 287 | if (THREAD) { |
| | | 288 | uint64_t ticks; |
| | | 289 | |
| | | 290 | spinlock_lock(&CPU->lock); |
| | | 291 | CPU->needs_relink += 1 + missed_clock_ticks; |
| | | 292 | spinlock_unlock(&CPU->lock); |
| | | 293 | |
| | | 294 | spinlock_lock(&THREAD->lock); |
| | | 295 | if ((ticks = THREAD->ticks)) { |
| | | 296 | if (ticks >= 1 + missed_clock_ticks) |
| | | 297 | THREAD->ticks -= 1 + missed_clock_ticks; |
| | | 298 | else |
| | | 299 | THREAD->ticks = 0; |
| | | 300 | } |
| | | 301 | spinlock_unlock(&THREAD->lock); |
| | | 302 | |
| | | 303 | if (!ticks && !PREEMPTION_DISABLED) { |
| | | 304 | scheduler(); |
| | | 305 | } |
| | | 306 | } |
| | | 307 | } |
| | | 308 | |
| 2416 | mencl | 309 | #elif defined CONFIG_TIMEOUT_EXTAVL_TREE |
| 2421 | mencl | 310 | |
| | | 311 | /** Clock routine |
| | | 312 | * |
| | | 313 | * Clock routine executed from clock interrupt handler |
| | | 314 | * (assuming interrupts_disable()'d). Runs expired timeouts |
| | | 315 | * and preemptive scheduling. |
| | | 316 | * |
| | | 317 | */ |
| | | 318 | void clock(void) |
| | | 319 | { |
| | | 320 | timeout_t *h; |
| | | 321 | timeout_handler_t f; |
| | | 322 | void *arg; |
| | | 323 | count_t missed_clock_ticks = CPU->missed_clock_ticks; |
| 2450 | mencl | 324 | uint64_t i = CPU->timeout_active_tree.base; |
| | | 325 | uint64_t last_clock_tick = i + missed_clock_ticks; |
| 2416 | mencl | 326 | extavltree_node_t *expnode; |
| 2450 | mencl | 327 | //ipl_t ipl; |
| 2416 | mencl | 328 | |
| 2336 | mencl | 329 | /* |
| | | 330 | * To avoid lock ordering problems, |
| | | 331 | * run all expired timeouts as you visit them. |
| | | 332 | */ |
| | | 333 | |
| 2450 | mencl | 334 | for (; i <= last_clock_tick; i++) { |
| 2336 | mencl | 335 | clock_update_counters(); |
| | | 336 | spinlock_lock(&CPU->timeoutlock); |
| 2416 | mencl | 337 | |
| | | 338 | /* |
| | | 339 | * Check whether the first timeout in the list has expired. If so, run its callback and try |
| | | 340 | * the next timeout (more timeouts can share the same expiration time). |
| 2450 | mencl | 341 | */ |
| 2416 | mencl | 342 | while ((expnode = CPU->timeout_active_tree.head.next) != &(CPU->timeout_active_tree.head)) { |
| 2336 | mencl | 343 | h = extavltree_get_instance(expnode,timeout_t,node); |
| 2450 | mencl | 344 | spinlock_lock(&h->lock); |
| | | 345 | if (expnode->key != i) { |
| | | 346 | /* |
| | | 347 | * Base is increased in every iteration of the for cycle. |
| | | 348 | */ |
| | | 349 | (CPU->timeout_active_tree.base)++; |
| 2336 | mencl | 350 | spinlock_unlock(&h->lock); |
| | | 351 | break; |
| | | 352 | } |
| | | 353 | |
| 2416 | mencl | 354 | /* |
| | | 355 | * Delete first node in the list and repair tree structure in |
| | | 356 | * constant time. |
| | | 357 | */ |
| 2336 | mencl | 358 | extavltree_delete_min(&CPU->timeout_active_tree); |
| | | 359 | |
| | | 360 | f = h->handler; |
| | | 361 | arg = h->arg; |
| | | 362 | timeout_reinitialize(h); |
| | | 363 | spinlock_unlock(&h->lock); |
| | | 364 | spinlock_unlock(&CPU->timeoutlock); |
| | | 365 | |
| | | 366 | f(arg); |
| | | 367 | |
| | | 368 | spinlock_lock(&CPU->timeoutlock); |
| | | 369 | } |
| | | 370 | spinlock_unlock(&CPU->timeoutlock); |
| | | 371 | } |
| | | 372 | |
| | | 373 | CPU->missed_clock_ticks = 0; |
| | | 374 | |
| | | 375 | /* |
| | | 376 | * Do CPU usage accounting and find out whether to preempt THREAD. |
| | | 377 | */ |
| | | 378 | if (THREAD) { |
| | | 379 | uint64_t ticks; |
| | | 380 | |
| | | 381 | spinlock_lock(&CPU->lock); |
| | | 382 | CPU->needs_relink += 1 + missed_clock_ticks; |
| | | 383 | spinlock_unlock(&CPU->lock); |
| | | 384 | |
| | | 385 | spinlock_lock(&THREAD->lock); |
| | | 386 | if ((ticks = THREAD->ticks)) { |
| | | 387 | if (ticks >= 1 + missed_clock_ticks) |
| | | 388 | THREAD->ticks -= 1 + missed_clock_ticks; |
| | | 389 | else |
| | | 390 | THREAD->ticks = 0; |
| | | 391 | } |
| | | 392 | spinlock_unlock(&THREAD->lock); |
| | | 393 | |
| | | 394 | if (!ticks && !PREEMPTION_DISABLED) { |
| | | 395 | scheduler(); |
| | | 396 | } |
| | | 397 | } |
| | | 398 | } |
| | | 399 | |
| 2416 | mencl | 400 | #elif defined CONFIG_TIMEOUT_EXTAVLREL_TREE |
| 2336 | mencl | 401 | |
| 2416 | mencl | 402 | /** Clock routine |
| | | 403 | * |
| | | 404 | * Clock routine executed from clock interrupt handler |
| | | 405 | * (assuming interrupts_disable()'d). Runs expired timeouts |
| | | 406 | * and preemptive scheduling. |
| | | 407 | * |
| | | 408 | */ |
| | | 409 | void clock(void) |
| | | 410 | { |
| 2421 | mencl | 411 | extavlreltree_node_t *expnode; |
| 2416 | mencl | 412 | timeout_t *h; |
| | | 413 | timeout_handler_t f; |
| | | 414 | void *arg; |
| | | 415 | count_t missed_clock_ticks = CPU->missed_clock_ticks; |
| | | 416 | int i; |
| | | 417 | |
| | | 418 | /* |
| | | 419 | * To avoid lock ordering problems, |
| | | 420 | * run all expired timeouts as you visit them. |
| | | 421 | */ |
| | | 422 | for (i = 0; i <= missed_clock_ticks; i++) { |
| | | 423 | clock_update_counters(); |
| | | 424 | spinlock_lock(&CPU->timeoutlock); |
| | | 425 | |
| | | 426 | /* |
| | | 427 | * Check whether the first timeout in the list has expired. If so, run its callback and try |
| | | 428 | * the next timeout (more timeouts can share the same expiration time). |
| | | 429 | */ |
| | | 430 | while ((expnode = CPU->timeout_active_tree.head.next) != &(CPU->timeout_active_tree.head)) { |
| 2421 | mencl | 431 | h = extavlreltree_get_instance(expnode,timeout_t,node); |
| 2416 | mencl | 432 | spinlock_lock(&h->lock); |
| | | 433 | if (expnode->key != 0) { |
| | | 434 | expnode->key--; |
| | | 435 | spinlock_unlock(&h->lock); |
| | | 436 | break; |
| | | 437 | } |
| | | 438 | |
| | | 439 | /* |
| | | 440 | * Delete first node in the list and repair tree structure in |
| | | 441 | * constant time. Be careful of expnode's key, it must be 0! |
| | | 442 | */ |
| 2421 | mencl | 443 | extavlreltree_delete_min(&CPU->timeout_active_tree); |
| 2416 | mencl | 444 | |
| | | 445 | f = h->handler; |
| | | 446 | arg = h->arg; |
| | | 447 | timeout_reinitialize(h); |
| | | 448 | spinlock_unlock(&h->lock); |
| | | 449 | spinlock_unlock(&CPU->timeoutlock); |
| | | 450 | |
| | | 451 | f(arg); |
| | | 452 | |
| | | 453 | spinlock_lock(&CPU->timeoutlock); |
| | | 454 | } |
| | | 455 | spinlock_unlock(&CPU->timeoutlock); |
| | | 456 | } |
| | | 457 | CPU->missed_clock_ticks = 0; |
| | | 458 | |
| | | 459 | /* |
| | | 460 | * Do CPU usage accounting and find out whether to preempt THREAD. |
| | | 461 | */ |
| | | 462 | |
| | | 463 | if (THREAD) { |
| | | 464 | uint64_t ticks; |
| | | 465 | |
| | | 466 | spinlock_lock(&CPU->lock); |
| | | 467 | CPU->needs_relink += 1 + missed_clock_ticks; |
| | | 468 | spinlock_unlock(&CPU->lock); |
| | | 469 | |
| | | 470 | spinlock_lock(&THREAD->lock); |
| | | 471 | if ((ticks = THREAD->ticks)) { |
| | | 472 | if (ticks >= 1 + missed_clock_ticks) |
| | | 473 | THREAD->ticks -= 1 + missed_clock_ticks; |
| | | 474 | else |
| | | 475 | THREAD->ticks = 0; |
| | | 476 | } |
| | | 477 | spinlock_unlock(&THREAD->lock); |
| | | 478 | |
| | | 479 | if (!ticks && !PREEMPTION_DISABLED) { |
| | | 480 | scheduler(); |
| | | 481 | } |
| | | 482 | } |
| | | 483 | } |
| | | 484 | |
| | | 485 | |
| | | 486 | |
| 2336 | mencl | 487 | #else |
| | | 488 | |
| | | 489 | |
| | | 490 | /** Clock routine |
| | | 491 | * |
| | | 492 | * Clock routine executed from clock interrupt handler |
| | | 493 | * (assuming interrupts_disable()'d). Runs expired timeouts |
| | | 494 | * and preemptive scheduling. |
| | | 495 | * |
| | | 496 | */ |
| | | 497 | void clock(void) |
| | | 498 | { |
| 1 | jermar | 499 | link_t *l; |
| | | 500 | timeout_t *h; |
| 411 | jermar | 501 | timeout_handler_t f; |
| 1 | jermar | 502 | void *arg; |
| 1457 | jermar | 503 | count_t missed_clock_ticks = CPU->missed_clock_ticks; |
| 1431 | jermar | 504 | int i; |
| 1 | jermar | 505 | |
| | | 506 | /* |
| | | 507 | * To avoid lock ordering problems, |
| | | 508 | * run all expired timeouts as you visit them. |
| | | 509 | */ |
| 1457 | jermar | 510 | for (i = 0; i <= missed_clock_ticks; i++) { |
| 1434 | palkovsky | 511 | clock_update_counters(); |
| 1431 | jermar | 512 | spinlock_lock(&CPU->timeoutlock); |
| | | 513 | while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) { |
| | | 514 | h = list_get_instance(l, timeout_t, link); |
| | | 515 | spinlock_lock(&h->lock); |
| | | 516 | if (h->ticks-- != 0) { |
| | | 517 | spinlock_unlock(&h->lock); |
| | | 518 | break; |
| | | 519 | } |
| | | 520 | list_remove(l); |
| | | 521 | f = h->handler; |
| | | 522 | arg = h->arg; |
| | | 523 | timeout_reinitialize(h); |
| | | 524 | spinlock_unlock(&h->lock); |
| | | 525 | spinlock_unlock(&CPU->timeoutlock); |
| | | 526 | |
| | | 527 | f(arg); |
| | | 528 | |
| | | 529 | spinlock_lock(&CPU->timeoutlock); |
| 1 | jermar | 530 | } |
| 15 | jermar | 531 | spinlock_unlock(&CPU->timeoutlock); |
| 1 | jermar | 532 | } |
| 1431 | jermar | 533 | CPU->missed_clock_ticks = 0; |
| 1 | jermar | 534 | |
| | | 535 | /* |
| 15 | jermar | 536 | * Do CPU usage accounting and find out whether to preempt THREAD. |
| 1 | jermar | 537 | */ |
| | | 538 | |
| 15 | jermar | 539 | if (THREAD) { |
| 1780 | jermar | 540 | uint64_t ticks; |
| 221 | jermar | 541 | |
| 15 | jermar | 542 | spinlock_lock(&CPU->lock); |
| 1457 | jermar | 543 | CPU->needs_relink += 1 + missed_clock_ticks; |
| 15 | jermar | 544 | spinlock_unlock(&CPU->lock); |
| 1 | jermar | 545 | |
| 15 | jermar | 546 | spinlock_lock(&THREAD->lock); |
| 1457 | jermar | 547 | if ((ticks = THREAD->ticks)) { |
| | | 548 | if (ticks >= 1 + missed_clock_ticks) |
| | | 549 | THREAD->ticks -= 1 + missed_clock_ticks; |
| | | 550 | else |
| | | 551 | THREAD->ticks = 0; |
| | | 552 | } |
| 221 | jermar | 553 | spinlock_unlock(&THREAD->lock); |
| | | 554 | |
| | | 555 | if (!ticks && !PREEMPTION_DISABLED) { |
| 1 | jermar | 556 | scheduler(); |
| | | 557 | } |
| | | 558 | } |
| | | 559 | } |
| 1702 | cejka | 560 | |
| 2336 | mencl | 561 | #endif |
| 1731 | jermar | 562 | /** @} |
| 1702 | cejka | 563 | */ |