/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * Copyright (C) 2007 Vojtech Mencl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup time
 * @{
 */

/**
 * @file
 * @brief High-level clock interrupt handler.
 *
 * This file contains the clock() function which is the source
 * of preemption. It is also responsible for executing expired
 * timeouts.
 */

#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <func.h>
#include <proc/scheduler.h>
#include <cpu.h>
#include <arch.h>
#include <adt/list.h>
#include <atomic.h>
#include <proc/thread.h>
#include <sysinfo/sysinfo.h>
#include <arch/barrier.h>
#include <mm/frame.h>
#include <ddi/ddi.h>

#if defined CONFIG_TIMEOUT_AVL_TREE || defined CONFIG_TIMEOUT_EXTAVL_TREE
#include <arch/asm.h>
#include <arch/types.h>
#include <panic.h>
#endif

/* Pointer to the variable holding uptime */
uptime_t *uptime;

/** Physical memory area of the real time clock */
static parea_t clock_parea;

/*
 * Fraction of the current second (in microseconds), kept so that the
 * seconds counters are updated correctly.
 */
static unative_t secfrag = 0;

/** Initialize realtime clock counter
 *
 * Applications (and sometimes the kernel) need access to accurate
 * realtime information. We allocate one page for this data and
 * update it periodically.
 */
void clock_counter_init(void)
{
	void *faddr;

	faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
	if (!faddr)
		panic("Cannot allocate page for clock");

	uptime = (uptime_t *) PA2KA(faddr);

	uptime->seconds1 = 0;
	uptime->seconds2 = 0;
	uptime->useconds = 0;

	clock_parea.pbase = (uintptr_t) faddr;
	clock_parea.vbase = (uintptr_t) uptime;
	clock_parea.frames = 1;
	clock_parea.cacheable = true;
	ddi_parea_register(&clock_parea);

	/*
	 * Prepare information for the userspace so that it can successfully
	 * physmem_map() the clock_parea.
	 */
	sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
	sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}
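
/*
 * The parea registration and the two sysinfo items above form the kernel
 * side of the interface: userspace is expected to look up "clock.faddr"
 * (and "clock.cacheable") via sysinfo and then physmem_map() the page in
 * order to read the uptime_t structure directly. The userspace calls
 * themselves are outside this file; only the sysinfo keys and the
 * exported physical address are defined here.
 */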
108 | |||
109 | |||
110 | /** Update public counters |
||
111 | * |
||
112 | * Update it only on first processor |
||
113 | * TODO: Do we really need so many write barriers? |
||
114 | */ |
||
115 | static void clock_update_counters(void) |
||
116 | { |
||
117 | if (CPU->id == 0) { |
||
2307 | hudecek | 118 | secfrag += 1000000 / HZ; |
1434 | palkovsky | 119 | if (secfrag >= 1000000) { |
1438 | palkovsky | 120 | secfrag -= 1000000; |
2307 | hudecek | 121 | uptime->seconds1++; |
1434 | palkovsky | 122 | write_barrier(); |
2307 | hudecek | 123 | uptime->useconds = secfrag; |
1438 | palkovsky | 124 | write_barrier(); |
2307 | hudecek | 125 | uptime->seconds2 = uptime->seconds1; |
1434 | palkovsky | 126 | } else |
2307 | hudecek | 127 | uptime->useconds += 1000000 / HZ; |
1434 | palkovsky | 128 | } |
129 | } |
||
130 | |||
#if defined CONFIG_TIMEOUT_AVL_TREE

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
	timeout_t *h;
	timeout_handler_t f;
	void *arg;
	count_t missed_clock_ticks = CPU->missed_clock_ticks;
	uint64_t i = CPU->timeout_active_tree.base;
	uint64_t last_clock_tick = i + missed_clock_ticks;
	avltree_node_t *expnode;

	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 */
	for (; i <= last_clock_tick; i++) {
		clock_update_counters();
		spinlock_lock(&CPU->timeoutlock);

		/*
		 * Check whether the first timeout (the one with the smallest
		 * key in the tree) has expired. If so, run its handler and
		 * try the next timeout (several timeouts can share the same
		 * key).
		 */
		while ((expnode = avltree_find_min(&CPU->timeout_active_tree)) != NULL) {
			h = avltree_get_instance(expnode, timeout_t, node);
			spinlock_lock(&h->lock);
			if (expnode->key != i) {
				/*
				 * The base is increased once per iteration
				 * of the outer for loop.
				 */
				(CPU->timeout_active_tree.base)++;
				spinlock_unlock(&h->lock);
				break;
			}

			/*
			 * Delete the node with the minimal key from the tree
			 * and repair the tree structure in logarithmic time.
			 */
			avltree_delete_min(&CPU->timeout_active_tree);

			f = h->handler;
			arg = h->arg;
			timeout_reinitialize(h);
			spinlock_unlock(&h->lock);
			spinlock_unlock(&CPU->timeoutlock);

			f(arg);

			spinlock_lock(&CPU->timeoutlock);
		}
		spinlock_unlock(&CPU->timeoutlock);
	}

	CPU->missed_clock_ticks = 0;

	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 */
	if (THREAD) {
		uint64_t ticks;

		spinlock_lock(&CPU->lock);
		CPU->needs_relink += 1 + missed_clock_ticks;
		spinlock_unlock(&CPU->lock);

		spinlock_lock(&THREAD->lock);
		if ((ticks = THREAD->ticks)) {
			if (ticks >= 1 + missed_clock_ticks)
				THREAD->ticks -= 1 + missed_clock_ticks;
			else
				THREAD->ticks = 0;
		}
		spinlock_unlock(&THREAD->lock);

		if (!ticks && !PREEMPTION_DISABLED) {
			scheduler();
		}
	}
}

#elif defined CONFIG_TIMEOUT_EXTAVL_TREE

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
	timeout_t *h;
	timeout_handler_t f;
	void *arg;
	count_t missed_clock_ticks = CPU->missed_clock_ticks;
	uint64_t i = CPU->timeout_active_tree.base;
	uint64_t last_clock_tick = i + missed_clock_ticks;
	extavltree_node_t *expnode;
	//ipl_t ipl;

	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 */
	for (; i <= last_clock_tick; i++) {
		clock_update_counters();
		spinlock_lock(&CPU->timeoutlock);

		/*
		 * Check whether the first timeout in the list has expired.
		 * If so, run its handler and try the next timeout (several
		 * timeouts can share the same key).
		 */
		while ((expnode = CPU->timeout_active_tree.head.next) != &(CPU->timeout_active_tree.head)) {
			h = extavltree_get_instance(expnode, timeout_t, node);
			spinlock_lock(&h->lock);
			if (expnode->key != i) {
				/*
				 * The base is increased once per iteration
				 * of the outer for loop.
				 */
				(CPU->timeout_active_tree.base)++;
				spinlock_unlock(&h->lock);
				break;
			}

			/*
			 * Delete the first node in the list and repair the
			 * tree structure in constant time.
			 */
			extavltree_delete_min(&CPU->timeout_active_tree);

			f = h->handler;
			arg = h->arg;
			timeout_reinitialize(h);
			spinlock_unlock(&h->lock);
			spinlock_unlock(&CPU->timeoutlock);

			f(arg);

			spinlock_lock(&CPU->timeoutlock);
		}
		spinlock_unlock(&CPU->timeoutlock);
	}

	CPU->missed_clock_ticks = 0;

	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 */
	if (THREAD) {
		uint64_t ticks;

		spinlock_lock(&CPU->lock);
		CPU->needs_relink += 1 + missed_clock_ticks;
		spinlock_unlock(&CPU->lock);

		spinlock_lock(&THREAD->lock);
		if ((ticks = THREAD->ticks)) {
			if (ticks >= 1 + missed_clock_ticks)
				THREAD->ticks -= 1 + missed_clock_ticks;
			else
				THREAD->ticks = 0;
		}
		spinlock_unlock(&THREAD->lock);

		if (!ticks && !PREEMPTION_DISABLED) {
			scheduler();
		}
	}
}

#elif defined CONFIG_TIMEOUT_EXTAVLREL_TREE

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
	extavlreltree_node_t *expnode;
	timeout_t *h;
	timeout_handler_t f;
	void *arg;
	count_t missed_clock_ticks = CPU->missed_clock_ticks;
	int i;

	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 */
	for (i = 0; i <= missed_clock_ticks; i++) {
		clock_update_counters();
		spinlock_lock(&CPU->timeoutlock);

		/*
		 * Check whether the first timeout in the list has expired.
		 * If so, run its handler and try the next timeout (several
		 * timeouts can share the same expiration time).
		 */
		while ((expnode = CPU->timeout_active_tree.head.next) != &(CPU->timeout_active_tree.head)) {
			h = extavlreltree_get_instance(expnode, timeout_t, node);
			spinlock_lock(&h->lock);
			if (expnode->key != 0) {
				expnode->key--;
				spinlock_unlock(&h->lock);
				break;
			}

			/*
			 * Delete the first node in the list and repair the
			 * tree structure in constant time. Be careful:
			 * expnode's key must be 0!
			 */
			extavlreltree_delete_min(&CPU->timeout_active_tree);

			f = h->handler;
			arg = h->arg;
			timeout_reinitialize(h);
			spinlock_unlock(&h->lock);
			spinlock_unlock(&CPU->timeoutlock);

			f(arg);

			spinlock_lock(&CPU->timeoutlock);
		}
		spinlock_unlock(&CPU->timeoutlock);
	}
	CPU->missed_clock_ticks = 0;

	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 */
	if (THREAD) {
		uint64_t ticks;

		spinlock_lock(&CPU->lock);
		CPU->needs_relink += 1 + missed_clock_ticks;
		spinlock_unlock(&CPU->lock);

		spinlock_lock(&THREAD->lock);
		if ((ticks = THREAD->ticks)) {
			if (ticks >= 1 + missed_clock_ticks)
				THREAD->ticks -= 1 + missed_clock_ticks;
			else
				THREAD->ticks = 0;
		}
		spinlock_unlock(&THREAD->lock);

		if (!ticks && !PREEMPTION_DISABLED) {
			scheduler();
		}
	}
}

#else

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
	link_t *l;
	timeout_t *h;
	timeout_handler_t f;
	void *arg;
	count_t missed_clock_ticks = CPU->missed_clock_ticks;
	int i;

	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 */
	for (i = 0; i <= missed_clock_ticks; i++) {
		clock_update_counters();
		spinlock_lock(&CPU->timeoutlock);
		while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
			h = list_get_instance(l, timeout_t, link);
			spinlock_lock(&h->lock);
			if (h->ticks-- != 0) {
				spinlock_unlock(&h->lock);
				break;
			}
			list_remove(l);
			f = h->handler;
			arg = h->arg;
			timeout_reinitialize(h);
			spinlock_unlock(&h->lock);
			spinlock_unlock(&CPU->timeoutlock);

			f(arg);

			spinlock_lock(&CPU->timeoutlock);
		}
		spinlock_unlock(&CPU->timeoutlock);
	}
	CPU->missed_clock_ticks = 0;

	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 */
	if (THREAD) {
		uint64_t ticks;

		spinlock_lock(&CPU->lock);
		CPU->needs_relink += 1 + missed_clock_ticks;
		spinlock_unlock(&CPU->lock);

		spinlock_lock(&THREAD->lock);
		if ((ticks = THREAD->ticks)) {
			if (ticks >= 1 + missed_clock_ticks)
				THREAD->ticks -= 1 + missed_clock_ticks;
			else
				THREAD->ticks = 0;
		}
		spinlock_unlock(&THREAD->lock);

		if (!ticks && !PREEMPTION_DISABLED) {
			scheduler();
		}
	}
}

#endif
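
/*
 * For context, a sketch of how a timeout reaches the per-CPU structure
 * that clock() drains above. It uses the kernel timeout API declared in
 * <time/timeout.h>; the handler name and the one-second value are made
 * up for illustration:
 *
 *	static void my_handler(void *arg)
 *	{
 *		(void) arg;
 *	}
 *
 *	timeout_t t;
 *	timeout_initialize(&t);
 *	timeout_register(&t, 1000000, my_handler, NULL);
 *
 * The handler is later invoked from clock() on the CPU that owns the
 * timeout, with interrupts disabled, once the requested number of
 * microseconds has elapsed.
 */
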
/** @}
 */