/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
28 | |||
1731 | jermar | 29 | /** @addtogroup time |
1702 | cejka | 30 | * @{ |
31 | */ |
||
32 | |||
1264 | jermar | 33 | /** |
1702 | cejka | 34 | * @file |
1264 | jermar | 35 | * @brief High-level clock interrupt handler. |
36 | * |
||
37 | * This file contains the clock() function which is the source |
||
38 | * of preemption. It is also responsible for executing expired |
||
39 | * timeouts. |
||
40 | */ |
||
41 | |||
1 | jermar | 42 | #include <time/clock.h> |
43 | #include <time/timeout.h> |
||
44 | #include <config.h> |
||
45 | #include <synch/spinlock.h> |
||
46 | #include <synch/waitq.h> |
||
47 | #include <func.h> |
||
48 | #include <proc/scheduler.h> |
||
49 | #include <cpu.h> |
||
50 | #include <arch.h> |
||
788 | jermar | 51 | #include <adt/list.h> |
1104 | jermar | 52 | #include <atomic.h> |
391 | jermar | 53 | #include <proc/thread.h> |
1434 | palkovsky | 54 | #include <sysinfo/sysinfo.h> |
55 | #include <arch/barrier.h> |
||
2015 | jermar | 56 | #include <mm/frame.h> |
57 | #include <ddi/ddi.h> |
||
1 | jermar | 58 | |

/* Pointer to variable with uptime */
uptime_t *uptime;

/** Physical memory area of the real time clock */
static parea_t clock_parea;

/*
 * Fraction of a second accumulated so far, so that whole seconds
 * can be carried over into the uptime counters correctly.
 */
static unative_t secfrag = 0;
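
/*
 * Example (an illustration, not configuration-specific): with HZ == 100,
 * clock_update_counters() below adds 1000000 / HZ == 10000 microseconds
 * per tick, so after 100 ticks secfrag reaches 1000000 and one whole
 * second is carried over into uptime->seconds1.
 */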

/** Initialize realtime clock counter
 *
 * Applications (and sometimes the kernel) need access to accurate
 * realtime information. We allocate one page for this data and
 * update it periodically.
 */
void clock_counter_init(void)
{
        void *faddr;

        faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
        if (!faddr)
                panic("Cannot allocate page for clock");

        uptime = (uptime_t *) PA2KA(faddr);

        uptime->seconds1 = 0;
        uptime->seconds2 = 0;
        uptime->useconds = 0;

        clock_parea.pbase = (uintptr_t) faddr;
        clock_parea.vbase = (uintptr_t) uptime;
        clock_parea.frames = 1;
        clock_parea.cacheable = true;
        ddi_parea_register(&clock_parea);

        /*
         * Prepare information for the userspace so that it can successfully
         * physmem_map() the clock_parea.
         */
        sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
        sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}
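
/*
 * For illustration only: a userspace task would consume the two sysinfo
 * items set above roughly like this (helper names such as sysinfo_value()
 * and the AS_AREA_* flags are assumptions about contemporary libc, not
 * something this file defines):
 *
 *      uintptr_t faddr = sysinfo_value("clock.faddr");
 *      int flags = AS_AREA_READ;
 *      if (sysinfo_value("clock.cacheable"))
 *              flags |= AS_AREA_CACHEABLE;
 *      physmem_map((void *) faddr, vaddr, 1, flags);
 */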
103 | |||
104 | |||
105 | /** Update public counters |
||
106 | * |
||
107 | * Update it only on first processor |
||
108 | * TODO: Do we really need so many write barriers? |
||
109 | */ |
||
110 | static void clock_update_counters(void) |
||
111 | { |
||
112 | if (CPU->id == 0) { |
||
2307 | hudecek | 113 | secfrag += 1000000 / HZ; |
1434 | palkovsky | 114 | if (secfrag >= 1000000) { |
1438 | palkovsky | 115 | secfrag -= 1000000; |
2307 | hudecek | 116 | uptime->seconds1++; |
1434 | palkovsky | 117 | write_barrier(); |
2307 | hudecek | 118 | uptime->useconds = secfrag; |
1438 | palkovsky | 119 | write_barrier(); |
2307 | hudecek | 120 | uptime->seconds2 = uptime->seconds1; |
1434 | palkovsky | 121 | } else |
2307 | hudecek | 122 | uptime->useconds += 1000000 / HZ; |
1434 | palkovsky | 123 | } |
124 | } |
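
/*
 * A note on the barriers above: seconds1 and seconds2 act as a minimal
 * seqlock. The writer increments seconds1, publishes useconds, then copies
 * seconds1 into seconds2, with write barriers between the steps. A reader
 * of the shared page can therefore take a consistent snapshot by retrying
 * until both second counters agree, roughly as follows (a sketch; the
 * matching read_barrier() use on the reader side is an assumption
 * mirroring the writer):
 *
 *      do {
 *              secs = uptime->seconds2;
 *              read_barrier();
 *              usecs = uptime->useconds;
 *              read_barrier();
 *      } while (secs != uptime->seconds1);
 */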

#if defined CONFIG_TIMEOUT_AVL_TREE || \
        defined CONFIG_TIMEOUT_EXTAVL_TREE

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
        timeout_t *h;
        timeout_handler_t f;
        void *arg;
        count_t missed_clock_ticks = CPU->missed_clock_ticks;
        uint64_t *i = &(CPU->timeout_active_tree.base);
        uint64_t absolute_clock_ticks = *i + missed_clock_ticks;
#if defined CONFIG_TIMEOUT_AVL_TREE
        avltree_node_t *expnode;
#elif defined CONFIG_TIMEOUT_EXTAVL_TREE
        extavltree_node_t *expnode;
#endif

        /*
         * To avoid lock ordering problems,
         * run all expired timeouts as you visit them.
         */
        for (; *i <= absolute_clock_ticks; (*i)++) {
                /*
                 * The tree base is increased by missed clock ticks + 1
                 * by this loop.
                 */
                clock_update_counters();
                spinlock_lock(&CPU->timeoutlock);

                /*
                 * Check whether the first timeout in the tree has expired.
                 * If so, run its handler and try the next timeout (more
                 * timeouts can expire at the same time).
                 */
                while ((expnode = CPU->timeout_active_tree.head.next) != &(CPU->timeout_active_tree.head)) {
                        h = extavltree_get_instance(expnode, timeout_t, node);
                        spinlock_lock(&h->lock);
                        if (expnode->key != *i) {
                                spinlock_unlock(&h->lock);
                                break;
                        }

                        /*
                         * Delete the first node in the list and repair the
                         * tree structure in constant time.
                         */
#if defined CONFIG_TIMEOUT_AVL_TREE
                        avltree_delete_min(&CPU->timeout_active_tree);
#elif defined CONFIG_TIMEOUT_EXTAVL_TREE
                        extavltree_delete_min(&CPU->timeout_active_tree);
#endif

                        f = h->handler;
                        arg = h->arg;
                        timeout_reinitialize(h);
                        spinlock_unlock(&h->lock);
                        spinlock_unlock(&CPU->timeoutlock);

                        f(arg);

                        spinlock_lock(&CPU->timeoutlock);
                }
                spinlock_unlock(&CPU->timeoutlock);
        }

        CPU->missed_clock_ticks = 0;

        /*
         * Do CPU usage accounting and find out whether to preempt THREAD.
         */
        if (THREAD) {
                uint64_t ticks;

                spinlock_lock(&CPU->lock);
                CPU->needs_relink += 1 + missed_clock_ticks;
                spinlock_unlock(&CPU->lock);

                spinlock_lock(&THREAD->lock);
                if ((ticks = THREAD->ticks)) {
                        if (ticks >= 1 + missed_clock_ticks)
                                THREAD->ticks -= 1 + missed_clock_ticks;
                        else
                                THREAD->ticks = 0;
                }
                spinlock_unlock(&THREAD->lock);

                if (!ticks && !PREEMPTION_DISABLED) {
                        scheduler();
                }
        }
}
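
/*
 * A note on the three clock() variants in this file: the variant above
 * keys timeouts by absolute tick values and advances the tree base
 * (CPU->timeout_active_tree.base) one tick at a time. The
 * CONFIG_TIMEOUT_EXTAVLREL_TREE variant below stores relative keys that
 * are decremented toward zero, and the default variant keeps a plain
 * sorted list, counting down h->ticks on the list head.
 */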

#elif defined CONFIG_TIMEOUT_EXTAVLREL_TREE

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
        extavltree_node_t *expnode;
        timeout_t *h;
        timeout_handler_t f;
        void *arg;
        count_t missed_clock_ticks = CPU->missed_clock_ticks;
        int i;

        /*
         * To avoid lock ordering problems,
         * run all expired timeouts as you visit them.
         */
        for (i = 0; i <= missed_clock_ticks; i++) {
                clock_update_counters();
                spinlock_lock(&CPU->timeoutlock);

                /*
                 * Check whether the first timeout in the tree has expired.
                 * If so, run its handler and try the next timeout (more
                 * timeouts can expire at the same time).
                 */
                while ((expnode = CPU->timeout_active_tree.head.next) != &(CPU->timeout_active_tree.head)) {
                        h = extavltree_get_instance(expnode, timeout_t, node);
                        spinlock_lock(&h->lock);
                        if (expnode->key != 0) {
                                expnode->key--;
                                spinlock_unlock(&h->lock);
                                break;
                        }

                        /*
                         * Delete the first node in the list and repair the
                         * tree structure in constant time. Be careful of
                         * expnode's key, it must be 0!
                         */
                        extavltree_delete_min(&CPU->timeout_active_tree);

                        f = h->handler;
                        arg = h->arg;
                        timeout_reinitialize(h);
                        spinlock_unlock(&h->lock);
                        spinlock_unlock(&CPU->timeoutlock);

                        f(arg);

                        spinlock_lock(&CPU->timeoutlock);
                }
                spinlock_unlock(&CPU->timeoutlock);
        }
        CPU->missed_clock_ticks = 0;

        /*
         * Do CPU usage accounting and find out whether to preempt THREAD.
         */
        if (THREAD) {
                uint64_t ticks;

                spinlock_lock(&CPU->lock);
                CPU->needs_relink += 1 + missed_clock_ticks;
                spinlock_unlock(&CPU->lock);

                spinlock_lock(&THREAD->lock);
                if ((ticks = THREAD->ticks)) {
                        if (ticks >= 1 + missed_clock_ticks)
                                THREAD->ticks -= 1 + missed_clock_ticks;
                        else
                                THREAD->ticks = 0;
                }
                spinlock_unlock(&THREAD->lock);

                if (!ticks && !PREEMPTION_DISABLED) {
                        scheduler();
                }
        }
}
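
/*
 * (On the invariant noted above: the relative keys in this variant mean a
 * node's key counts ticks remaining relative to its predecessor, so
 * extavltree_delete_min() can presumably repair the keys in constant time
 * only when the deleted minimum carries key 0 and no delta has to be
 * redistributed. The tree internals live in the ADT sources, not here.)
 */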

#else

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
        link_t *l;
        timeout_t *h;
        timeout_handler_t f;
        void *arg;
        count_t missed_clock_ticks = CPU->missed_clock_ticks;
        int i;

        /*
         * To avoid lock ordering problems,
         * run all expired timeouts as you visit them.
         */
        for (i = 0; i <= missed_clock_ticks; i++) {
                clock_update_counters();
                spinlock_lock(&CPU->timeoutlock);
                while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
                        h = list_get_instance(l, timeout_t, link);
                        spinlock_lock(&h->lock);
                        if (h->ticks-- != 0) {
                                spinlock_unlock(&h->lock);
                                break;
                        }
                        list_remove(l);
                        f = h->handler;
                        arg = h->arg;
                        timeout_reinitialize(h);
                        spinlock_unlock(&h->lock);
                        spinlock_unlock(&CPU->timeoutlock);

                        f(arg);

                        spinlock_lock(&CPU->timeoutlock);
                }
                spinlock_unlock(&CPU->timeoutlock);
        }
        CPU->missed_clock_ticks = 0;

        /*
         * Do CPU usage accounting and find out whether to preempt THREAD.
         */
        if (THREAD) {
                uint64_t ticks;

                spinlock_lock(&CPU->lock);
                CPU->needs_relink += 1 + missed_clock_ticks;
                spinlock_unlock(&CPU->lock);

                spinlock_lock(&THREAD->lock);
                if ((ticks = THREAD->ticks)) {
                        if (ticks >= 1 + missed_clock_ticks)
                                THREAD->ticks -= 1 + missed_clock_ticks;
                        else
                                THREAD->ticks = 0;
                }
                spinlock_unlock(&THREAD->lock);

                if (!ticks && !PREEMPTION_DISABLED) {
                        scheduler();
                }
        }
}
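
/*
 * How a timeout typically ends up here (a sketch; timeout_initialize()
 * and timeout_register() live in time/timeout.c and their exact
 * signatures are assumptions, not defined in this file):
 *
 *      timeout_t t;
 *
 *      timeout_initialize(&t);
 *      timeout_register(&t, (uint64_t) usec, my_handler, my_arg);
 *
 * Once the registered tick count expires, clock() above removes the
 * timeout from the active structure and invokes my_handler(my_arg)
 * with the timeout lock released.
 */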

#endif

/** @}
 */