/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup time
 * @{
 */

/**
 * @file
 * @brief High-level clock interrupt handler.
 *
 * This file contains the clock() function, which is the source
 * of preemption. It is also responsible for executing expired
 * timeouts.
 */

#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <func.h>
#include <proc/scheduler.h>
#include <cpu.h>
#include <arch.h>
#include <adt/list.h>
#include <atomic.h>
#include <proc/thread.h>
#include <sysinfo/sysinfo.h>
#include <arch/barrier.h>
#include <mm/frame.h>
#include <ddi/ddi.h>

/** Pointer to the uptime structure exported to userspace. */
uptime_t *uptime;

/** Physical memory area of the real time clock. */
static parea_t clock_parea;

/*
 * Fraction of the current second (in microseconds), accumulated here so
 * that whole seconds can be updated correctly; e.g. with HZ == 100, each
 * tick contributes 1000000 / HZ == 10000 microseconds.
 */
static unative_t secfrag = 0;

/** Initialize realtime clock counter
 *
 * Applications (and sometimes the kernel) need access to accurate
 * real-time data. We allocate one page for this data and update it
 * periodically.
 */
void clock_counter_init(void)
{
    void *faddr;

    faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
    if (!faddr)
        panic("Cannot allocate page for clock");

    uptime = (uptime_t *) PA2KA(faddr);

    uptime->seconds1 = 0;
    uptime->seconds2 = 0;
    uptime->useconds = 0;

    clock_parea.pbase = (uintptr_t) faddr;
    clock_parea.vbase = (uintptr_t) uptime;
    clock_parea.frames = 1;
    clock_parea.cacheable = true;
    ddi_parea_register(&clock_parea);

    /*
     * Prepare information for userspace so that it can successfully
     * physmem_map() the clock_parea.
     */
    sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
    sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}
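
/*
 * A hypothetical sketch of the consuming side: userspace is expected to
 * look up the "clock.faddr" and "clock.cacheable" sysinfo items set above
 * and then physmem_map() the frame into its own address space. The names
 * and signatures below are illustrative assumptions only, not the actual
 * HelenOS userspace API:
 *
 *     uintptr_t faddr = sysinfo_value("clock.faddr");
 *     uptime_t *up = map_one_frame(faddr);    // wrapper around physmem_map()
 *     // up->seconds1, up->useconds and up->seconds2 now mirror kernel time
 */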

/** Update public counters
 *
 * Update them only on the first processor.
 * TODO: Do we really need so many write barriers?
 */
static void clock_update_counters(void)
{
    if (CPU->id == 0) {
        secfrag += 1000000 / HZ;
        if (secfrag >= 1000000) {
            secfrag -= 1000000;
            uptime->seconds1++;
            write_barrier();
            uptime->useconds = secfrag;
            write_barrier();
            uptime->seconds2 = uptime->seconds1;
        } else
            uptime->useconds += 1000000 / HZ;
    }
}
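
/*
 * The seconds1/seconds2 pair forms a seqlock-like protocol: the writer
 * above increments seconds1, publishes useconds, and only then copies
 * seconds1 into seconds2, with write barriers in between. A reader can
 * therefore take a consistent snapshot without locking. A minimal
 * illustrative reader (not part of this file; assumes read_barrier()
 * pairing with the write barriers above):
 *
 *     unative_t secs, usecs;
 *     do {
 *         secs = uptime->seconds2;
 *         read_barrier();
 *         usecs = uptime->useconds;
 *         read_barrier();
 *     } while (secs != uptime->seconds1);
 *
 * A mismatch between seconds1 and seconds2 means the writer was caught
 * mid-update, so the reader simply retries.
 */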
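
/*
 * The data structure used to keep track of active timeouts is selected at
 * configuration time: an AVL tree keyed by absolute time
 * (CONFIG_TIMEOUT_AVL_TREE), an extended AVL tree
 * (CONFIG_TIMEOUT_EXTAVL_TREE), an extended AVL tree with relative keys
 * (CONFIG_TIMEOUT_EXTAVLREL_TREE), or the default doubly-linked list with
 * relative ticks. Each variant provides its own clock() below.
 */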
 
#if defined CONFIG_TIMEOUT_AVL_TREE

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
    timeout_t *h;
    timeout_handler_t f;
    void *arg;
    count_t missed_clock_ticks = CPU->missed_clock_ticks;
    uint64_t *i = &(CPU->timeout_active_tree.base);  /* current tick, kept as the tree's base */
    uint64_t absolute_clock_ticks = *i + missed_clock_ticks;
    avltree_node_t *expnode;

    /*
     * To avoid lock ordering problems,
     * run all expired timeouts as you visit them.
     */

    for (; *i <= absolute_clock_ticks; (*i)++) {
        /*
         * The base time is increased by missed clock ticks + 1.
         */

        clock_update_counters();
        spinlock_lock(&CPU->timeoutlock);

        /*
         * Check whether the first timeout (the one with the smallest key
         * in the tree) has expired. If so, run its handler and try the
         * next timeout (several timeouts may share the same key).
         */
        while ((expnode = avltree_find_min(&CPU->timeout_active_tree)) != NULL) {
            h = avltree_get_instance(expnode, timeout_t, node);
            spinlock_lock(&h->lock);
            if (expnode->key != *i) {
                spinlock_unlock(&h->lock);
                break;
            }

            /*
             * Delete the node with the minimal key from the tree and
             * repair the tree structure in logarithmic time.
             */
            avltree_delete_min(&CPU->timeout_active_tree);

            f = h->handler;
            arg = h->arg;
            timeout_reinitialize(h);
            spinlock_unlock(&h->lock);
            spinlock_unlock(&CPU->timeoutlock);

            f(arg);

            spinlock_lock(&CPU->timeoutlock);
        }
        spinlock_unlock(&CPU->timeoutlock);
    }

    CPU->missed_clock_ticks = 0;

    /*
     * Do CPU usage accounting and find out whether to preempt THREAD.
     */
    if (THREAD) {
        uint64_t ticks;

        spinlock_lock(&CPU->lock);
        CPU->needs_relink += 1 + missed_clock_ticks;
        spinlock_unlock(&CPU->lock);

        spinlock_lock(&THREAD->lock);
        if ((ticks = THREAD->ticks)) {
            if (ticks >= 1 + missed_clock_ticks)
                THREAD->ticks -= 1 + missed_clock_ticks;
            else
                THREAD->ticks = 0;
        }
        spinlock_unlock(&THREAD->lock);

        if (!ticks && !PREEMPTION_DISABLED) {
            scheduler();
        }
    }
}

#elif defined CONFIG_TIMEOUT_EXTAVL_TREE

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
    timeout_t *h;
    timeout_handler_t f;
    void *arg;
    count_t missed_clock_ticks = CPU->missed_clock_ticks;
    uint64_t *i = &(CPU->timeout_active_tree.base);  /* current tick, kept as the tree's base */
    uint64_t absolute_clock_ticks = *i + missed_clock_ticks;
    extavltree_node_t *expnode;

    /*
     * To avoid lock ordering problems,
     * run all expired timeouts as you visit them.
     */

    for (; *i <= absolute_clock_ticks; (*i)++) {
        /*
         * The base time is increased by missed clock ticks + 1.
         */

        clock_update_counters();
        spinlock_lock(&CPU->timeoutlock);

        /*
         * Check whether the first timeout in the list has expired. If so,
         * run its handler and try the next timeout (several timeouts may
         * share the same key).
         */
        while ((expnode = CPU->timeout_active_tree.head.next) != &(CPU->timeout_active_tree.head)) {
            h = extavltree_get_instance(expnode, timeout_t, node);
            spinlock_lock(&h->lock);
            if (expnode->key != *i) {
                spinlock_unlock(&h->lock);
                break;
            }

            /*
             * Delete the first node in the list and repair the tree
             * structure in constant time.
             */
            extavltree_delete_min(&CPU->timeout_active_tree);

            f = h->handler;
            arg = h->arg;
            timeout_reinitialize(h);
            spinlock_unlock(&h->lock);
            spinlock_unlock(&CPU->timeoutlock);

            f(arg);

            spinlock_lock(&CPU->timeoutlock);
        }
        spinlock_unlock(&CPU->timeoutlock);
    }

    CPU->missed_clock_ticks = 0;

    /*
     * Do CPU usage accounting and find out whether to preempt THREAD.
     */
    if (THREAD) {
        uint64_t ticks;

        spinlock_lock(&CPU->lock);
        CPU->needs_relink += 1 + missed_clock_ticks;
        spinlock_unlock(&CPU->lock);

        spinlock_lock(&THREAD->lock);
        if ((ticks = THREAD->ticks)) {
            if (ticks >= 1 + missed_clock_ticks)
                THREAD->ticks -= 1 + missed_clock_ticks;
            else
                THREAD->ticks = 0;
        }
        spinlock_unlock(&THREAD->lock);

        if (!ticks && !PREEMPTION_DISABLED) {
            scheduler();
        }
    }
}

#elif defined CONFIG_TIMEOUT_EXTAVLREL_TREE

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
    extavlreltree_node_t *expnode;
    timeout_t *h;
    timeout_handler_t f;
    void *arg;
    count_t missed_clock_ticks = CPU->missed_clock_ticks;
    int i;

    /*
     * To avoid lock ordering problems,
     * run all expired timeouts as you visit them.
     */
    for (i = 0; i <= missed_clock_ticks; i++) {
        clock_update_counters();
        spinlock_lock(&CPU->timeoutlock);

        /*
         * Check whether the first timeout in the list has expired. If so,
         * run its handler and try the next timeout (several timeouts may
         * share the same key).
         */
        while ((expnode = CPU->timeout_active_tree.head.next) != &(CPU->timeout_active_tree.head)) {
            h = extavlreltree_get_instance(expnode, timeout_t, node);
            spinlock_lock(&h->lock);
            if (expnode->key != 0) {
                expnode->key--;
                spinlock_unlock(&h->lock);
                break;
            }

            /*
             * Delete the first node in the list and repair the tree
             * structure in constant time. Be careful: expnode's key
             * must be 0 at this point.
             */
            extavlreltree_delete_min(&CPU->timeout_active_tree);

            f = h->handler;
            arg = h->arg;
            timeout_reinitialize(h);
            spinlock_unlock(&h->lock);
            spinlock_unlock(&CPU->timeoutlock);

            f(arg);

            spinlock_lock(&CPU->timeoutlock);
        }
        spinlock_unlock(&CPU->timeoutlock);
    }
    CPU->missed_clock_ticks = 0;

    /*
     * Do CPU usage accounting and find out whether to preempt THREAD.
     */

    if (THREAD) {
        uint64_t ticks;

        spinlock_lock(&CPU->lock);
        CPU->needs_relink += 1 + missed_clock_ticks;
        spinlock_unlock(&CPU->lock);

        spinlock_lock(&THREAD->lock);
        if ((ticks = THREAD->ticks)) {
            if (ticks >= 1 + missed_clock_ticks)
                THREAD->ticks -= 1 + missed_clock_ticks;
            else
                THREAD->ticks = 0;
        }
        spinlock_unlock(&THREAD->lock);

        if (!ticks && !PREEMPTION_DISABLED) {
            scheduler();
        }
    }
}

#else

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
    link_t *l;
    timeout_t *h;
    timeout_handler_t f;
    void *arg;
    count_t missed_clock_ticks = CPU->missed_clock_ticks;
    int i;

    /*
     * To avoid lock ordering problems,
     * run all expired timeouts as you visit them.
     */
    for (i = 0; i <= missed_clock_ticks; i++) {
        clock_update_counters();
        spinlock_lock(&CPU->timeoutlock);
        while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
            h = list_get_instance(l, timeout_t, link);
            spinlock_lock(&h->lock);
            if (h->ticks-- != 0) {
                spinlock_unlock(&h->lock);
                break;
            }
            list_remove(l);
            f = h->handler;
            arg = h->arg;
            timeout_reinitialize(h);
            spinlock_unlock(&h->lock);
            spinlock_unlock(&CPU->timeoutlock);

            f(arg);

            spinlock_lock(&CPU->timeoutlock);
        }
        spinlock_unlock(&CPU->timeoutlock);
    }
    CPU->missed_clock_ticks = 0;

    /*
     * Do CPU usage accounting and find out whether to preempt THREAD.
     */

    if (THREAD) {
        uint64_t ticks;

        spinlock_lock(&CPU->lock);
        CPU->needs_relink += 1 + missed_clock_ticks;
        spinlock_unlock(&CPU->lock);

        spinlock_lock(&THREAD->lock);
        if ((ticks = THREAD->ticks)) {
            if (ticks >= 1 + missed_clock_ticks)
                THREAD->ticks -= 1 + missed_clock_ticks;
            else
                THREAD->ticks = 0;
        }
        spinlock_unlock(&THREAD->lock);

        if (!ticks && !PREEMPTION_DISABLED) {
            scheduler();
        }
    }
}

#endif
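
/*
 * For context: the timeouts consumed by clock() are armed elsewhere via
 * the timeout API (see time/timeout.c). A hedged usage sketch, assuming
 * the conventional timeout_register() interface; the exact signature and
 * the handler below are illustrative only:
 *
 *     static void my_handler(void *arg)
 *     {
 *         printf("timeout fired\n");
 *     }
 *
 *     timeout_t t;
 *     timeout_initialize(&t);
 *     timeout_register(&t, 1000000, my_handler, NULL);  // ~1 s from now
 */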

/** @}
 */