Subversion Repositories HelenOS


Rev 4153 → Rev 4581
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup time
 * @{
 */

/**
 * @file
 * @brief   High-level clock interrupt handler.
 *
 * This file contains the clock() function, which is the source
 * of preemption. It is also responsible for executing expired
 * timeouts.
 */

#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <func.h>
#include <proc/scheduler.h>
#include <cpu.h>
#include <arch.h>
#include <adt/list.h>
#include <atomic.h>
#include <proc/thread.h>
#include <sysinfo/sysinfo.h>
#include <arch/barrier.h>
#include <mm/frame.h>
#include <ddi/ddi.h>

/* Pointer to the variable holding uptime */
uptime_t *uptime;

/** Physical memory area of the real time clock */
static parea_t clock_parea;

/* Variable holding the fraction of a second, so that seconds
 * are updated correctly.
 */
static unative_t secfrag = 0;

/** Initialize realtime clock counter
 *
 * Applications (and sometimes the kernel) need access to accurate
 * realtime information. We allocate one page for this data and
 * update it periodically.
 */
void clock_counter_init(void)
{
    void *faddr;

    faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
    if (!faddr)
        panic("Cannot allocate page for clock.");

    uptime = (uptime_t *) PA2KA(faddr);

    uptime->seconds1 = 0;
    uptime->seconds2 = 0;
    uptime->useconds = 0;

    clock_parea.pbase = (uintptr_t) faddr;
    clock_parea.frames = 1;
    ddi_parea_register(&clock_parea);

    /*
     * Prepare information for userspace so that it can successfully
     * physmem_map() the clock_parea.
     */
    sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
    sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}
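
/*
 * Userspace consumers are expected to read the clock.faddr and
 * clock.cacheable sysinfo items published above and physmem_map() the
 * registered frame; the mapped page contains the uptime_t structure that
 * clock_update_counters() below keeps current.
 */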


/** Update public counters
 *
 * Update them only on the first processor.
 * TODO: Do we really need so many write barriers?
 */
static void clock_update_counters(void)
{
    if (CPU->id == 0) {
        secfrag += 1000000 / HZ;
        if (secfrag >= 1000000) {
            secfrag -= 1000000;
            uptime->seconds1++;
            write_barrier();
            uptime->useconds = secfrag;
            write_barrier();
            uptime->seconds2 = uptime->seconds1;
        } else
            uptime->useconds += 1000000 / HZ;
    }
}
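
/*
 * Illustrative sketch, not part of this revision: how a reader of the
 * exported uptime page can take a consistent snapshot.  The writer above
 * bumps seconds1, publishes useconds, then copies seconds1 into seconds2,
 * with write barriers in between; a reader that observes
 * seconds1 == seconds2 after reading in the opposite order therefore got
 * a useconds value belonging to that second.  The unative_t local types
 * and the pairing of read_barrier() with the writer's write_barrier()
 * are assumptions.
 */
static void uptime_snapshot(uptime_t *up, unative_t *sec, unative_t *usec)
{
    unative_t s1, s2, us;

    do {
        s2 = up->seconds2;
        read_barrier();
        us = up->useconds;
        read_barrier();
        s1 = up->seconds1;
    } while (s1 != s2);    /* writer was mid-update; try again */

    *sec = s1;
    *usec = us;
}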

/** Clock routine
 *
 * Clock routine executed from the clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and performs preemptive scheduling.
 *
 */
void clock(void)
{
    link_t *l;
    timeout_t *h;
    timeout_handler_t f;
    void *arg;
-   count_t missed_clock_ticks = CPU->missed_clock_ticks;
+   size_t missed_clock_ticks = CPU->missed_clock_ticks;
    unsigned int i;

    /*
     * To avoid lock ordering problems,
     * run all expired timeouts as you visit them.
     */
    for (i = 0; i <= missed_clock_ticks; i++) {
        clock_update_counters();
        spinlock_lock(&CPU->timeoutlock);
        while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
            h = list_get_instance(l, timeout_t, link);
            spinlock_lock(&h->lock);
            if (h->ticks-- != 0) {
                spinlock_unlock(&h->lock);
                break;
            }
            list_remove(l);
            f = h->handler;
            arg = h->arg;
            timeout_reinitialize(h);
            spinlock_unlock(&h->lock);
            spinlock_unlock(&CPU->timeoutlock);
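
            /*
             * Both h->lock and CPU->timeoutlock are released before the
             * handler is invoked, so a handler that itself manipulates
             * timeouts (and thus takes these locks) cannot deadlock here.
             */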
            f(arg);

            spinlock_lock(&CPU->timeoutlock);
        }
        spinlock_unlock(&CPU->timeoutlock);
    }
    CPU->missed_clock_ticks = 0;

    /*
     * Do CPU usage accounting and find out whether to preempt THREAD.
     */

    if (THREAD) {
        uint64_t ticks;

        spinlock_lock(&CPU->lock);
        CPU->needs_relink += 1 + missed_clock_ticks;
        spinlock_unlock(&CPU->lock);

        spinlock_lock(&THREAD->lock);
        if ((ticks = THREAD->ticks)) {
            if (ticks >= 1 + missed_clock_ticks)
                THREAD->ticks -= 1 + missed_clock_ticks;
            else
                THREAD->ticks = 0;
        }
        spinlock_unlock(&THREAD->lock);

        if (!ticks && !PREEMPTION_DISABLED) {
#ifdef CONFIG_UDEBUG
            istate_t *istate;
#endif
            scheduler();
#ifdef CONFIG_UDEBUG
            /*
             * Give udebug a chance to stop the thread
             * before it begins executing userspace code.
             */
            istate = THREAD->udebug.uspace_state;
            if (istate && istate_from_uspace(istate))
                udebug_before_thread_runs();
#endif
        }
    }

}
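
/*
 * Illustrative sketch, not part of this revision: registering a timeout that
 * the loop in clock() above will eventually fire.  The interface is assumed
 * to be the one declared in time/timeout.h, i.e. timeout_initialize() and
 * timeout_register(t, time_usec, handler, arg), with the handler matching
 * the f(arg) call made by clock().  The demo_* names are placeholders.
 */
static volatile bool demo_fired = false;

static void demo_timeout_handler(void *arg)
{
    /* Runs in interrupt context, after clock() has removed and
     * reinitialized the timeout. */
    *((volatile bool *) arg) = true;
}

static void demo_timeout_setup(void)
{
    static timeout_t demo_timeout;

    timeout_initialize(&demo_timeout);
    /* Ask for demo_timeout_handler() to run roughly 100 ms from now
     * (assuming the time argument is in microseconds). */
    timeout_register(&demo_timeout, 100000, demo_timeout_handler,
        (void *) &demo_fired);
}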

/** @}
 */