/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&(THREAD->saved_fpu_context));
	else {
		fpu_init(&(THREAD->saved_fpu_context));
		THREAD->fpu_context_exists = 1;
	}
#endif
}
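
/*
 * Note on CONFIG_FPU_LAZY: when the incoming thread does not own this CPU's
 * FPU context, the FPU is left disabled here. The expectation (see
 * scheduler_fpu_lazy_request() below) is that the thread's first FPU
 * instruction will then trap and the architecture-specific handler will
 * acquire the FPU context on demand, so threads that never touch the FPU
 * pay no save/restore cost.
 */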

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(&THREAD->saved_fpu_context);
	} else {
		fpu_init(&(THREAD->saved_fpu_context));
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif
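
/*
 * A minimal sketch (hypothetical handler name, not part of this file) of how
 * an architecture is expected to use the lazy FPU path: its "FPU disabled"
 * trap handler simply claims the context and restarts the faulting
 * instruction.
 *
 *	void fpu_disabled_trap(void)
 *	{
 *		scheduler_fpu_lazy_request();	// THREAD now owns CPU->fpu_owner
 *		// on return from the trap, the FPU instruction is re-executed
 *	}
 */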

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

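/*
 * Illustration of the time-slice assignment above: a thread taken from rq[i]
 * receives (i + 1) * 10000 microseconds, i.e. a thread from rq[0] gets a
 * 10 ms quantum and a thread from rq[3] gets 40 ms (assuming us2ticks()
 * converts microseconds to timer ticks), so threads in lower-priority queues
 * are picked less often but get longer quanta.
 */
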
/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with priority
 * greater than 'start' are moved one queue up, towards higher priority.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

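/*
 * Worked example (assuming RQ_COUNT == 16): relink_rq(2) appends rq[3] to
 * rq[2], then rq[4] to rq[3], and so on, up to rq[15] being appended to
 * rq[14]. Every thread waiting in a queue below the threshold is thus
 * promoted by exactly one priority level, which bounds how long a
 * low-priority thread can starve.
 */
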
/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(&(THREAD->saved_fpu_context));
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);
			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded here
		 * to facilitate scheduler() invocations from interrupts_disable()'d
		 * code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

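/*
 * A note on the context_save()/context_restore() idiom used above: it works
 * like setjmp()/longjmp(), except that context_save() returns true on the
 * direct call (after storing the register context) and false when control
 * arrives back at the save point via a later context_restore(). A usage
 * sketch with a hypothetical context variable:
 *
 *	context_t ctx;
 *
 *	if (!context_save(&ctx)) {
 *		// resumed here by context_restore(&ctx), e.g. when this
 *		// thread is picked again in scheduler_separated_stack()
 *	}
 */
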
/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. It handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			thread_destroy(THREAD);
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering this state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation may have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;
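
	/*
	 * Worked example of the computation above (illustrative numbers):
	 * with 4 active CPUs and 10 ready threads system-wide,
	 * average = 10 / 4 + 1 = 3; a CPU that currently has only 1 ready
	 * thread will try to steal count = 3 - 1 = 2 threads this pass.
	 */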

	/*
	 * Search the lowest priority queues on all CPU's first and the
	 * highest priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Doesn't require interrupt disabling because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads nor threads
				 * that have already been stolen. The latter prevents
				 * threads from migrating between CPU's without ever
				 * being run. We also don't want to steal threads whose
				 * FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN)) && !(t->fpu_context_engaged)) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/* We are going to mess with scheduler structures,
	 * let's not be interrupted */
	ipl = interrupts_disable();
	printf("Scheduler dump:\n");
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: nrdy: %d, needs_relink: %d\n",
		       cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				       thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}

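/*
 * Illustrative sched_print_list() output (hypothetical TIDs and counts),
 * derived from the printf() format strings above:
 *
 *	Scheduler dump:
 *	cpu0: nrdy: 2, needs_relink: 1
 *		rq[0]: 5(Ready) 8(Ready)
 *	cpu1: nrdy: 1, needs_relink: 0
 *		rq[3]: 12(Ready)
 */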