Line | Rev 430 | Line | Rev 452
---|---|---|---
Line 114... | | Line 114... |
114 | * policy. | 114 | * policy.
115 | * | 115 | *
116 | * @return Thread to be scheduled. | 116 | * @return Thread to be scheduled.
117 | * | 117 | *
118 | */ | 118 | */
119 | struct thread *find_best_thread(void) | 119 | static struct thread *find_best_thread(void)
120 | { | 120 | {
121 | thread_t *t; | 121 | thread_t *t;
122 | runq_t *r; | 122 | runq_t *r;
123 | int i, n; | 123 | int i, n;
124 | | 124 |
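
The body of find_best_thread() is folded out of this hunk (the listing resumes at line 220). For orientation, here is a minimal sketch of the kind of multilevel run-queue scan the doc comment describes; RQ_COUNT, the thread_t/runq_t layouts, and the absence of per-queue spinlocks are illustrative assumptions, not the kernel's actual definitions.

    #include <stddef.h>

    #define RQ_COUNT 16                 /* assumed number of priority levels */

    typedef struct thread {
        struct thread *next;            /* intrusive run-queue link */
        int priority;
    } thread_t;

    typedef struct {
        thread_t *head;                 /* FIFO of ready threads */
        size_t n;                       /* count of threads queued here */
    } runq_t;

    static runq_t rq[RQ_COUNT];         /* index 0 = highest priority */

    /* Scan from the highest-priority queue down and dequeue the first
     * runnable thread; the real routine also takes per-queue spinlocks
     * and loops (or halts the CPU) when every queue turns up empty. */
    static thread_t *find_best_thread_sketch(void)
    {
        for (int i = 0; i < RQ_COUNT; i++) {
            runq_t *r = &rq[i];
            if (r->n == 0)
                continue;               /* nothing runnable at this level */
            thread_t *t = r->head;      /* pop from the head (FIFO order) */
            r->head = t->next;
            t->next = NULL;
            r->n--;
            return t;
        }
        return NULL;                    /* caller would idle and retry */
    }
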
Line 220... | | Line 220... |
220 | * greater than or equal to 'start' are moved to a higher-priority queue. | 220 | * greater than or equal to 'start' are moved to a higher-priority queue.
221 | * | 221 | *
222 | * @param start Threshold priority. | 222 | * @param start Threshold priority.
223 | * | 223 | *
224 | */ | 224 | */
225 | void relink_rq(int start) | 225 | static void relink_rq(int start)
226 | { | 226 | {
227 | link_t head; | 227 | link_t head;
228 | runq_t *r; | 228 | runq_t *r;
229 | int i, n; | 229 | int i, n;
230 | | 230 |
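
relink_rq()'s body is likewise elided from this hunk. As a sketch of the pass its doc comment describes — splicing each run queue into the next higher-priority one, starting at the 'start' threshold — here is a simplification reusing the stand-in rq[] and thread_t from the sketch above; the real code works under the CPU and per-queue locks with the kernel's list primitives, all of which this omits.

    /* Promote the contents of rq[i + 1] into rq[i] for i = start .. RQ_COUNT-2,
     * so threads that decayed into low-priority queues drift back up over
     * time. Locking and the relink bookkeeping are omitted from this sketch. */
    static void relink_rq_sketch(int start)
    {
        for (int i = start; i < RQ_COUNT - 1; i++) {
            /* find the tail of rq[i], then splice rq[i + 1] onto it */
            thread_t **tail = &rq[i].head;
            while (*tail != NULL)
                tail = &(*tail)->next;
            *tail = rq[i + 1].head;
            rq[i].n += rq[i + 1].n;

            /* rq[i + 1] is now empty */
            rq[i + 1].head = NULL;
            rq[i + 1].n = 0;
        }
    }
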
Line 252... | | Line 252... |
252 | spinlock_unlock(&CPU->lock); | 252 | spinlock_unlock(&CPU->lock);
253 | | 253 |
254 | } | 254 | }
255 | | 255 |
256 | | 256 |
257 | /** The scheduler | - |
258 | * | - |
259 | * The thread scheduling procedure. | - |
260 | * | - |
261 | */ | - |
262 | void scheduler(void) | - |
263 | { | - |
264 | volatile ipl_t ipl; | - |
265 | | - |
266 | ASSERT(CPU != NULL); | - |
267 | | - |
268 | ipl = interrupts_disable(); | - |
269 | | - |
270 | if (haltstate) | - |
271 | halt(); | - |
272 | | - |
273 | if (THREAD) { | - |
274 | spinlock_lock(&THREAD->lock); | - |
275 | #ifndef FPU_LAZY | - |
276 | fpu_context_save(&(THREAD->saved_fpu_context)); | - |
277 | #endif | - |
278 | if (!context_save(&THREAD->saved_context)) { | - |
279 | /* | - |
280 | * This is the place where threads leave scheduler(); | - |
281 | */ | - |
282 | before_thread_runs(); | - |
283 | spinlock_unlock(&THREAD->lock); | - |
284 | interrupts_restore(THREAD->saved_context.ipl); | - |
285 | return; | - |
286 | } | - |
287 | | - |
288 | /* | - |
289 | * Interrupt priority level of preempted thread is recorded here | - |
290 | * to facilitate scheduler() invocations from interrupts_disable()'d | - |
291 | * code (e.g. waitq_sleep_timeout()). | - |
292 | */ | - |
293 | THREAD->saved_context.ipl = ipl; | - |
294 | } | - |
295 | | - |
296 | /* | - |
297 | * Through the 'THE' structure, we keep track of THREAD, TASK, CPU | - |
298 | * and preemption counter. At this point THE could be coming either | - |
299 | * from THREAD's or CPU's stack. | - |
300 | */ | - |
301 | the_copy(THE, (the_t *) CPU->stack); | - |
302 | | - |
303 | /* | - |
304 | * We may not keep the old stack. | - |
305 | * Reason: If we kept the old stack and got blocked, for instance, in | - |
306 | * find_best_thread(), the old thread could get rescheduled by another | - |
307 | * CPU and overwrite the part of its own stack that was also used by | - |
308 | * the scheduler on this CPU. | - |
309 | * | - |
310 | * Moreover, we have to bypass the compiler-generated POP sequence | - |
311 | * which is fooled by SP being set to the very top of the stack. | - |
312 | * Therefore the scheduler() function continues in | - |
313 | * scheduler_separated_stack(). | - |
314 | */ | - |
315 | context_save(&CPU->saved_context); | - |
316 | context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE); | - |
317 | context_restore(&CPU->saved_context); | - |
318 | /* not reached */ | - |
319 | } | - |
320 | | - |
321 | | - |
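
The subtlest line in the block above is `if (!context_save(&THREAD->saved_context))`: context_save() effectively returns twice — nonzero when it stores the context, and zero again later when a subsequent pass through the scheduler runs context_restore() on it, which is "the place where threads leave scheduler()". Here is a userspace analogy using standard setjmp()/longjmp(); note that setjmp()'s return convention is the inverse of the one the scheduler code implies for context_save().

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf resume_point;

    int main(void)
    {
        /* setjmp() returns 0 on the direct call and nonzero when control
         * re-enters via longjmp(); context_save() appears to use the
         * opposite convention, hence the !context_save(...) test above. */
        if (setjmp(resume_point)) {
            puts("resumed here later: this mirrors the !context_save() branch");
            return 0;
        }
        puts("context saved: go pick another thread to run");
        longjmp(resume_point, 1);   /* plays the role of context_restore() */
    }
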
322 | /** Scheduler stack switch wrapper | 257 | /** Scheduler stack switch wrapper
323 | * | 258 | *
324 | * Second part of the scheduler() function, | 259 | * Second part of the scheduler() function,
325 | * using the new stack. Handles the actual | 260 | * using the new stack. Handles the actual
326 | * context switch to a new thread. | 261 | * context switch to a new thread.
327 | * | 262 | *
328 | */ | 263 | */
329 | void scheduler_separated_stack(void) | 264 | static void scheduler_separated_stack(void)
330 | { | 265 | {
331 | int priority; | 266 | int priority;
332 | | 267 |
333 | ASSERT(CPU != NULL); | 268 | ASSERT(CPU != NULL);
334 | | 269 |
Line 457... | | Line 392... |
457 | context_restore(&THREAD->saved_context); | 392 | context_restore(&THREAD->saved_context);
458 | /* not reached */ | 393 | /* not reached */
459 | } | 394 | }
460 | | 395 |
461 | | 396 |
- | | 397 | /** The scheduler
- | | 398 | *
- | | 399 | * The thread scheduling procedure.
- | | 400 | *
- | | 401 | */
- | | 402 | void scheduler(void)
- | | 403 | {
- | | 404 | volatile ipl_t ipl;
- | | 405 |
- | | 406 | ASSERT(CPU != NULL);
- | | 407 |
- | | 408 | ipl = interrupts_disable();
- | | 409 |
- | | 410 | if (haltstate)
- | | 411 | halt();
- | | 412 |
- | | 413 | if (THREAD) {
- | | 414 | spinlock_lock(&THREAD->lock);
- | | 415 | #ifndef FPU_LAZY
- | | 416 | fpu_context_save(&(THREAD->saved_fpu_context));
- | | 417 | #endif
- | | 418 | if (!context_save(&THREAD->saved_context)) {
- | | 419 | /*
- | | 420 | * This is the place where threads leave scheduler();
- | | 421 | */
- | | 422 | before_thread_runs();
- | | 423 | spinlock_unlock(&THREAD->lock);
- | | 424 | interrupts_restore(THREAD->saved_context.ipl);
- | | 425 | return;
- | | 426 | }
- | | 427 |
- | | 428 | /*
- | | 429 | * Interrupt priority level of preempted thread is recorded here
- | | 430 | * to facilitate scheduler() invocations from interrupts_disable()'d
- | | 431 | * code (e.g. waitq_sleep_timeout()).
- | | 432 | */
- | | 433 | THREAD->saved_context.ipl = ipl;
- | | 434 | }
- | | 435 |
- | | 436 | /*
- | | 437 | * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
- | | 438 | * and preemption counter. At this point THE could be coming either
- | | 439 | * from THREAD's or CPU's stack.
- | | 440 | */
- | | 441 | the_copy(THE, (the_t *) CPU->stack);
- | | 442 |
- | | 443 | /*
- | | 444 | * We may not keep the old stack.
- | | 445 | * Reason: If we kept the old stack and got blocked, for instance, in
- | | 446 | * find_best_thread(), the old thread could get rescheduled by another
- | | 447 | * CPU and overwrite the part of its own stack that was also used by
- | | 448 | * the scheduler on this CPU.
- | | 449 | *
- | | 450 | * Moreover, we have to bypass the compiler-generated POP sequence
- | | 451 | * which is fooled by SP being set to the very top of the stack.
- | | 452 | * Therefore the scheduler() function continues in
- | | 453 | * scheduler_separated_stack().
- | | 454 | */
- | | 455 | context_save(&CPU->saved_context);
- | | 456 | context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
- | | 457 | context_restore(&CPU->saved_context);
- | | 458 | /* not reached */
- | | 459 | }
- | | 460 |
- | | 461 |
- | | 462 |
- | | 463 |
- | | 464 |
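
The context_save()/context_set()/context_restore() triple that ends scheduler() is what hops onto the private per-CPU stack before scheduler_separated_stack() takes over. The real primitives are architecture-specific assembly; the following sketch only illustrates the contract the comment above relies on, with assumed field names and without the small stack-pointer bias real ports apply.

    #include <stddef.h>
    #include <stdint.h>

    /* Assumed shape of a saved context: program counter, stack pointer,
     * saved interrupt level, plus callee-saved registers (elided). */
    typedef struct {
        uintptr_t pc;    /* where context_restore() resumes execution */
        uintptr_t sp;    /* stack pointer to resume with */
        uintptr_t ipl;   /* interrupt priority level, as saved above */
    } sketch_context_t;

    /* Re-aim a saved context at a fresh entry point and stack, mirroring
     * context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
     * (__address) CPU->stack, CPU_STACK_SIZE) in the code above. Stacks
     * grow down, so SP starts at the very top of the new stack -- exactly
     * the state the compiler-generated POP/return sequence cannot unwind,
     * which is why control jumps into scheduler_separated_stack() rather
     * than returning. */
    static void sketch_context_set(sketch_context_t *ctx, void (*entry)(void),
                                   void *stack_base, size_t size)
    {
        ctx->pc = (uintptr_t) entry;
        ctx->sp = (uintptr_t) stack_base + size;
    }
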
462 | #ifdef __SMP__ | 465 | #ifdef __SMP__
463 | /** Load balancing thread | 466 | /** Load balancing thread
464 | * | 467 | *
465 | * SMP load balancing thread; supervises the supply | 468 | * SMP load balancing thread; supervises the supply
466 | * of threads for the CPU it is wired to. | 469 | * of threads for the CPU it is wired to.