Diff between Rev 897 and Rev 898 (most recent revision: Rev 906)

--- Rev 897
+++ Rev 898
@@ -45,11 +45,13 @@
 #include <typedefs.h>
 #include <cpu.h>
 #include <print.h>
 #include <debug.h>
 
-atomic_t nrdy;
+static void scheduler_separated_stack(void);
+
+atomic_t nrdy; /**< Number of ready threads in the system. */
 
 /** Take actions before new thread runs.
  *
  * Perform actions that need to be
  * taken before the newly selected
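Note (not part of either revision): the new forward declaration of scheduler_separated_stack() is what allows scheduler(), which this revision relocates further down in the file, to reference the function before its definition. The definition itself drops the static keyword later in this diff, but linkage stays internal because a prior static declaration is visible. A minimal standalone sketch of that C rule follows; the names are hypothetical and do not come from the kernel source.

/* Hypothetical example illustrating the linkage rule (C99 6.2.2). */
#include <stdio.h>

static void helper(void);        /* first declaration: internal linkage */

int main(void)
{
        helper();                /* legal: the call precedes the definition */
        return 0;
}

void helper(void)                /* no 'static' here, yet linkage stays internal */
{
        printf("helper\n");
}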
@@ -59,31 +61,31 @@
  *
  */
 void before_thread_runs(void)
 {
         before_thread_runs_arch();
 #ifdef CONFIG_FPU_LAZY
         if(THREAD==CPU->fpu_owner)
                 fpu_enable();
         else
                 fpu_disable();
 #else
         fpu_enable();
         if (THREAD->fpu_context_exists)
                 fpu_context_restore(&(THREAD->saved_fpu_context));
         else {
                 fpu_init(&(THREAD->saved_fpu_context));
                 THREAD->fpu_context_exists=1;
         }
 #endif
 }
 
-/** Take actions after old thread ran.
+/** Take actions after THREAD had run.
  *
  * Perform actions that need to be
  * taken after the running thread
- * was preempted by the scheduler.
+ * had been preempted by the scheduler.
  *
  * THREAD->lock is locked on entry
  *
  */
 void after_thread_ran(void)
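Note (not part of either revision): the two branches of before_thread_runs() above correspond to the two FPU switching strategies. With CONFIG_FPU_LAZY the FPU is enabled only for the thread that already owns the per-CPU FPU state and is left disabled otherwise, so the first FPU instruction presumably traps and the lazy path shown in the next hunk saves the previous owner's context and restores (or initializes) the new one on demand. Without the option, every switch eagerly restores or initializes the saved context. The trap-driven part is an inference from the enable/disable pattern here, not something this diff shows directly.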
@@ -105,20 +107,20 @@
                 CPU->fpu_owner->fpu_context_engaged=0;
                 spinlock_unlock(&CPU->fpu_owner->lock);
         }
 
         spinlock_lock(&THREAD->lock);
-        if (THREAD->fpu_context_exists)
+        if (THREAD->fpu_context_exists) {
                 fpu_context_restore(&THREAD->saved_fpu_context);
-        else {
+        } else {
                 fpu_init(&(THREAD->saved_fpu_context));
                 THREAD->fpu_context_exists=1;
         }
         CPU->fpu_owner=THREAD;
         THREAD->fpu_context_engaged = 1;
-
         spinlock_unlock(&THREAD->lock);
+
         spinlock_unlock(&CPU->lock);
 }
 #endif
 
 /** Initialize scheduler
@@ -128,11 +130,10 @@
  */
 void scheduler_init(void)
 {
 }
 
-
 /** Get thread to be scheduled
  *
  * Get the optimal thread to be scheduled
  * according to thread accounting and scheduler
  * policy.
@@ -168,12 +169,11 @@
                 goto loop;
         }
 
         interrupts_disable();
 
-        i = 0;
-        for (; i<RQ_COUNT; i++) {
+        for (i = 0; i<RQ_COUNT; i++) {
                 r = &CPU->rq[i];
                 spinlock_lock(&r->lock);
                 if (r->n == 0) {
                         /*
                          * If this queue is empty, try a lower-priority queue.
196 | 196 | ||
197 | spinlock_lock(&t->lock); |
197 | spinlock_lock(&t->lock); |
198 | t->cpu = CPU; |
198 | t->cpu = CPU; |
199 | 199 | ||
200 | t->ticks = us2ticks((i+1)*10000); |
200 | t->ticks = us2ticks((i+1)*10000); |
201 | t->priority = i; /* eventually correct rq index */ |
201 | t->priority = i; /* correct rq index */ |
202 | 202 | ||
203 | /* |
203 | /* |
204 | * Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge. |
204 | * Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge. |
205 | */ |
205 | */ |
206 | t->flags &= ~X_STOLEN; |
206 | t->flags &= ~X_STOLEN; |
Line 210... | Line 210... | ||
210 | } |
210 | } |
211 | goto loop; |
211 | goto loop; |
212 | 212 | ||
213 | } |
213 | } |
214 | 214 | ||
215 | - | ||
216 | /** Prevent rq starvation |
215 | /** Prevent rq starvation |
217 | * |
216 | * |
218 | * Prevent low priority threads from starving in rq's. |
217 | * Prevent low priority threads from starving in rq's. |
219 | * |
218 | * |
220 | * When the function decides to relink rq's, it reconnects |
219 | * When the function decides to relink rq's, it reconnects |
@@ -253,27 +252,91 @@
         }
         spinlock_unlock(&CPU->lock);
 
 }
 
+/** The scheduler
+ *
+ * The thread scheduling procedure.
+ * Passes control directly to
+ * scheduler_separated_stack().
+ *
+ */
+void scheduler(void)
+{
+        volatile ipl_t ipl;
+
+        ASSERT(CPU != NULL);
+
+        ipl = interrupts_disable();
+
+        if (atomic_get(&haltstate))
+                halt();
+
+        if (THREAD) {
+                spinlock_lock(&THREAD->lock);
+#ifndef CONFIG_FPU_LAZY
+                fpu_context_save(&(THREAD->saved_fpu_context));
+#endif
+                if (!context_save(&THREAD->saved_context)) {
+                        /*
+                         * This is the place where threads leave scheduler();
+                         */
+                        spinlock_unlock(&THREAD->lock);
+                        interrupts_restore(THREAD->saved_context.ipl);
+                        return;
+                }
+
+                /*
+                 * Interrupt priority level of preempted thread is recorded here
+                 * to facilitate scheduler() invocations from interrupts_disable()'d
+                 * code (e.g. waitq_sleep_timeout()).
+                 */
+                THREAD->saved_context.ipl = ipl;
+        }
+
+        /*
+         * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
+         * and preemption counter. At this point THE could be coming either
+         * from THREAD's or CPU's stack.
+         */
+        the_copy(THE, (the_t *) CPU->stack);
+
+        /*
+         * We may not keep the old stack.
+         * Reason: If we kept the old stack and got blocked, for instance, in
+         * find_best_thread(), the old thread could get rescheduled by another
+         * CPU and overwrite the part of its own stack that was also used by
+         * the scheduler on this CPU.
+         *
+         * Moreover, we have to bypass the compiler-generated POP sequence
+         * which is fooled by SP being set to the very top of the stack.
+         * Therefore the scheduler() function continues in
+         * scheduler_separated_stack().
+         */
+        context_save(&CPU->saved_context);
+        context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
+        context_restore(&CPU->saved_context);
+        /* not reached */
+}
 
 /** Scheduler stack switch wrapper
  *
  * Second part of the scheduler() function
  * using new stack. Handling the actual context
  * switch to a new thread.
  *
  * Assume THREAD->lock is held.
  */
-static void scheduler_separated_stack(void)
+void scheduler_separated_stack(void)
 {
         int priority;
 
         ASSERT(CPU != NULL);
 
         if (THREAD) {
-                /* must be run after switch to scheduler stack */
+                /* must be run after the switch to scheduler stack */
                 after_thread_ran();
 
                 switch (THREAD->state) {
                     case Running:
                         THREAD->state = Ready;
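Note (not part of either revision): scheduler() relies on context_save()/context_restore() behaving as a "returns twice" primitive: context_save() returns once when the context is captured, and the !context_save(&THREAD->saved_context) branch runs later, when some CPU restores that context and the preempted thread leaves scheduler() (as the in-code comment says). A rough standard-C analogy using setjmp()/longjmp() is sketched below; note that setjmp() uses the opposite return convention (0 on the direct call), and the kernel uses its own context primitives, not these library calls.

/* Hypothetical illustration only; the scheduler uses context_save()/
 * context_restore(), not setjmp()/longjmp(). */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf saved;

int main(void)
{
        if (setjmp(saved) == 0) {
                /* Direct return: the execution context has just been captured,
                 * roughly where scheduler() calls context_save(). */
                printf("context saved, switching away\n");
                longjmp(saved, 1);      /* roughly analogous to context_restore() */
        }

        /* Second return: control resumes at the saved point, roughly the
         * branch where threads leave scheduler() after being rescheduled. */
        printf("context restored, thread resumes\n");
        return 0;
}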
@@ -319,11 +382,10 @@
                 }
 
                 THREAD = NULL;
         }
 
-
         THREAD = find_best_thread();
 
         spinlock_lock(&THREAD->lock);
         priority = THREAD->priority;
         spinlock_unlock(&THREAD->lock);
@@ -385,80 +447,10 @@
 
         context_restore(&THREAD->saved_context);
         /* not reached */
 }
 
-
-/** The scheduler
- *
- * The thread scheduling procedure.
- * Passes control directly to
- * scheduler_separated_stack().
- *
- */
-void scheduler(void)
-{
-        volatile ipl_t ipl;
-
-        ASSERT(CPU != NULL);
-
-        ipl = interrupts_disable();
-
-        if (atomic_get(&haltstate))
-                halt();
-
-        if (THREAD) {
-                spinlock_lock(&THREAD->lock);
-#ifndef CONFIG_FPU_LAZY
-                fpu_context_save(&(THREAD->saved_fpu_context));
-#endif
-                if (!context_save(&THREAD->saved_context)) {
-                        /*
-                         * This is the place where threads leave scheduler();
-                         */
-                        spinlock_unlock(&THREAD->lock);
-                        interrupts_restore(THREAD->saved_context.ipl);
-                        return;
-                }
-
-                /*
-                 * Interrupt priority level of preempted thread is recorded here
-                 * to facilitate scheduler() invocations from interrupts_disable()'d
-                 * code (e.g. waitq_sleep_timeout()).
-                 */
-                THREAD->saved_context.ipl = ipl;
-        }
-
-        /*
-         * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
-         * and preemption counter. At this point THE could be coming either
-         * from THREAD's or CPU's stack.
-         */
-        the_copy(THE, (the_t *) CPU->stack);
-
-        /*
-         * We may not keep the old stack.
-         * Reason: If we kept the old stack and got blocked, for instance, in
-         * find_best_thread(), the old thread could get rescheduled by another
-         * CPU and overwrite the part of its own stack that was also used by
-         * the scheduler on this CPU.
-         *
-         * Moreover, we have to bypass the compiler-generated POP sequence
-         * which is fooled by SP being set to the very top of the stack.
-         * Therefore the scheduler() function continues in
-         * scheduler_separated_stack().
-         */
-        context_save(&CPU->saved_context);
-        context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
-        context_restore(&CPU->saved_context);
-        /* not reached */
-}
-
-
-
-
-
 #ifdef CONFIG_SMP
 /** Load balancing thread
  *
  * SMP load balancing thread, supervising thread supplies
  * for the CPU it's wired to.
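Note (not part of either revision): as far as this view shows, the scheduler() body removed here is line for line the same as the one inserted earlier at new lines 257-320, so this pair of hunks is a pure relocation of the function (made possible by the new forward declaration of scheduler_separated_stack()), not a behavioral change.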
@@ -610,26 +602,28 @@
         link_t *cur;
 
         /* We are going to mess with scheduler structures,
          * let's not be interrupted */
         ipl = interrupts_disable();
-        printf("*********** Scheduler dump ***********\n");
+        printf("Scheduler dump:\n");
         for (cpu=0;cpu < config.cpu_count; cpu++) {
+
                 if (!cpus[cpu].active)
                         continue;
+
                 spinlock_lock(&cpus[cpu].lock);
-                printf("cpu%d: nrdy: %d needs_relink: %d\n",
+                printf("cpu%d: nrdy: %d, needs_relink: %d\n",
                         cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
 
                 for (i=0; i<RQ_COUNT; i++) {
                         r = &cpus[cpu].rq[i];
                         spinlock_lock(&r->lock);
                         if (!r->n) {
                                 spinlock_unlock(&r->lock);
                                 continue;
                         }
-                        printf("\tRq %d: ", i);
+                        printf("\trq[%d]: ", i);
                         for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
                                 t = list_get_instance(cur, thread_t, rq_link);
                                 printf("%d(%s) ", t->tid,
                                         thread_states[t->state]);
                         }
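Note (not part of either revision): with the new format strings, one CPU's entry in the dump would look roughly like the following. The thread IDs, counters and states are made up for illustration, and the real function prints further lines not shown in this hunk.

Scheduler dump:
cpu0: nrdy: 3, needs_relink: 0
        rq[0]: 1(Ready) 5(Ready)
        rq[4]: 7(Ready)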