/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

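/** Number of ready threads in the whole system. */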
atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
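	/*
	 * With CONFIG_FPU_LAZY, the FPU stays disabled unless this thread
	 * already owns it; a non-owner's first FPU instruction then traps,
	 * and the trap handler is expected to call
	 * scheduler_fpu_lazy_request() below to take the FPU over.
	 */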
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
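	/*
	 * Eager FPU switching: the outgoing thread's context was saved in
	 * scheduler(); restore this thread's context now, or initialize a
	 * fresh one on its first run.
	 */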
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&(THREAD->saved_fpu_context));
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
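	/*
	 * Called when a thread touches the FPU while it is disabled, i.e.
	 * on the first FPU instruction of a thread that does not own the
	 * FPU. Take the FPU over from the previous owner, if any.
	 */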
	fpu_enable();
	if (CPU->fpu_owner != NULL) {
		fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
	}
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * If there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This saves energy and plays well with hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU would go to sleep anyway,
		 * even though a runnable thread exists.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

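	/*
	 * Scan the run queues from the highest priority (rq[0]) down to the
	 * lowest; take the first thread of the first non-empty queue.
	 */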
	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

		t->ticks = us2ticks((i + 1) * 10000);
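		/*
		 * The quantum scales with the queue index: a thread taken
		 * from rq[i] runs for (i + 1) * 10 ms before preemption.
		 */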
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when load balancing needs emerge.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}
188 | |||
107 | decky | 189 | |
190 | /** Prevent rq starvation |
||
191 | * |
||
192 | * Prevent low priority threads from starving in rq's. |
||
193 | * |
||
194 | * When the function decides to relink rq's, it reconnects |
||
195 | * respective pointers so that in result threads with 'pri' |
||
196 | * greater or equal 'start' are moved to a higher-priority queue. |
||
197 | * |
||
198 | * @param start Threshold priority. |
||
199 | * |
||
1 | jermar | 200 | */ |
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
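		/*
		 * For each i from 'start' up, drain rq[i + 1] into rq[i]:
		 * every affected thread moves one queue up, towards higher
		 * priority.
		 */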
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}
231 | |||
107 | decky | 232 | |
233 | /** Scheduler stack switch wrapper |
||
234 | * |
||
235 | * Second part of the scheduler() function |
||
236 | * using new stack. Handling the actual context |
||
237 | * switch to a new thread. |
||
238 | * |
||
787 | palkovsky | 239 | * Assume THREAD->lock is held. |
107 | decky | 240 | */ |
static void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		switch (THREAD->state) {
		case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			thread_destroy(THREAD);
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering this state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old address space with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
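/*
 * Typical call pattern (a sketch, not a verbatim caller; 'wq' stands for
 * some wait queue): a blocking primitive records why the thread stops
 * running and then enters the scheduler, e.g.
 *
 *	spinlock_lock(&THREAD->lock);
 *	THREAD->sleep_queue = wq;
 *	THREAD->state = Sleeping;
 *	spinlock_unlock(&THREAD->lock);
 *	scheduler();	<- returns when the thread is scheduled again
 */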
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(&(THREAD->saved_fpu_context));
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);
			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded here
		 * to facilitate scheduler() invocations from interrupts_disable()'d
		 * code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can have changed between two
	 * passes. Each time get the most up-to-date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);
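	/*
	 * 'count' is how many threads this CPU is short of the system-wide
	 * per-CPU average; at most that many will be stolen below.
	 */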

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Interrupt disabling is not needed because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads already stolen.
				 * The latter prevents threads from migrating between CPU's without ever being run.
				 * We also don't want to steal threads whose FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures,
	 * let's not be interrupted.
	 */
	ipl = interrupts_disable();
	printf("*********** Scheduler dump ***********\n");
	for (cpu = 0; cpu < config.cpu_count; cpu++) {
		if (!cpus[cpu].active)
			continue;
		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: nrdy: %d needs_relink: %d\n",
			cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\tRq %d: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid, thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}