Rev 783 | Rev 784

(In each hunk below, the first number is the line in Rev 783 and the second the line in Rev 784; "-" means the line does not exist in that revision.)
Line 129 (Rev 783) | Line 129 (Rev 784)

  129  129      if (atomic_get(&CPU->nrdy) == 0) {
  130  130          /*
  131  131           * For there was nothing to run, the CPU goes to sleep
  132  132           * until a hardware interrupt or an IPI comes.
  133  133           * This improves energy saving and hyperthreading.
    -  134           *
    -  135           * - we might get an interrupt here that makes some thread runnable,
    -  136           *   in such a case we must wait for the next quantum to come
  134  137           */
  135  138          cpu_sleep();
  136  139          goto loop;
  137  140      }
  138  141
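The comment added in Rev 784 spells out the idle behaviour: with no ready threads the CPU sleeps until a hardware interrupt or an IPI arrives, and a thread made runnable by that interrupt is only picked up on the next pass through the loop. Below is a minimal user-space sketch of that check / sleep / re-check shape; nrdy and cpu_sleep_stub() are illustrative stand-ins for CPU->nrdy and cpu_sleep(), not the kernel's API.

```c
#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for the kernel pieces: nrdy models CPU->nrdy and
 * cpu_sleep_stub() models cpu_sleep(); both are illustrative only. */
static atomic_int nrdy;

static void cpu_sleep_stub(void)
{
    /* In the kernel this halts the CPU until a hardware interrupt or an
     * IPI arrives; here we just pretend an interrupt woke one thread. */
    atomic_fetch_add(&nrdy, 1);
}

int main(void)
{
loop:
    if (atomic_load(&nrdy) == 0) {
        /* Nothing to run: sleep, then go around the loop again, so a
         * thread woken by the interrupt is only scheduled on the next
         * pass (the next quantum). */
        cpu_sleep_stub();
        goto loop;
    }
    printf("woke up with %d ready thread(s)\n", atomic_load(&nrdy));
    return 0;
}
```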
Line 217 (Rev 783) | Line 220 (Rev 784)

  217  220              r->n += n;
  218  221              spinlock_unlock(&r->lock);
  219  222          }
  220  223          CPU->needs_relink = 0;
  221  224      }
  222  225      spinlock_unlock(&CPU->lock);
  223  226
  224  227  }
  225  228
  226  229
  227  230  /** Scheduler stack switch wrapper
Line 460 (Rev 783) | Line 463 (Rev 784)

  460  463      /*
  461  464       * Calculate the number of threads that will be migrated/stolen from
  462  465       * other CPU's. Note that situation can have changed between two
  463  466       * passes. Each time get the most up to date counts.
  464  467       */
  465    -      average = atomic_get(&nrdy) / config.cpu_active;
    -  468      average = atomic_get(&nrdy) / config.cpu_active + 1;
  466  469      count = average - atomic_get(&CPU->nrdy);
  467  470
  468    -      if (count < 0)
    -  471      if (count <= 0)
  469  472          goto satisfied;
  470  473
  471    -      if (!count) { /* Try to steal threads from CPU's that have more then average count */
  472    -          count = 1;
  473    -          average += 1;
  474    -      }
  475    -
  476  474      /*
  477  475       * Searching least priority queues on all CPU's first and most priority queues on all CPU's last.
  478  476       */
  479  477      for (j=RQ_COUNT-1; j >= 0; j--) {
  480  478          for (i=0; i < config.cpu_active; i++) {
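The Rev 784 change folds the removed "steal one even at the average" special case into the arithmetic itself: with average computed as atomic_get(&nrdy) / config.cpu_active + 1, a CPU sitting exactly at the old average now gets count == 1, so the single count <= 0 test replaces both the old count < 0 test and the extra branch. A worked example with made-up numbers (nothing below is taken from the kernel):

```c
#include <stdio.h>

/* Worked example of the changed balancing formula; the numbers are made up.
 * With the old average, a CPU sitting exactly at it computed count == 0 and
 * needed the removed "steal one anyway" branch; adding 1 to the average
 * folds that case into the single count <= 0 test. */
int main(void)
{
    long nrdy = 9;        /* total ready threads, system-wide */
    long cpu_active = 4;  /* active CPUs */
    long my_nrdy = 2;     /* ready threads on this CPU */

    long old_average = nrdy / cpu_active;      /* 2 */
    long new_average = nrdy / cpu_active + 1;  /* 3 */

    printf("old count = %ld\n", old_average - my_nrdy);  /* 0: special case */
    printf("new count = %ld\n", new_average - my_nrdy);  /* 1: steal one */
    return 0;
}
```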
Line 491 (Rev 783) | Line 489 (Rev 784)

  491  489              if (CPU == cpu)
  492  490                  continue;
  493  491              if (atomic_get(&cpu->nrdy) <= average)
  494  492                  continue;
  495  493
  496    -  restart:    ipl = interrupts_disable();
    -  494              ipl = interrupts_disable();
  497  495              r = &cpu->rq[j];
  498  496              spinlock_lock(&r->lock);
  499  497              if (r->n == 0) {
  500  498                  spinlock_unlock(&r->lock);
  501  499                  interrupts_restore(ipl);
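Throughout the stealing loop the pattern around each run-queue lock is: save the current interrupt state, disable interrupts, take the lock, and later restore the saved state rather than unconditionally re-enabling. A self-contained sketch of that bracket, with fake_interrupts_disable() and fake_interrupts_restore() as invented stand-ins for the kernel primitives:

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for interrupts_disable()/interrupts_restore();
 * in the hunk above the saved state is the value named ipl. */
typedef bool fake_ipl_t;
static bool interrupts_on = true;

static fake_ipl_t fake_interrupts_disable(void)
{
    fake_ipl_t old = interrupts_on;
    interrupts_on = false;
    return old;
}

static void fake_interrupts_restore(fake_ipl_t ipl)
{
    interrupts_on = ipl;
}

int main(void)
{
    /* Save the previous state before locking a structure that interrupt
     * context may also touch, and restore that saved state afterwards
     * instead of unconditionally re-enabling interrupts. */
    fake_ipl_t ipl = fake_interrupts_disable();
    /* ... spinlock_lock(&r->lock); inspect the run queue; unlock ... */
    fake_interrupts_restore(ipl);

    printf("interrupts enabled again: %s\n", interrupts_on ? "yes" : "no");
    return 0;
}
```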
| Line 511... | Line 509... | ||
| 511 | * The latter prevents threads from migrating between CPU's without ever being run. |
509 | * The latter prevents threads from migrating between CPU's without ever being run. |
| 512 | * We don't want to steal threads whose FPU context is still in CPU. |
510 | * We don't want to steal threads whose FPU context is still in CPU. |
| 513 | */ |
511 | */ |
| 514 | spinlock_lock(&t->lock); |
512 | spinlock_lock(&t->lock); |
| 515 | if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) { |
513 | if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) { |
| 516 | - | ||
| 517 | /* |
514 | /* |
| 518 | * Remove t from r. |
515 | * Remove t from r. |
| 519 | */ |
516 | */ |
| 520 | - | ||
| 521 | spinlock_unlock(&t->lock); |
517 | spinlock_unlock(&t->lock); |
| 522 | 518 | ||
| 523 | /* |
- | |
| 524 | * Here we have to avoid deadlock with relink_rq(), |
- | |
| 525 | * because it locks cpu and r in a different order than we do. |
- | |
| 526 | */ |
- | |
| 527 | if (!spinlock_trylock(&cpu->lock)) { |
- | |
| 528 | /* Release all locks and try again. */ |
- | |
| 529 | spinlock_unlock(&r->lock); |
- | |
| 530 | interrupts_restore(ipl); |
- | |
| 531 | goto restart; |
- | |
| 532 | } |
- | |
| 533 | atomic_dec(&cpu->nrdy); |
519 | atomic_dec(&cpu->nrdy); |
| 534 | spinlock_unlock(&cpu->lock); |
- | |
| 535 | - | ||
| 536 | atomic_dec(&nrdy); |
520 | atomic_dec(&nrdy); |
| 537 | 521 | ||
| 538 | r->n--; |
522 | r->n--; |
| 539 | list_remove(&t->rq_link); |
523 | list_remove(&t->rq_link); |
| 540 | 524 | ||
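The block removed in Rev 784 handled a lock-ordering problem: this path takes r->lock before cpu->lock, while relink_rq() locks cpu and r in the other order, so the code used spinlock_trylock() and, on failure, released everything and jumped back to restart. After the change only atomic_dec(&cpu->nrdy) remains at this point, without taking cpu->lock. A user-space sketch of the retired trylock-and-back-off pattern, with pthread mutexes r_lock and cpu_lock as illustrative stand-ins for the kernel spinlocks:

```c
#include <pthread.h>
#include <stdio.h>

/* Sketch of the removed trylock-and-back-off pattern; r_lock and cpu_lock
 * are illustrative pthread mutexes, not the kernel's spinlocks. */
static pthread_mutex_t r_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER;

static void steal_with_backoff(void)
{
restart:
    pthread_mutex_lock(&r_lock);
    if (pthread_mutex_trylock(&cpu_lock) != 0) {
        /* Another path (relink_rq() in the kernel) may hold cpu_lock and
         * be waiting for r_lock in the opposite order, so drop everything
         * and start over instead of blocking. */
        pthread_mutex_unlock(&r_lock);
        goto restart;
    }
    /* ... both locks held without risking the circular wait ... */
    pthread_mutex_unlock(&cpu_lock);
    pthread_mutex_unlock(&r_lock);
}

int main(void)
{
    steal_with_backoff();
    puts("done");
    return 0;
}
```

Backing off instead of blocking is what makes the opposite lock orders safe here: the side that loses the trylock never waits while still holding r_lock, so the circular wait required for deadlock cannot form.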