--- Rev 779
+++ Rev 783
@@ -117 +117 @@
  */
 static thread_t *find_best_thread(void)
 {
 	thread_t *t;
 	runq_t *r;
-	int i, n;
+	int i;
 
 	ASSERT(CPU != NULL);
 
 loop:
-	interrupts_disable();
-
-	spinlock_lock(&CPU->lock);
-	n = CPU->nrdy;
-	spinlock_unlock(&CPU->lock);
-
 	interrupts_enable();
 
-	if (n == 0) {
+	if (atomic_get(&CPU->nrdy) == 0) {
 		/*
 		 * For there was nothing to run, the CPU goes to sleep
 		 * until a hardware interrupt or an IPI comes.
 		 * This improves energy saving and hyperthreading.
 		 */
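The hunk above replaces a spinlock-protected snapshot of CPU->nrdy (taken with interrupts disabled) by a single atomic_get(), so find_best_thread() can check whether anything is runnable without touching CPU->lock at all. A minimal sketch of the same pattern, with C11 <stdatomic.h> standing in for the kernel's atomic_t; the function names are illustrative, not part of this revision:

/*
 * Illustrative only: a per-CPU ready-thread counter kept consistent
 * purely by atomic increments/decrements, so a reader needs neither
 * a spinlock nor interrupt disabling.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int nrdy = 0;		/* ready threads on this "CPU" */

static void thread_became_ready(void) { atomic_fetch_add(&nrdy, 1); }
static void thread_was_taken(void)    { atomic_fetch_sub(&nrdy, 1); }

static int cpu_has_work(void)
{
	/* lock-free counterpart of atomic_get(&CPU->nrdy) == 0 */
	return atomic_load(&nrdy) != 0;
}

int main(void)
{
	thread_became_ready();
	printf("work pending: %d\n", cpu_has_work());	/* 1 */
	thread_was_taken();
	printf("work pending: %d\n", cpu_has_work());	/* 0 */
	return 0;
}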
@@ -143 +137 @@
 	}
 
 	interrupts_disable();
 
 	i = 0;
-retry:
 	for (; i<RQ_COUNT; i++) {
 		r = &CPU->rq[i];
 		spinlock_lock(&r->lock);
 		if (r->n == 0) {
 			/*
@@ -155 +148 @@
 			 */
 			spinlock_unlock(&r->lock);
 			continue;
 		}
 
-		/* avoid deadlock with relink_rq() */
-		if (!spinlock_trylock(&CPU->lock)) {
-			/*
-			 * Unlock r and try again.
-			 */
-			spinlock_unlock(&r->lock);
-			goto retry;
-		}
-		CPU->nrdy--;
-		spinlock_unlock(&CPU->lock);
-
+		atomic_dec(&CPU->nrdy);
 		atomic_dec(&nrdy);
 		r->n--;
 
 		/*
 		 * Take the first thread from the queue.
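Because CPU->nrdy is now an atomic counter, the dequeue path above only ever holds the run-queue lock; the spinlock_trylock(&CPU->lock)/goto retry sequence, which existed solely to avoid a lock-ordering deadlock with relink_rq(), goes away together with the retry: label removed in the previous hunk. A sketch of the resulting single-lock pattern, assuming a pthread mutex in place of the run-queue spinlock and a C11 atomic in place of atomic_t (the type and function names here are illustrative):

#include <pthread.h>
#include <stdatomic.h>

typedef struct {
	pthread_mutex_t lock;	/* plays the role of r->lock */
	int n;			/* threads linked in this queue */
} runq_sketch_t;

static atomic_int cpu_nrdy;	/* was guarded by CPU->lock before this change */

/* Dequeue bookkeeping: only the run-queue lock is held, no second lock. */
static void take_one(runq_sketch_t *r)
{
	pthread_mutex_lock(&r->lock);
	if (r->n > 0) {
		r->n--;
		atomic_fetch_sub(&cpu_nrdy, 1);	/* replaces CPU->nrdy-- under CPU->lock */
	}
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	runq_sketch_t rq = { PTHREAD_MUTEX_INITIALIZER, 1 };
	atomic_store(&cpu_nrdy, 1);
	take_one(&rq);
	return atomic_load(&cpu_nrdy);	/* 0: counter and queue stayed in step */
}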
@@ -462 +445 @@
  *
  */
 void kcpulb(void *arg)
 {
 	thread_t *t;
-	int count, i, j, k = 0;
+	int count, average, i, j, k = 0;
 	ipl_t ipl;
 
 loop:
 	/*
 	 * Work in 1s intervals.
@@ -477 +460 @@
 	/*
 	 * Calculate the number of threads that will be migrated/stolen from
 	 * other CPU's. Note that situation can have changed between two
 	 * passes. Each time get the most up to date counts.
 	 */
-	ipl = interrupts_disable();
-	spinlock_lock(&CPU->lock);
-	count = atomic_get(&nrdy) / config.cpu_active;
-	count -= CPU->nrdy;
-	spinlock_unlock(&CPU->lock);
-	interrupts_restore(ipl);
+	average = atomic_get(&nrdy) / config.cpu_active;
+	count = average - atomic_get(&CPU->nrdy);
 
-	if (count <= 0)
+	if (count < 0)
 		goto satisfied;
 
+	if (!count) { /* Try to steal threads from CPU's that have more then average count */
+		count = 1;
+		average += 1;
+	}
+
 	/*
 	 * Searching least priority queues on all CPU's first and most priority queues on all CPU's last.
 	 */
 	for (j=RQ_COUNT-1; j >= 0; j--) {
 		for (i=0; i < config.cpu_active; i++) {
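kcpulb() now works from the system-wide picture: average is the number of ready threads per active CPU, and count is how many threads this CPU falls short of that average. A CPU sitting exactly at the average still tries to steal a single thread, but the bar for victim CPUs is raised by one (enforced by the atomic_get(&cpu->nrdy) <= average test added a few hunks below). A small worked example of the arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
	int nrdy = 9, cpu_active = 4, local_nrdy = 1;	/* hypothetical load */

	int average = nrdy / cpu_active;	/* 9 / 4 == 2 */
	int count = average - local_nrdy;	/* 2 - 1 == 1 thread to steal */

	if (count < 0)
		return 0;			/* already above average: satisfied */
	if (!count) {				/* exactly at the average */
		count = 1;			/* still try to grab one thread */
		average += 1;			/* only CPUs above the raised average are victims */
	}
	printf("average=%d, steal up to %d thread(s)\n", average, count);
	return 0;
}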
@@ -503 +487 @@
 			/*
 			 * Not interested in ourselves.
 			 * Doesn't require interrupt disabling for kcpulb is X_WIRED.
 			 */
 			if (CPU == cpu)
+				continue;
+			if (atomic_get(&cpu->nrdy) <= average)
 				continue;
 
 restart:		ipl = interrupts_disable();
 			r = &cpu->rq[j];
 			spinlock_lock(&r->lock);
 			if (r->n == 0) {
@@ -542 +528 @@
 				/* Release all locks and try again. */
 				spinlock_unlock(&r->lock);
 				interrupts_restore(ipl);
 				goto restart;
 			}
-			cpu->nrdy--;
+			atomic_dec(&cpu->nrdy);
 			spinlock_unlock(&cpu->lock);
 
 			atomic_dec(&nrdy);
 
 			r->n--;
@@ -564 +550 @@
 			/*
 			 * Ready t on local CPU
 			 */
 			spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
-			printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, atomic_get(&nrdy) / config.cpu_active);
+			printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
 #endif
 			t->flags |= X_STOLEN;
 			spinlock_unlock(&t->lock);
 
 			thread_ready(t);
@@ -587 +573 @@
 			}
 			interrupts_restore(ipl);
 		}
 	}
 
-	if (CPU->nrdy) {
+	if (atomic_get(&CPU->nrdy)) {
 		/*
 		 * Be a little bit light-weight and let migrated threads run.
 		 */
 		scheduler();
 	} else {
@@ -627 +613 @@
 	for (cpu=0;cpu < config.cpu_count; cpu++) {
 		if (!cpus[cpu].active)
 			continue;
 		spinlock_lock(&cpus[cpu].lock);
 		printf("cpu%d: nrdy: %d needs_relink: %d\n",
-			cpus[cpu].id, cpus[cpu].nrdy, cpus[cpu].needs_relink);
+			cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
 
 		for (i=0; i<RQ_COUNT; i++) {
 			r = &cpus[cpu].rq[i];
 			spinlock_lock(&r->lock);
 			if (!r->n) {