Diff between Rev 775 and Rev 779
@@ Rev 775: line 131 | Rev 779: line 131 @@
 	spinlock_unlock(&CPU->lock);

 	interrupts_enable();

 	if (n == 0) {
-#ifdef CONFIG_SMP
-		/*
-		 * If the load balancing thread is not running, wake it up and
-		 * set CPU-private flag that the kcpulb has been started.
-		 */
-		if (test_and_set(&CPU->kcpulbstarted) == 0) {
-			waitq_wakeup(&CPU->kcpulb_wq, 0);
-			goto loop;
-		}
-#endif /* CONFIG_SMP */
-
 		/*
 		 * For there was nothing to run, the CPU goes to sleep
 		 * until a hardware interrupt or an IPI comes.
 		 * This improves energy saving and hyperthreading.
-		 * On the other hand, several hardware interrupts can be ignored.
 		 */
 		cpu_sleep();
 		goto loop;
 	}

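The block removed above used a test-and-set flag so that only the first idle pass would wake the load balancer. Below is a minimal standalone sketch of that "wake the helper at most once" idiom, assuming C11 atomics: atomic_flag stands in for the kernel's test_and_set() on CPU->kcpulbstarted, and the actual wakeup is only hinted at in a comment. This is an illustration, not HelenOS code.

/* Standalone sketch (assumption: C11 atomic_flag as a stand-in for the
 * kernel's test_and_set()). Only the first caller after a clear "wins"
 * and should perform the wakeup; later callers see the flag already set. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag balancer_started = ATOMIC_FLAG_INIT;

static bool wake_balancer_once(void)
{
	/* atomic_flag_test_and_set() returns the previous value:
	 * false means we set it first and should do the wakeup. */
	if (!atomic_flag_test_and_set(&balancer_started))
		return true;	/* we won the race; e.g. wake the wait queue here */
	return false;		/* someone already woke the balancer */
}

int main(void)
{
	printf("first call wakes:  %d\n", wake_balancer_once());	/* 1 */
	printf("second call wakes: %d\n", wake_balancer_once());	/* 0 */
	atomic_flag_clear(&balancer_started);	/* the balancer re-arms the wakeup */
	printf("after clear:       %d\n", wake_balancer_once());	/* 1 */
	return 0;
}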
@@ Rev 775: line 479 | Rev 779: line 467 @@
 	int count, i, j, k = 0;
 	ipl_t ipl;

 loop:
 	/*
-	 * Sleep until there's some work to do.
+	 * Work in 1s intervals.
 	 */
-	waitq_sleep(&CPU->kcpulb_wq);
+	thread_sleep(1);

 not_satisfied:
 	/*
 	 * Calculate the number of threads that will be migrated/stolen from
 	 * other CPU's. Note that situation can have changed between two
Line 576... | Line 564... | ||
576 | /* |
564 | /* |
577 | * Ready t on local CPU |
565 | * Ready t on local CPU |
578 | */ |
566 | */ |
579 | spinlock_lock(&t->lock); |
567 | spinlock_lock(&t->lock); |
580 | #ifdef KCPULB_VERBOSE |
568 | #ifdef KCPULB_VERBOSE |
581 | printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active); |
569 | printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, atomic_get(&nrdy) / config.cpu_active); |
582 | #endif |
570 | #endif |
583 | t->flags |= X_STOLEN; |
571 | t->flags |= X_STOLEN; |
584 | spinlock_unlock(&t->lock); |
572 | spinlock_unlock(&t->lock); |
585 | 573 | ||
586 | thread_ready(t); |
574 | thread_ready(t); |
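The new right-hand side reads the global ready-thread counter through atomic_get() before dividing by config.cpu_active. A small sketch of the same idea, assuming C11 atomic_load() as a stand-in and reusing the names nrdy and cpu_active for illustration only:

/* Standalone sketch (assumption: C11 atomic_load() for the kernel's
 * atomic_get()); take one consistent snapshot of a counter other CPUs
 * may be updating, then compute the per-CPU average from it. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long nrdy = 7;		/* ready threads, system-wide */
static const long cpu_active = 4;	/* stands in for config.cpu_active */

int main(void)
{
	long snapshot = atomic_load(&nrdy);	/* one read, one consistent value */
	printf("nrdy=%ld, avg=%ld\n", snapshot, snapshot / cpu_active);
	return 0;
}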
@@ Rev 775: line 604 | Rev 779: line 592 @@
 	if (CPU->nrdy) {
 		/*
 		 * Be a little bit light-weight and let migrated threads run.
 		 */
 		scheduler();
-	}
-	else {
+	} else {
 		/*
 		 * We failed to migrate a single thread.
-		 * Something more sophisticated should be done.
+		 * Give up this turn.
 		 */
-		scheduler();
+		goto loop;
 	}

 	goto not_satisfied;

 satisfied:
-	/*
-	 * Tell find_best_thread() to wake us up later again.
-	 */
-	atomic_set(&CPU->kcpulbstarted,0);
 	goto loop;
 }

 #endif /* CONFIG_SMP */

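After this hunk, a pass that migrates nothing gives up and jumps back to the 1 s sleep instead of calling scheduler() again, and the satisfied: label no longer re-arms the kcpulbstarted flag. A compressed sketch of that loop / not_satisfied / satisfied flow is below; migrate_some() and its numbers are invented purely for illustration.

/* Standalone sketch (assumption: migrate_some() and the counts are made up).
 * A fruitless pass breaks back to the outer loop (the periodic sleep);
 * a pass that makes progress retries immediately until satisfied. */
#include <stdio.h>

static int migrate_some(int wanted)
{
	static int stealable = 3;	/* pretend only 3 threads can be stolen */
	int got = wanted < stealable ? wanted : stealable;
	stealable -= got;
	return got;
}

int main(void)
{
	int deficit = 5;	/* threads we would like to steal */
	for (int pass = 0; pass < 3 && deficit > 0; pass++) {	/* loop: (sleep) */
		while (deficit > 0) {				/* not_satisfied: */
			int got = migrate_some(deficit);
			if (got == 0) {
				printf("pass %d: nothing stolen, give up this turn\n", pass);
				break;	/* back to the sleep */
			}
			deficit -= got;
			printf("pass %d: stole %d, still need %d\n", pass, got, deficit);
		}
	}
	if (deficit == 0)
		printf("satisfied\n");	/* satisfied: */
	else
		printf("gave up, still %d short\n", deficit);
	return 0;
}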
@@ Rev 775: line 653 | Rev 779: line 636 @@
 		spinlock_lock(&r->lock);
 		if (!r->n) {
 			spinlock_unlock(&r->lock);
 			continue;
 		}
-		printf("Rq %d: ", i);
+		printf("\tRq %d: ", i);
 		for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
 			t = list_get_instance(cur, thread_t, rq_link);
 			printf("%d(%s) ", t->tid,
 			    thread_states[t->state]);
 		}