/*
 * Copyright (c) 2001-2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */
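
/*
 * Overview (descriptive note, summarizing the code below): each CPU keeps
 * RQ_COUNT priority run queues (CPU->rq[]), each protected by its own
 * spinlock. find_best_thread() takes the first thread from the most
 * preferred non-empty queue, relink_rq() periodically promotes threads
 * from lower-priority queues to prevent starvation, and, on SMP
 * configurations, the kcpulb thread steals ready threads from busier CPUs.
 */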

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>
#include <arch/smp/sun4v/smp.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;  /**< Number of ready threads in the system. */

/** Carry out actions before a new task runs. */
void before_task_runs(void)
{
    before_task_runs_arch();
}

/** Take actions before a new thread runs.
 *
 * Perform actions that need to be taken
 * before the newly selected thread is
 * passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
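    /*
     * FPU context handling: with CONFIG_FPU_LAZY, the FPU is enabled only
     * if this thread already owns the FPU on this CPU; any other thread
     * gets the FPU disabled and is expected to fault into
     * scheduler_fpu_lazy_request() on its first FPU instruction. Without
     * lazy switching, the FPU context is restored (or initialized on first
     * use) eagerly on every switch.
     */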
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

/** Take actions after THREAD had run.
 *
 * Perform actions that need to be taken
 * after the running thread has been
 * preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
    after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
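/** Give the current thread ownership of the CPU's FPU (lazy FPU switching).
 *
 * Descriptive note: this is expected to be called from the architecture's
 * FPU-disabled fault handler when THREAD touches the FPU. It saves the
 * context of the previous FPU owner (if any), restores or initializes
 * THREAD's FPU context, and records THREAD as CPU->fpu_owner.
 */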
void scheduler_fpu_lazy_request(void)
{
restart:
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
        CPU->fpu_owner = NULL;
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists) {
        fpu_context_restore(THREAD->saved_fpu_context);
    } else {
        /* Allocate FPU context */
        if (!THREAD->saved_fpu_context) {
            /* Might sleep */
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&CPU->lock);
            THREAD->saved_fpu_context =
                (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
            /* We may have switched CPUs during slab_alloc */
            goto restart;
        }
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
    spinlock_unlock(&THREAD->lock);

    spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This saves energy and improves hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will nevertheless go to sleep
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        if (CPU->arch.exec_unit)
            atomic_dec(&(CPU->arch.exec_unit->nrdy));
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

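        /*
         * Time quantum: a thread taken from rq[i] receives (i + 1) * 10 ms,
         * so threads from lower-priority queues run with proportionally
         * longer quanta.
         */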
        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
         * when load balancing needs arise.
         */
        t->flags &= ~THREAD_FLAG_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to start are moved up one
 * queue, toward higher priority.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
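        /*
         * Cascade each lower-priority queue one level up: rq[start + 1]
         * drains into rq[start], then rq[start + 2] into rq[start + 1],
         * and so on, reusing the temporary list head at each step.
         */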
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);

        /* Update thread accounting */
        THREAD->cycles += get_cycle() - THREAD->last_cycle;

#ifndef CONFIG_FPU_LAZY
        fpu_context_save(THREAD->saved_fpu_context);
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */

            /* Save current CPU cycle */
            THREAD->last_cycle = get_cycle();

            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);

            return;
        }

        /*
         * Interrupt priority level of the preempted thread is recorded
         * here to facilitate scheduler() invocations from
         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and the preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We must not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * using the new stack. Handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
    int priority;
    DEADLOCK_PROBE_INIT(p_joinwq);

    ASSERT(CPU != NULL);

    if (THREAD) {
        /* must be run after the switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
        case Running:
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
repeat:
            if (THREAD->detached) {
                thread_destroy(THREAD);
            } else {
                /*
                 * The thread structure is kept allocated until
                 * somebody calls thread_detach() on it.
                 */
                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
                    /*
                     * Avoid deadlock.
                     */
                    spinlock_unlock(&THREAD->lock);
                    delay(HZ);
                    spinlock_lock(&THREAD->lock);
                    DEADLOCK_PROBE(p_joinwq,
                        DEADLOCK_THRESHOLD);
                    goto repeat;
                }
                _waitq_wakeup_unsafe(&THREAD->join_wq,
                    WAKEUP_FIRST);
                spinlock_unlock(&THREAD->join_wq.lock);

                THREAD->state = Lingering;
                spinlock_unlock(&THREAD->lock);
            }
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in
             * waitq_sleep(). Address of wq->lock is kept in
             * THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context
             * invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Any other state (e.g. Entering) is unexpected here.
             */
            panic("tid%" PRIu64 ": unexpected state %s\n",
                THREAD->tid, thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    /*
     * If both the old and the new task are the same, lots of work is
     * avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address
         * space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old address space with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
        before_task_runs();
    }

    spinlock_lock(&THREAD->lock);
    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
        ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
        THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
     * to the thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load-balancing thread; it supervises the supply of
 * threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, j, k = 0;
    unsigned int i;
    ipl_t ipl;

    /*
     * Detach kcpulb as nobody will call thread_join_timeout() on it.
     */
    thread_detach(THREAD);

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation can change between two
     * passes; each time, use the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);
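
    /*
     * Illustrative (hypothetical) example: with 9 ready threads system-wide
     * and 4 active CPUs, average = 9 / 4 + 1 = 3; a CPU that currently has
     * only 1 ready thread will therefore try to steal count = 2 threads.
     */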

    /* calculate the number of threads to be stolen from other exec. units */
    spinlock_lock(&(CPU->arch.exec_unit->proposed_nrdy_lock));
    bool eu_busy = calculate_optimal_nrdy(CPU->arch.exec_unit);
    unsigned int count_other_eus = CPU->arch.proposed_nrdy
        - atomic_get(&(CPU->nrdy));
    spinlock_unlock(&(CPU->arch.exec_unit->proposed_nrdy_lock));

    /*
     * If the CPU's parent core is overloaded, do not do the load
     * balancing, otherwise we would migrate threads which should be
     * migrated to other cores and, since a thread cannot be migrated
     * multiple times, it would not be migrated to the other core
     * in the future.
     */
    if (eu_busy)
        return;

    /*
     * Take the maximum: steal enough threads to satisfy both the need to
     * have all virtual CPUs equally busy and the need to have all the
     * cores equally busy.
     */
    if (((int) count_other_eus) > count)
        count = count_other_eus;

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * We are not interested in ourselves.
             * Interrupt disabling is not required because
             * kcpulb has THREAD_FLAG_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads
                 * or threads that have already been stolen.
                 * The latter prevents threads from migrating
                 * between CPUs without ever being run. We also
                 * don't want to steal threads whose FPU context
                 * is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (THREAD_FLAG_WIRED |
                    THREAD_FLAG_STOLEN))) &&
                    (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);
                    if (cpu->arch.exec_unit)
                        atomic_dec(&(cpu->arch.exec_unit->nrdy));

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on the local CPU.
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
                    "nrdy=%ld, avg=%ld\n", CPU->id, t->tid,
                    CPU->id, atomic_get(&CPU->nrdy),
                    atomic_get(&nrdy) / config.cpu_active);
#endif
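                /*
                 * Mark t as stolen so it will not be migrated
                 * again before it has a chance to run, and reset
                 * its state to Entering so that the subsequent
                 * thread_ready() can enqueue it on this CPU.
                 */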
                t->flags |= THREAD_FLAG_STOLEN;
                t->state = Entering;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another
                 * CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    unsigned int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * so let's not be interrupted. */
    ipl = interrupts_disable();
    for (cpu = 0; cpu < config.cpu_count; cpu++) {

        if (!cpus[cpu].active)
            continue;

        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIc "\n",
            cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
            cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\trq[%u]: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head;
                cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%" PRIu64 "(%s) ", t->tid,
                    thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}

/** @}
 */