/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @defgroup proc Proc
 * @ingroup kernel
 * @{
 * @}
 */

/** @addtogroup genericproc generic
 * @ingroup proc
 * @{
 */

/**
 * @file
 * @brief   Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;  /**< Number of ready threads in the system. */

/** Carry out actions before a new task runs. */
void before_task_runs(void)
{
    before_task_runs_arch();
}

/** Take actions before a new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
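    /*
     * With lazy FPU context switching, keep the FPU disabled unless THREAD
     * already owns it; the first FPU instruction executed by a non-owner is
     * then expected to trap and request the context on demand (see
     * scheduler_fpu_lazy_request() below).
     */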
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
    after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
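/** Switch the FPU context to THREAD (lazy FPU switching).
 *
 * Save the FPU context of the previous owner (if any), then restore or
 * initialize the FPU context of THREAD and make THREAD the new owner of
 * this CPU's FPU.
 */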
void scheduler_fpu_lazy_request(void)
{
restart:
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
        CPU->fpu_owner = NULL;
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists) {
        fpu_context_restore(THREAD->saved_fpu_context);
    } else {
        /* Allocate FPU context */
        if (!THREAD->saved_fpu_context) {
            /* Might sleep */
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&CPU->lock);
            THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
            /* We may have switched CPUs during slab_alloc */
            goto restart;
        }
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
    spinlock_unlock(&THREAD->lock);

    spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This saves energy and helps hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will go to sleep anyway,
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

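        /*
         * Give t a time quantum proportional to the queue it came from:
         * (i + 1) * 10 ms, so lower-priority threads receive longer slices.
         */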
        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * when load balancing needs emerge.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with 'pri'
 * greater than or equal to @start are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
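        /*
         * Each run queue rq[i + 1], for i >= start, is emptied and its
         * threads are appended to rq[i], i.e. every affected thread is
         * promoted by one priority level.
         */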
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(THREAD->saved_fpu_context);
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);

            return;
        }

        /*
         * The interrupt priority level of the preempted thread is recorded
         * here to facilitate scheduler() invocations from
         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and the preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
                (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. It handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        /* must be run after the switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
        case Running:
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
repeat:
            if (THREAD->detached) {
                thread_destroy(THREAD);
            } else {
                /*
                 * The thread structure is kept allocated until somebody
                 * calls thread_detach() on it.
                 */
                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
                    /*
                     * Avoid deadlock.
                     */
                    spinlock_unlock(&THREAD->lock);
                    delay(10);
                    spinlock_lock(&THREAD->lock);
                    goto repeat;
                }
                _waitq_wakeup_unsafe(&THREAD->join_wq, false);
                spinlock_unlock(&THREAD->join_wq.lock);

                THREAD->state = Undead;
                spinlock_unlock(&THREAD->lock);
            }
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid,
                  thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    /*
     * If the old and the new task are the same, a lot of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old address space with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
        before_task_runs();
    }

    spinlock_lock(&THREAD->lock);
    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
           CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
           atomic_get(&CPU->nrdy));
#endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
     * to the thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * The SMP load balancing thread supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

    /*
     * Detach kcpulb as nobody will call thread_join_timeout() on it.
     */
    thread_detach(THREAD);

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation can have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);
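    /*
     * Example: with 8 ready threads system-wide and 4 active CPUs,
     * average is 8 / 4 + 1 = 3, so a CPU with only 1 ready thread
     * will try to steal count = 2 threads.
     */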

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first
     * and the highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * No interrupt disabling is required, because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents threads
                 * from migrating between CPUs without ever being run.
                 * We also don't want to steal threads whose FPU context
                 * is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if (!(t->flags & (X_WIRED | X_STOLEN)) && !(t->fpu_context_engaged)) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on the local CPU.
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%ld\n",
                       CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
                       atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                t->state = Entering;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /*
     * We are going to mess with scheduler structures;
     * let's not be interrupted.
     */
    ipl = interrupts_disable();
    for (cpu = 0; cpu < config.cpu_count; cpu++) {

        if (!cpus[cpu].active)
            continue;

        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
               cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
               cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\trq[%d]: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid, thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}

/** @}
 */