Subversion Repositories HelenOS


/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be protected by a spinlock.
 */

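/* Number of ready threads in the whole system; protected by nrdylock (see the note above). */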
spinlock_t nrdylock;
volatile int nrdy;

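/*
 * Initialize scheduler-global data structures,
 * i.e. the spinlock protecting nrdy.
 */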
void scheduler_init(void)
{
    spinlock_initialize(&nrdylock);
}

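/*
 * Find and return the best candidate thread to run next on this CPU.
 * The chosen thread is removed from its run queue.
 */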
/* This function returns with interrupt priority raised, i.e. cpu_priority_high()'d. */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag indicating that kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
        #endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         * On the other hand, several hardware interrupts can be ignored.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        spinlock_lock(&nrdylock);
        nrdy--;
        spinlock_unlock(&nrdylock);

        spinlock_lock(&CPU->lock);
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

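        /*
         * The thread's time quantum depends on the queue it was found in:
         * us2ticks((i+1)*10000), i.e. presumably (i + 1) * 10 ms.
         */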
        t->ticks = us2ticks((i+1)*10000);
        t->pri = i; /* eventually correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * when the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

/*
 * This function prevents low priority threads from starving in rq's.
 * When it decides to relink rq's, it reconnects the respective pointers
 * so that, as a result, threads with 'pri' greater than or equal to 'start'
 * are moved to a higher-priority queue.
 */
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT-1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}

/*
 * The scheduler.
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        if (!context_save(&THREAD->saved_context)) {
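            /*
             * context_save() signals via its return value whether we are saving
             * the context now or returning to it through context_restore();
             * this branch handles the latter, i.e. the thread being resumed.
             */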
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        THREAD->saved_context.pri = pri;
    }

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
    CPU->saved_context.pc = (__address) scheduler_separated_stack;
    context_restore(&CPU->saved_context);
    /* not reached */
}

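/*
 * Second, stack-switched part of scheduler().
 * Running on the CPU-private stack, it puts the outgoing thread away
 * according to its state, picks a new thread via find_best_thread(),
 * installs the new task's vm mapping if it differs from the current one,
 * and finally restores the new thread's context.
 */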
void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
                THREAD->state = Ready;
                spinlock_unlock(&THREAD->lock);
                thread_ready(THREAD);
                break;

            case Exiting:
                frame_free((__address) THREAD->kstack);
                if (THREAD->ustack) {
                    frame_free((__address) THREAD->ustack);
                }

                /*
                 * Detach from the containing task.
                 */
                spinlock_lock(&TASK->lock);
                list_remove(&THREAD->th_link);
                spinlock_unlock(&TASK->lock);

                spinlock_unlock(&THREAD->lock);

                spinlock_lock(&threads_lock);
                list_remove(&THREAD->threads_link);
                spinlock_unlock(&threads_lock);

                free(THREAD);

                break;

            case Sleeping:
                /*
                 * Prefer the thread after it's woken up.
                 */
                THREAD->pri = -1;

                /*
                 * We need to release wq->lock which we locked in waitq_sleep().
                 * Address of wq->lock is kept in THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
                 * Check for possible requests for out-of-context invocation.
                 */
                if (THREAD->call_me) {
                    THREAD->call_me(THREAD->call_me_with);
                    THREAD->call_me = NULL;
                    THREAD->call_me_with = NULL;
                }

                spinlock_unlock(&THREAD->lock);

                break;

            default:
                /*
                 * Entering state is unexpected.
                 */
                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both tasks and vm mappings are different.
             * Replace the old one with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
    #endif

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It supervises the supply of runnable threads for the CPU it is wired to.
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPU's. Note that the situation may have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);

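    /*
     * count is now the difference between the system-wide per-CPU average
     * and our own number of ready threads, i.e. how many threads we should
     * steal to even out the load.
     */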
    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPU's first and the
     * highest-priority queues on all CPU's last.
     */
    for (j = RQ_COUNT-1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];
            r = &cpu->rq[j];

            /*
             * Not interested in ourselves.
             * Interrupt disabling is not required here because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;

restart:
            pri = cpu_priority_high();
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads nor threads that have
                 * already been stolen. The latter prevents threads from migrating
                 * between CPU's without ever being run.
                 */
                spinlock_lock(&t->lock);
                if (!(t->flags & (X_WIRED | X_STOLEN))) {
                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    spinlock_lock(&nrdylock);
                    nrdy--;
                    spinlock_unlock(&nrdylock);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */