Subversion Repositories HelenOS-historic


/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */
  48.  
  49. /*
  50.  * NOTE ON ATOMIC READS:
  51.  * Some architectures cannot read __u32 atomically.
  52.  * For that reason, all accesses to nrdy and the likes must be protected by spinlock.
  53.  */
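
/*
 * Illustration (added; not in the original source): under the rule above,
 * even a plain read of the global count of ready threads must take the
 * lock first:
 *
 *     int snapshot;
 *
 *     spinlock_lock(&nrdylock);
 *     snapshot = nrdy;
 *     spinlock_unlock(&nrdylock);
 */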

spinlock_t nrdylock;
volatile int nrdy;

/*
 * Prepare the processor for the thread that is about to run:
 * perform architecture-specific setup and restore the thread's
 * FPU context.
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore(&(THREAD->saved_fpu_context));
}


/*
 * Initialize the scheduler's global state.
 */
void scheduler_init(void)
{
    spinlock_initialize(&nrdylock);
}

/*
 * Returns with interrupt priority raised (cpu_priority_high()).
 */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag that the kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
        #endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This saves energy and plays well with hyperthreading.
         * On the other hand, several hardware interrupts can be ignored.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        spinlock_lock(&nrdylock);
        nrdy--;
        spinlock_unlock(&nrdylock);

        spinlock_lock(&CPU->lock);
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i + 1) * 10000);
        t->pri = i; /* correct the rq index */

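        /*
         * Note (added for illustration): with the formula above, a
         * thread taken from rq[0] receives a quantum of 10000 us, one
         * from rq[1] 20000 us, and so on, assuming us2ticks() converts
         * microseconds to clock ticks.
         */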
        /*
         * Clear the X_STOLEN flag so that t can be migrated again
         * when the need for load balancing emerges.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;
}

/*
 * This function prevents low-priority threads from starving in run queues.
 * When it decides to relink the run queues, it reconnects the respective
 * pointers so that, as a result, threads with 'pri' greater than or equal
 * to 'start' are moved to a higher-priority queue.
 */
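/*
 * Example (added for illustration): with RQ_COUNT == 4 and start == 1,
 * the loop below first appends rq[2] to rq[1] and then rq[3] to rq[2],
 * so the threads in the queues below rq[1] each climb one priority level.
 */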
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);
}

/*
 * The scheduler.
 *
 * Saves the context of the current thread (if any) and switches to a
 * separate per-CPU stack in scheduler_separated_stack(), where the next
 * thread is selected.
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        fpu_context_save(&(THREAD->saved_fpu_context));
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        THREAD->saved_context.pri = pri;
    }
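
    /*
     * Note (added): context_save() behaves like setjmp() here: it
     * returns true when the context is first saved, and false when the
     * thread is later resumed via context_restore(). That is why the
     * branch above is the point where rescheduled threads leave
     * scheduler().
     */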

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE - 8];
    CPU->saved_context.pc = (__address) scheduler_separated_stack;
    context_restore(&CPU->saved_context);
    /* not reached */
}

void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
                THREAD->state = Ready;
                spinlock_unlock(&THREAD->lock);
                thread_ready(THREAD);
                break;

            case Exiting:
                frame_free((__address) THREAD->kstack);
                if (THREAD->ustack) {
                    frame_free((__address) THREAD->ustack);
                }

                /*
                 * Detach from the containing task.
                 */
                spinlock_lock(&TASK->lock);
                list_remove(&THREAD->th_link);
                spinlock_unlock(&TASK->lock);

                spinlock_unlock(&THREAD->lock);

                spinlock_lock(&threads_lock);
                list_remove(&THREAD->threads_link);
                spinlock_unlock(&threads_lock);

                spinlock_lock(&THREAD->cpu->lock);
                if (THREAD->cpu->arch.fpu_owner == THREAD)
                    THREAD->cpu->arch.fpu_owner = NULL;
                spinlock_unlock(&THREAD->cpu->lock);

                free(THREAD);

                break;

            case Sleeping:
                /*
                 * Prefer the thread after it's woken up.
                 */
                THREAD->pri = -1;

                /*
                 * We need to release wq->lock which we locked in
                 * waitq_sleep(). The address of wq->lock is kept in
                 * THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
                 * Check for possible requests for out-of-context
                 * invocation.
                 */
                if (THREAD->call_me) {
                    THREAD->call_me(THREAD->call_me_with);
                    THREAD->call_me = NULL;
                    THREAD->call_me_with = NULL;
                }

                spinlock_unlock(&THREAD->lock);

                break;

            default:
                /*
                 * Entering this state is unexpected.
                 */
                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both the tasks and the vm mappings are different.
             * Replace the old mapping with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
    #endif

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It keeps the CPU it is wired to supplied with ready threads.
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPU's. Note that the situation may have changed between two
     * passes, so get the most up-to-date counts each time.
     */
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);
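
    /*
     * Note (added for illustration): count is this CPU's shortfall
     * relative to the system-wide average. For example, with nrdy == 8,
     * four active CPUs, and CPU->nrdy == 1, count == 8/4 - 1 == 1 thread
     * to steal.
     */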

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPU's first and the
     * highest-priority queues on all CPU's last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];
            r = &cpu->rq[j];

            /*
             * Not interested in ourselves.
             * This doesn't require interrupt disabling, because kcpulb
             * is X_WIRED.
             */
            if (CPU == cpu)
                continue;

restart:
            pri = cpu_priority_high();
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPU's without ever
                 * being run. We also don't want to steal threads whose
                 * FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order
                     * than we do: relink_rq() takes the cpu lock first
                     * and the runq lock second, whereas we already hold
                     * r->lock here. Hence the trylock.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    spinlock_lock(&nrdylock);
                    nrdy--;
                    spinlock_unlock(&nrdylock);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on the local CPU.
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next
                 * time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let the migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */