
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */
/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like
 * must be protected by a spinlock.
 */

spinlock_t nrdylock;
volatile int nrdy;
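
/*
 * Illustrative sketch (comment only, not additional kernel code) of the
 * access pattern the note above mandates; every read-modify-write of the
 * global nrdy counter in this file follows it:
 *
 *      spinlock_lock(&nrdylock);
 *      nrdy--;
 *      spinlock_unlock(&nrdylock);
 */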


/** Prepare the CPU for the incoming thread
 *
 * Perform architecture-specific preparations
 * and restore the thread's FPU context before
 * the thread is given control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore(&(THREAD->saved_fpu_context));
}


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
    spinlock_initialize(&nrdylock);
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and the
 * scheduler policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag indicating that kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
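        /*
         * Cross-reference: kcpulbstarted is cleared again by kcpulb()
         * itself once it is satisfied, re-arming this wakeup.
         */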
        #endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This improves energy saving and hyperthreading.
         * On the other hand, several hardware interrupts may end up
         * being ignored in the meantime.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        spinlock_lock(&nrdylock);
        nrdy--;
        spinlock_unlock(&nrdylock);

        spinlock_lock(&CPU->lock);
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

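        /*
         * Descriptive note: the time quantum scales with the queue index,
         * so a thread taken from rq[i] receives (i + 1) * 10 ms of CPU time.
         */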
        t->ticks = us2ticks((i + 1) * 10000);
        t->pri = i; /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * again when the need for load balancing emerges.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * 'pri' greater than 'start' are moved up one priority queue.
 *
 * @param start Threshold priority.
 *
 */
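/*
 * Worked example (comment only; RQ_COUNT of 4 assumed for illustration):
 * relink_rq(1) first concatenates rq[2] onto rq[1], then rq[3] onto rq[2].
 * Threads that sat in rq[2] or rq[3] each advance one queue; rq[3] ends up
 * empty and rq[1] keeps its original threads in front of the newcomers.
 */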
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        fpu_context_save(&(THREAD->saved_fpu_context));
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        THREAD->saved_context.pri = pri;
    }

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
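    /*
     * Descriptive note on the hand-off below: context_save() records a
     * context on the current stack, context_set() redirects that context so
     * that its PC is the entry address of scheduler_separated_stack()
     * (obtained via FADDR()) and its SP sits at the top of the CPU-private
     * stack, and context_restore() jumps there; control never returns here.
     */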
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * executing on the new stack. It handles the
 * actual context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
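    /*
     * Overview (descriptive comment): dispose of the outgoing thread
     * according to its state, pick a new thread via find_best_thread(),
     * relink the run queues, switch the address space if the task has
     * changed, and finally jump to the new thread's saved context.
     */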
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
                THREAD->state = Ready;
                spinlock_unlock(&THREAD->lock);
                thread_ready(THREAD);
                break;

            case Exiting:
                frame_free((__address) THREAD->kstack);
                if (THREAD->ustack) {
                    frame_free((__address) THREAD->ustack);
                }

                /*
                 * Detach from the containing task.
                 */
                spinlock_lock(&TASK->lock);
                list_remove(&THREAD->th_link);
                spinlock_unlock(&TASK->lock);

                spinlock_unlock(&THREAD->lock);

                spinlock_lock(&threads_lock);
                list_remove(&THREAD->threads_link);
                spinlock_unlock(&threads_lock);

                spinlock_lock(&CPU->lock);
                if (CPU->fpu_owner == THREAD)
                    CPU->fpu_owner = NULL;
                spinlock_unlock(&CPU->lock);

                free(THREAD);

                break;

            case Sleeping:
                /*
                 * Prefer the thread after it's woken up.
                 */
                THREAD->pri = -1;

                /*
                 * We need to release wq->lock which we locked in
                 * waitq_sleep(). The address of wq is kept in
                 * THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
                 * Check for possible requests for out-of-context invocation.
                 */
                if (THREAD->call_me) {
                    THREAD->call_me(THREAD->call_me_with);
                    THREAD->call_me = NULL;
                    THREAD->call_me_with = NULL;
                }

                spinlock_unlock(&THREAD->lock);

                break;

            default:
                /*
                 * Entering state is unexpected.
                 */
                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, a lot of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * The tasks are different and so are their vm mappings.
             * Replace the old mapping with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
    #endif

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread. It supervises the
 * supply of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPU's. Note that the situation can change between two
     * passes. Each time, get the most up-to-date counts.
     */
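    /*
     * Worked example of the arithmetic below (figures are illustrative):
     * with nrdy = 12 ready threads on config.cpu_active = 4 CPUs and
     * CPU->nrdy = 1, count = 12 / 4 - 1 = 2 threads should be stolen.
     */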
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPU's first and the
     * highest-priority queues on all CPU's last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];
            r = &cpu->rq[j];

            /*
             * We are not interested in stealing from ourselves.
             * No interrupt disabling is required here because
             * kcpulb is X_WIRED to this CPU.
             */
            if (CPU == cpu)
                continue;

restart:    pri = cpu_priority_high();
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPU's without ever
                 * being run. We also don't want to steal threads whose
                 * FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than
                     * we do.
                     */
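                    /*
                     * Note on the lock ordering: relink_rq() takes cpu->lock
                     * first and r->lock second, while we already hold r->lock
                     * here. A blocking acquire of cpu->lock could therefore
                     * deadlock; trylock plus a full retry breaks the cycle.
                     */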
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    spinlock_lock(&nrdylock);
                    nrdy--;
                    spinlock_unlock(&nrdylock);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */