/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>

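/*
 * Global count of ready threads across all CPUs; per-CPU counts are kept
 * in CPU->nrdy. Updated with atomic primitives (see atomic_dec() below);
 * presumably incremented by thread_ready(), which lives elsewhere.
 */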
volatile int nrdy;


/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore(&(THREAD->saved_fpu_context));
}


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
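    /*
     * Nothing to do here yet; the run queues are presumably set up as
     * part of the per-CPU data-structure initialization elsewhere.
     */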
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag that kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
        #endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This saves energy and improves hyperthreading performance.
         * On the other hand, several hardware interrupts may be ignored.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

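    /*
     * The run queues are ordered by priority: rq[0] holds the
     * highest-priority threads, so the search below proceeds from the
     * highest-priority queue towards lower ones.
     */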
    i = 0;
retry:
    for (; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        /*
         * Avoid deadlock with relink_rq(): it takes CPU->lock before
         * r->lock, while we already hold r->lock here, so CPU->lock may
         * only be tried, never waited for.
         */
        if (!spinlock_trylock(&CPU->lock)) {
            /*
             * Unlock r and try again.
             */
            spinlock_unlock(&r->lock);
            goto retry;
        }
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

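        /*
         * The timeslice grows linearly with the queue index: a thread
         * taken from rq[i] gets (i + 1) * 10000 us, i.e. 10 ms for rq[0],
         * 20 ms for rq[1], and so on.
         */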
        t->ticks = us2ticks((i + 1) * 10000);
        t->pri = i; /* correct the rq index if it changed */

        /*
         * Clear the X_STOLEN flag so that t can be migrated again
         * when the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * 'pri' greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
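        /*
         * Shift each queue below the threshold up by one level, e.g. with
         * start == 0 the threads from rq[1] are appended to rq[0], those
         * from rq[2] to rq[1], and so on.
         */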
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        fpu_context_save(&(THREAD->saved_fpu_context));
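        /*
         * context_save() works like setjmp(): judging from its use here,
         * it returns nonzero on the direct call and zero when control
         * comes back to it via context_restore(), so the branch below is
         * taken by the thread that is being switched back in.
         */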
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        THREAD->saved_context.pri = pri;
    }

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
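    /*
     * The sequence below saves the current context, repoints its PC to
     * scheduler_separated_stack() and its SP to the per-CPU stack, and
     * then jumps there via context_restore(). Control never returns here.
     */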
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the
 * actual context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
                THREAD->state = Ready;
                spinlock_unlock(&THREAD->lock);
                thread_ready(THREAD);
                break;

            case Exiting:
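                /*
                 * Freeing the thread's own kernel stack is safe here
                 * because we are already running on the CPU's private
                 * stack (see the stack switch in scheduler()).
                 */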
                frame_free((__address) THREAD->kstack);
                if (THREAD->ustack) {
                    frame_free((__address) THREAD->ustack);
                }

                /*
                 * Detach from the containing task.
                 */
                spinlock_lock(&TASK->lock);
                list_remove(&THREAD->th_link);
                spinlock_unlock(&TASK->lock);

                spinlock_unlock(&THREAD->lock);

                spinlock_lock(&threads_lock);
                list_remove(&THREAD->threads_link);
                spinlock_unlock(&threads_lock);

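                /*
                 * Drop the CPU's record of FPU ownership so that no stale
                 * pointer to the freed thread is kept around.
                 */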
                spinlock_lock(&CPU->lock);
                if (CPU->fpu_owner == THREAD)
                    CPU->fpu_owner = NULL;
                spinlock_unlock(&CPU->lock);

                free(THREAD);

                break;

            case Sleeping:
                /*
                 * Prefer the thread after it's woken up.
                 */
                THREAD->pri = -1;

                /*
                 * We need to release wq->lock which we locked in waitq_sleep().
                 * The address of wq->lock is kept in THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
                 * Check for possible requests for out-of-context invocation.
                 */
                if (THREAD->call_me) {
                    THREAD->call_me(THREAD->call_me_with);
                    THREAD->call_me = NULL;
                    THREAD->call_me_with = NULL;
                }

                spinlock_unlock(&THREAD->lock);

                break;

            default:
                /*
                 * Any remaining state (such as Entering) is unexpected.
                 */
                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

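    /*
     * Use the chosen thread's priority as the relink threshold, so that
     * only queues at and below its priority level are candidates for
     * being boosted by relink_rq().
     */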
    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, a lot of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both the tasks and the vm mappings are different.
             * Replace the old mapping with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
    #endif

    context_restore(&THREAD->saved_context);
    /* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of ready threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads to be migrated/stolen from
     * other CPUs. Note that the situation may have changed between
     * two passes; get the most up-to-date counts each time.
     */
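    /*
     * Steal enough threads to bring this CPU up to the system average,
     * e.g. with nrdy == 12 on 4 active CPUs the average is 3, so a CPU
     * holding a single ready thread will try to migrate 2 more.
     */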
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * No need to disable interrupts, because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;

restart:
            pri = cpu_priority_high();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search the queue from the back */
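            /*
             * Walking from the tail presumably prefers the threads that
             * would run last on their home CPU and are thus the least
             * likely to have a warm cache there.
             */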
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPUs without ever being
                 * run. We also don't want to steal threads whose FPU
                 * context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {

                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on the local CPU.
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */