/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <mm/frame.h>     /* frame_free(); assumed header path */
#include <mm/heap.h>      /* free(); assumed header path */
#include <synch/spinlock.h>
#include <synch/waitq.h>  /* waitq_sleep(), waitq_wakeup(); assumed header path */
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <print.h>        /* printf(); assumed header path */

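/** Global count of ready threads (across all CPUs' run queues). */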
volatile int nrdy;


/** Take actions before thread runs
 *
 * Perform actions that need to be taken
 * before a thread starts running on this
 * CPU: architecture-specific setup and
 * restoration of the thread's FPU context.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore(&(THREAD->saved_fpu_context));
}


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
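 * Run queues are scanned from rq[0] (highest priority) upwards and the
 * thread at the head of the first non-empty queue is taken. If all
 * queues are empty, the CPU is put to sleep (on SMP, after waking up
 * the load balancing thread).
 *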
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag that kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
        #endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This saves energy and plays well with hyperthreading.
         * On the other hand, several hardware interrupts can be missed.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    i = 0;
retry:
    for (; i<RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        /* avoid deadlock with relink_rq() */
        if (!spinlock_trylock(&CPU->lock)) {
            /*
             * Unlock r and try again.
             */
            spinlock_unlock(&r->lock);
            goto retry;
        }
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i+1)*10000);   /* time quantum grows with rq index */
        t->pri = i; /* update to the rq index the thread was found in */

        /*
         * Clear the X_STOLEN flag so that t can be migrated again
         * when the need for load balancing emerges.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;
}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with
 * 'pri' greater than 'start' are moved to a higher-priority
 * queue.
 *
 * @param start Threshold priority.
 *
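 * For example, with RQ_COUNT == 4 and start == 1, threads from rq[2]
 * are appended to rq[1], threads from rq[3] then move to rq[2], and
 * rq[3] is left empty.
 *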
 */
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i<RQ_COUNT-1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);
}


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        fpu_context_save(&(THREAD->saved_fpu_context));
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        THREAD->saved_context.pri = pri;
    }

    /*
     * We must not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
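    /*
     * The current context is first saved, context_set() then rewrites its
     * saved instruction and stack pointers to point to
     * scheduler_separated_stack() and the per-CPU stack, and the final
     * context_restore() transfers control there.
     */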
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the
 * actual context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
                THREAD->state = Ready;
                spinlock_unlock(&THREAD->lock);
                thread_ready(THREAD);
                break;

            case Exiting:
                frame_free((__address) THREAD->kstack);
                if (THREAD->ustack) {
                    frame_free((__address) THREAD->ustack);
                }

                /*
                 * Detach from the containing task.
                 */
                spinlock_lock(&TASK->lock);
                list_remove(&THREAD->th_link);
                spinlock_unlock(&TASK->lock);

                spinlock_unlock(&THREAD->lock);

                spinlock_lock(&threads_lock);
                list_remove(&THREAD->threads_link);
                spinlock_unlock(&threads_lock);

                spinlock_lock(&CPU->lock);
                if (CPU->fpu_owner == THREAD)
                    CPU->fpu_owner = NULL;
                spinlock_unlock(&CPU->lock);

                free(THREAD);

                break;

            case Sleeping:
                /*
                 * Prefer the thread after it's woken up.
                 */
                THREAD->pri = -1;

                /*
                 * We need to release wq->lock which we locked in waitq_sleep().
                 * Address of wq->lock is kept in THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
                 * Check for possible requests for out-of-context invocation.
                 */
                if (THREAD->call_me) {
                    THREAD->call_me(THREAD->call_me_with);
                    THREAD->call_me = NULL;
                    THREAD->call_me_with = NULL;
                }

                spinlock_unlock(&THREAD->lock);

                break;

            default:
                /*
                 * The thread is in an unexpected state.
                 */
                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, much work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both tasks and vm mappings are different.
             * Replace the old one with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
    #endif

    context_restore(&THREAD->saved_context);
    /* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread. It supervises the supply
 * of threads for the CPU it is wired to.
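 *
 * It sleeps on the CPU-private kcpulb_wq and is woken up from
 * find_best_thread() when this CPU's run queues are empty; it then
 * attempts to steal ready threads from the run queues of other CPUs.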
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes. Each time, get the most up-to-date counts.
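     *
     * For example, with nrdy == 8, config.cpu_active == 4 and
     * CPU->nrdy == 1, count == 8/4 - 1 == 1, i.e. one thread
     * should be stolen.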
     */
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j=RQ_COUNT-1; j >= 0; j--) {
        for (i=0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * No interrupt disabling is required because kcpulb
             * is X_WIRED and thus cannot be migrated off this CPU.
             */
            if (CPU == cpu)
                continue;

restart:
            pri = cpu_priority_high();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads or threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPUs without ever
                 * being run. We also don't want to steal threads whose
                 * FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up again later.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */