
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <print.h>
#include <mm/frame.h>
#include <mm/heap.h>


/** Number of ready threads in the system. */
volatile int nrdy;


/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore(&(THREAD->saved_fpu_context));
}


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag that kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
        #endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This saves energy and is friendly to hyperthreading.
         * On the other hand, a wakeup can arrive just before the CPU
         * halts, in which case it is effectively ignored until the
         * next interrupt comes along.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    i = 0;
retry:
    for (; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        /* avoid deadlock with relink_rq() */
        if (!spinlock_trylock(&CPU->lock)) {
            /*
             * Unlock r and try again.
             */
            spinlock_unlock(&r->lock);
            goto retry;
        }
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i+1)*10000);
        t->pri = i; /* correct the rq index; it may have changed */

        /*
         * Clear the X_STOLEN flag so that t can be migrated again
         * when the need for load balancing emerges.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}
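
/*
 * Illustrative note: the spinlock_trylock() in find_best_thread() exists
 * because relink_rq() nests a run queue lock inside CPU->lock, while
 * find_best_thread() takes the run queue lock first; locking CPU->lock
 * unconditionally here could therefore deadlock. The sketch below (not
 * compiled; lock_both_backoff() is a hypothetical name, not kernel API)
 * distills that back-off pattern. Note also that the time quantum
 * assigned above grows with the queue index: a thread taken from rq[0]
 * gets us2ticks(10000), i.e. 10 ms, one from rq[1] gets 20 ms, and so on.
 */
#if 0
static void lock_both_backoff(spinlock_t *first, spinlock_t *second)
{
    for (;;) {
        spinlock_lock(first);
        if (spinlock_trylock(second))
            return; /* both locks held; no deadlock possible */
        /* second is held by somebody locking in the opposite order;
         * release everything and start over instead of spinning */
        spinlock_unlock(first);
    }
}
#endif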


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with 'pri'
 * greater than or equal to 'start' are moved to higher-priority queues.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT-1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}
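
/*
 * Worked example (assuming, for illustration, RQ_COUNT == 4): once
 * CPU->needs_relink exceeds NEEDS_RELINK_MAX, relink_rq(1) first moves
 * the contents of rq[2] into rq[1] and then the contents of rq[3] into
 * rq[2]. Every queue with an index above the threshold thus shifts one
 * priority level up, so low-priority threads eventually bubble back
 * towards rq[0] instead of starving.
 */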


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        fpu_context_save(&(THREAD->saved_fpu_context));
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }

        /*
         * CPU priority of preempted thread is recorded here
         * to facilitate scheduler() invocations from
         * cpu_priority_high()'ed code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.pri = pri;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
     * and the preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We must not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}
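
/*
 * Illustrative sketch (not compiled): an approximation of the per-stack
 * bookkeeping that the 'THE' comments above refer to. The real the_t
 * layout lives elsewhere in the tree; the fields below are assumptions
 * made for illustration only.
 */
#if 0
typedef struct {
    int preemption_disabled;    /* preemption counter */
    thread_t *thread;           /* current thread */
    task_t *task;               /* current task */
    cpu_t *cpu;                 /* executing cpu */
} the_sketch_t;                 /* hypothetical mirror of the_t */
#endif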


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * executed on the new stack. It handles the actual
 * context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

            case Exiting:
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if (CPU->fpu_owner == THREAD)
                CPU->fpu_owner = NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

            case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->pri = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

            default:
            /*
             * Entering the scheduler in this thread state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, a lot of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both the tasks and their vm mappings are different.
             * Replace the old mapping with the new one.
             */
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
    #endif

    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * The SMP load-balancing thread; it supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated (stolen) from
     * other CPUs. Note that the situation may have changed between two
     * passes; each pass therefore uses the most up-to-date counts.
     */
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);

    if (count <= 0)
        goto satisfied;
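
    /*
     * Worked example of the count calculation above: with nrdy == 12
     * ready threads system-wide, config.cpu_active == 4 and a local
     * CPU->nrdy == 1, the per-CPU average is 12 / 4 == 3, so
     * count == 3 - 1 == 2 threads will be stolen before this CPU
     * considers itself satisfied.
     */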

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT-1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * We are not interested in stealing from ourselves.
             * No interrupt disabling is required here, because
             * kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;

restart:
            pri = cpu_priority_high();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPUs without ever
                 * being run. We also don't want to steal threads whose
                 * FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {

                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */