/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/vm.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>
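/** Number of ready threads in the system. */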
volatile count_t nrdy;


/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef FPU_LAZY
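    /*
     * Lazy FPU switching: keep the FPU enabled only for the thread that
     * owns this CPU's FPU context; any other thread runs with the FPU
     * disabled so that its first FPU instruction can trap and the
     * context can be taken over in scheduler_fpu_lazy_request().
     */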
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

#ifdef FPU_LAZY
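/** Take over the FPU for the current thread
 *
 * Save the FPU context of its previous owner, if any, and make THREAD
 * the new owner of this CPU's FPU. Presumably called when a thread
 * touches the FPU while it is disabled.
 *
 */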
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    if (CPU->fpu_owner != NULL) {
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
    }
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

    ASSERT(CPU != NULL);

loop:
    interrupts_disable();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    interrupts_enable();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag indicating that kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
        #endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This saves energy and benefits hyperthreading.
         * On the other hand, several hardware interrupts can be ignored.
         */
        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

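    /*
     * Scan the run queues, starting with the highest-priority
     * queue rq[0] and continuing towards lower priorities.
     */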
    i = 0;
retry:
    for (; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        /* avoid deadlock with relink_rq() */
        if (!spinlock_trylock(&CPU->lock)) {
            /*
             * Unlock r and try again.
             */
            spinlock_unlock(&r->lock);
            goto retry;
        }
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        atomic_dec((int *) &nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

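        /*
         * The time quantum is proportional to the queue index:
         * (i+1) * 10 ms, so threads from lower-priority queues
         * run longer once they finally get the CPU.
         */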
        t->ticks = us2ticks((i+1)*10000);
        t->priority = i;    /* correct rq index if needed */

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * when the need for load balancing arises again.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT-1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on a new stack. Handles the actual
 * context switch to a new thread.
 *
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

            case Exiting:
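            /*
             * We are running on the CPU's private stack here, so it is
             * safe to free the dying thread's kernel stack.
             */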
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if (CPU->fpu_owner == THREAD)
                CPU->fpu_owner = NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

            case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;
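            /*
             * The negative priority presumably makes thread_ready()
             * place the thread in the highest-priority run queue once
             * it wakes up.
             */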

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * The address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

            default:
            /*
             * Entering this state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }


    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both the tasks and the vm mappings are different.
             * Replace the old one with the new one.
             */
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);
    #endif

    /*
     * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
     * to the new thread's kernel stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * The interrupt priority level of the preempted thread is recorded
         * here to facilitate scheduler() invocations from
         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
     * and the preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
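    /*
     * Together, the three calls below perform a jump to
     * scheduler_separated_stack() with SP set to the top of the CPU's
     * private stack: context_save() records the current context,
     * context_set() overwrites its PC and SP, and context_restore()
     * then "returns" into the modified context.
     */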
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}




#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPU's. Note that the situation may have changed between two
     * passes; always use the most up-to-date counts.
     */
    ipl = interrupts_disable();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    interrupts_restore(ipl);
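    /*
     * A positive count means this CPU holds fewer ready threads than
     * the system-wide average and will try to steal that many.
     */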

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPU's first
     * and the highest-priority queues on all CPU's last.
     */
    for (j = RQ_COUNT-1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * No need to disable interrupts, for kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;

restart:
            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
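            /*
             * Threads at the tail of the queue are the last in line to
             * run on this CPU, so they are presumably the cheapest
             * ones to migrate.
             */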
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPU's without ever
                 * being run. Neither do we want to steal threads whose
                 * FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {

                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        interrupts_restore(ipl);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    atomic_dec((int *) &nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */