/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void scheduler_separated_stack(void);

atomic_t nrdy;  /**< Number of ready threads in the system. */

/** Take actions before a new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
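
    /*
     * FPU handling: with CONFIG_FPU_LAZY, the FPU is enabled only if this
     * thread already owns the CPU's FPU context; otherwise it is left
     * disabled and the save/restore is deferred to
     * scheduler_fpu_lazy_request(), which is presumably invoked from the
     * architecture's FPU trap handler. Without CONFIG_FPU_LAZY, the context
     * is restored eagerly here and saved eagerly in scheduler().
     */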
    #ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
    #else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init(&(THREAD->saved_fpu_context));
        THREAD->fpu_context_exists = 1;
    }
    #endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
    after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
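/** Claim the FPU for the current thread.
 *
 * Saves the FPU context of the previous owner (if any), then restores or
 * initializes the context of THREAD and records THREAD as the new owner.
 * Presumably called from the architecture's "FPU disabled" trap handler
 * when a thread that does not own the CPU's FPU context uses the FPU.
 */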
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists) {
        fpu_context_restore(&THREAD->saved_fpu_context);
    } else {
        fpu_init(&(THREAD->saved_fpu_context));
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
    spinlock_unlock(&THREAD->lock);

    spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This improves energy saving and hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will go to sleep anyway,
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

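    /*
     * Note: rq[0] holds the highest-priority threads; the search below thus
     * proceeds from the highest priority down to the lowest.
     */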
    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

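        /*
         * The time quantum scales with the queue index: a thread taken from
         * rq[i] gets (i + 1) * 10 ms, so lower-priority threads receive
         * longer time slices.
         */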
        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated again
         * when the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with priority
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
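        /*
         * Shift each rq[i + 1] into rq[i] for i >= start, so that threads
         * waiting in lower-priority queues gradually bubble up towards the
         * higher-priority queues; needs_relink is presumably advanced
         * elsewhere (e.g. by the clock interrupt handler) as the CPU
         * accumulates scheduling activity.
         */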
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        #ifndef CONFIG_FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
        #endif
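        /*
         * context_save()/context_restore() work as a setjmp()/longjmp()-like
         * pair: judging by the branch below, context_save() returns a nonzero
         * value when it has just saved the context and zero when the saved
         * context is later resumed via context_restore(). The body of the
         * if statement is therefore executed only when this thread is
         * scheduled to run again.
         */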
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * The interrupt priority level of the preempted thread is recorded
         * here to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and the preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We must not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * using the new stack. Handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        /* must be run after the switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
            case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

            case Exiting:
            thread_destroy(THREAD);
            break;

            case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

            default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, a lot of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both the tasks and the address spaces are different.
             * Replace the old one with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
    #endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes. Get the most up-to-date counts each time.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);
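
    /*
     * 'average' is (roughly) the per-CPU share of ready threads, rounded up;
     * 'count' is how many threads this CPU falls short of that share.
     * For example, with 9 ready threads on 4 active CPUs, average is
     * 9 / 4 + 1 = 3, and a CPU with only 1 ready thread will try to steal
     * count = 2 threads.
     */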

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];
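
            /*
             * 'k' offsets the starting CPU; it is incremented after each
             * successful steal (see below), so that subsequent attempts
             * focus on a different CPU.
             */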

            /*
             * Not interested in ourselves.
             * No interrupt disabling is needed here because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads or threads that
                 * have already been stolen. The latter prevents threads from
                 * migrating between CPUs without ever being run. We also
                 * don't want to steal threads whose FPU context is still in
                 * the CPU.
                 */
                spinlock_lock(&t->lock);
                if (!(t->flags & (X_WIRED | X_STOLEN)) && !(t->fpu_context_engaged)) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit lightweight and let the migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    printf("Scheduler dump:\n");
    for (cpu = 0; cpu < config.cpu_count; cpu++) {

        if (!cpus[cpu].active)
            continue;

        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d, needs_relink: %d\n",
               cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\trq[%d]: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid,
                       thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}