/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

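/** Number of ready threads in the system. */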
atomic_t nrdy;

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init(&(THREAD->saved_fpu_context));
        THREAD->fpu_context_exists = 1;
    }
#endif
}

  79.  
  80. /** Take actions after old thread ran.
  81.  *
  82.  * Perform actions that need to be
  83.  * taken after the running thread
  84.  * was preempted by the scheduler.
  85.  *
  86.  * THREAD->lock is locked on entry
  87.  *
  88.  */
  89. void after_thread_ran(void)
  90. {
  91.     after_thread_ran_arch();
  92. }
  93.  
#ifdef CONFIG_FPU_LAZY
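/** Request the FPU for the current thread (lazy FPU switching).
 *
 * Save the FPU context of the previous owner, if any, then restore or
 * initialize the context of THREAD and make THREAD the new owner of
 * this CPU's FPU.
 */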
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init(&(THREAD->saved_fpu_context));
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;

    spinlock_unlock(&THREAD->lock);
    spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there is nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This saves energy and improves hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will go to sleep anyway,
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

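        /*
         * Give t a time quantum proportional to its run queue index:
         * (i + 1) * 10000 microseconds, i.e. 10 ms per priority level.
         */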
        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * when load balancing needs arise.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. Handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        /* must be run after switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
            case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

            case Exiting:
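            /*
             * The thread has exited and will not run again;
             * reclaim its structures.
             */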
            thread_destroy(THREAD);
            break;

            case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

            default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }


    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, a lot of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
     * to the thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}


/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

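    /* If the kernel is being halted, stop this CPU instead of scheduling. */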
    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
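        /* Eager FPU switching: save the FPU context on every reschedule. */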
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler().
             */
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * The interrupt priority level of the preempted thread is recorded
         * here to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and the preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We must not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load-balancing thread; it supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation can have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first
     * and the highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

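            /*
             * Offset the scan by k so that, after a successful steal,
             * the next pass starts from a different CPU.
             */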
            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * This doesn't require interrupt disabling, because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents threads
                 * from migrating between CPUs without ever being run.
                 * We also don't want to steal threads whose FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    printf("*********** Scheduler dump ***********\n");
    for (cpu = 0; cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;
        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d needs_relink: %d\n",
               cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\tRq %d: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid,
                       thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}