/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>

char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};

/* System-wide list of all threads, protected by threads_lock. */
spinlock_t threads_lock;
link_t threads_head;

/* Thread ID allocation. */
static spinlock_t tidlock;
__u32 last_tid = 0;

/*
 * cushion() is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * Entered with interrupts disabled, i.e. cpu_priority_high()'d.
 */
void cushion(void)
{
    void (*f)(void *) = THREAD->thread_code;
    void *arg = THREAD->thread_arg;

    before_thread_runs();

    /* This is where each thread wakes up after its creation. */
    spinlock_unlock(&THREAD->lock);
    cpu_priority_low();

    f(arg);
    thread_exit();
    /* not reached */
}

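/*
 * Initialize the threading subsystem: no thread is running yet,
 * the ready-thread counter is zeroed, and the global thread list
 * and its lock are set up.
 */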
void thread_init(void)
{
    THREAD = NULL;
    nrdy = 0;
    spinlock_initialize(&threads_lock);
    list_initialize(&threads_head);
}

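/*
 * Make thread t ready to run: append it to the ready queue of the
 * appropriate priority on the appropriate CPU (the thread's own CPU
 * if it is wired, otherwise the current one). If that CPU now has
 * more ready threads than the system-wide average, a wakeup IPI is
 * broadcast so that idle, halted CPUs can pick up the work.
 */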
void thread_ready(thread_t *t)
{
    cpu_t *cpu;
    runq_t *r;
    pri_t pri;
    int i, avg;

    pri = cpu_priority_high();

    spinlock_lock(&t->lock);

    i = (t->pri < RQ_COUNT - 1) ? ++t->pri : t->pri;

    cpu = CPU;
    if (t->flags & X_WIRED) {
        cpu = t->cpu;
    }
    spinlock_unlock(&t->lock);

    /*
     * Append t to the respective ready queue on the respective processor.
     */
    r = &cpu->rq[i];
    spinlock_lock(&r->lock);
    list_append(&t->rq_link, &r->rq_head);
    r->n++;
    spinlock_unlock(&r->lock);

    spinlock_lock(&nrdylock);
    avg = ++nrdy / config.cpu_active;
    spinlock_unlock(&nrdylock);

    spinlock_lock(&cpu->lock);
    if ((++cpu->nrdy) > avg) {
        /*
         * If there are idle, halted CPUs, this will wake them up.
         */
        ipi_broadcast(VECTOR_WAKEUP_IPI);
    }
    spinlock_unlock(&cpu->lock);

    cpu_priority_restore(pri);
}

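/*
 * Create a new thread that will execute func(arg) on behalf of task.
 * A kernel stack frame (and, if THREAD_USER_STACK is set in flags, a
 * user stack frame) is allocated, the saved context is pointed at
 * cushion(), and the thread is left in the Entering state; the caller
 * must hand it to thread_ready() before it can be scheduled. Returns
 * NULL if the thread structure cannot be allocated.
 *
 * Minimal usage sketch; 'worker' and 'some_task' are placeholder names,
 * not identifiers from this file:
 *
 *     thread_t *t = thread_create(worker, NULL, some_task, 0);
 *     if (t)
 *         thread_ready(t);
 */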
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
    thread_t *t;
    __address frame_ks, frame_us = NULL;

    t = (thread_t *) malloc(sizeof(thread_t));
    if (t) {
        pri_t pri;

        spinlock_initialize(&t->lock);

        frame_ks = frame_alloc(FRAME_KA);
        if (THREAD_USER_STACK & flags) {
            frame_us = frame_alloc(0);
        }

        pri = cpu_priority_high();
        spinlock_lock(&tidlock);
        t->tid = ++last_tid;
        spinlock_unlock(&tidlock);
        cpu_priority_restore(pri);

        memsetb(frame_ks, THREAD_STACK_SIZE, 0);
        link_initialize(&t->rq_link);
        link_initialize(&t->wq_link);
        link_initialize(&t->th_link);
        link_initialize(&t->threads_link);
        t->kstack = (__u8 *) frame_ks;
        t->ustack = (__u8 *) frame_us;

        context_save(&t->saved_context);
        t->saved_context.pc = (__address) cushion;
        t->saved_context.sp = (__address) &t->kstack[THREAD_STACK_SIZE-8];

        pri = cpu_priority_high();
        t->saved_context.pri = cpu_priority_read();
        cpu_priority_restore(pri);

        t->thread_code = func;
        t->thread_arg = arg;
        t->ticks = -1;
        t->pri = -1;        /* start in rq[0] */
        t->cpu = NULL;
        t->flags = 0;
        t->state = Entering;
        t->call_me = NULL;
        t->call_me_with = NULL;

        timeout_initialize(&t->sleep_timeout);
        t->sleep_queue = NULL;
        t->timeout_pending = 0;

        t->rwlock_holder_type = RWLOCK_NONE;

        t->task = task;

        t->fpu_context_exists = 0;
        t->fpu_context_engaged = 0;

        /*
         * Register this thread in the system-wide list.
         */
        pri = cpu_priority_high();
        spinlock_lock(&threads_lock);
        list_append(&t->threads_link, &threads_head);
        spinlock_unlock(&threads_lock);

        /*
         * Attach to the containing task.
         */
        spinlock_lock(&task->lock);
        list_append(&t->th_link, &task->th_head);
        spinlock_unlock(&task->lock);

        cpu_priority_restore(pri);
    }

    return t;
}

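/*
 * Terminate the current thread. If a timeout is still pending for it,
 * busy-wait until the timeout handler has fired, then mark the thread
 * Exiting and enter the scheduler. Never returns.
 */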
void thread_exit(void)
{
    pri_t pri;

restart:
    pri = cpu_priority_high();
    spinlock_lock(&THREAD->lock);
    if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
        spinlock_unlock(&THREAD->lock);
        cpu_priority_restore(pri);
        goto restart;
    }
    THREAD->state = Exiting;
    spinlock_unlock(&THREAD->lock);
    scheduler();
}

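/*
 * Suspend execution of the current thread for sec seconds.
 */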
void thread_sleep(__u32 sec)
{
    thread_usleep(sec * 1000000);
}

/*
 * Suspend execution of the current thread for usec microseconds.
 */
void thread_usleep(__u32 usec)
{
    waitq_t wq;

    waitq_initialize(&wq);

    (void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}

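/*
 * Register a function (call_me) and its argument (call_me_with) for
 * the current thread; both are stored in the thread structure under
 * its lock, with interrupts disabled.
 */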
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
    pri_t pri;

    pri = cpu_priority_high();
    spinlock_lock(&THREAD->lock);
    THREAD->call_me = call_me;
    THREAD->call_me_with = call_me_with;
    spinlock_unlock(&THREAD->lock);
    cpu_priority_restore(pri);
}