/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>

static void waitq_timeouted_sleep(void *data);

/** Initialize wait queue
 *
 * Initialize the wait queue: set up its lock, empty its list of
 * sleeping threads and zero its count of missed wakeups.
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
    spinlock_initialize(&wq->lock, "waitq_lock");
    list_initialize(&wq->head);
    wq->missed_wakeups = 0;
}
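
/*
 * Example (editor's sketch, not part of the original file): a wait queue
 * is typically embedded in a higher-level synchronization primitive and
 * initialized once before any sleeper or waker touches it. The type
 * my_sem_t below is hypothetical.
 */
#if 0
typedef struct {
    int counter;
    waitq_t wq;
} my_sem_t;

static void my_sem_initialize(my_sem_t *s, int value)
{
    s->counter = value;
    waitq_initialize(&s->wq);   /* now safe to sleep on and wake up */
}
#endif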

/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It tries to remove 'its' thread from the wait queue; it may
 * fail to do so when the timeout races with a regular wakeup.
 * In that case it behaves just as though there was no timeout
 * at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
static void waitq_timeouted_sleep(void *data)
{
    thread_t *t = (thread_t *) data;
    waitq_t *wq;
    bool do_wakeup = false;

    spinlock_lock(&threads_lock);
    if (!thread_exists(t))
        goto out;

grab_locks:
    spinlock_lock(&t->lock);
    if ((wq = t->sleep_queue)) {        /* assignment */
        if (!spinlock_trylock(&wq->lock)) {
            spinlock_unlock(&t->lock);
            goto grab_locks;    /* avoid deadlock */
        }

        list_remove(&t->wq_link);
        t->saved_context = t->sleep_timeout_context;
        do_wakeup = true;

        spinlock_unlock(&wq->lock);
        t->sleep_queue = NULL;
    }

    t->timeout_pending = false;
    spinlock_unlock(&t->lock);

    if (do_wakeup)
        thread_ready(t);

out:
    spinlock_unlock(&threads_lock);
}
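
/*
 * Editor's note: the grab_locks retry above is a trylock-based deadlock
 * avoidance pattern. The canonical locking order in this file is
 * wq->lock before t->lock (see waitq_sleep_timeout() and
 * _waitq_wakeup_unsafe()); code that must acquire them in the opposite
 * order never blocks on the second lock. A minimal generic sketch of
 * the same idea, with hypothetical locks A (outer) and B (inner):
 */
#if 0
spinlock_t A;   /* canonical order: A before B */
spinlock_t B;

static void lock_both_in_reverse_order(void)
{
    spinlock_lock(&B);
    while (!spinlock_trylock(&A)) {
        /* The holder of A may be spinning on B; back off to let it run. */
        spinlock_unlock(&B);
        spinlock_lock(&B);
        /* The real code also revalidates state here (goto grab_locks). */
    }
    /* Both A and B are held; no deadlock was possible. */
    spinlock_unlock(&A);
    spinlock_unlock(&B);
}
#endif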

/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a wait queue.
 * If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
    waitq_t *wq;
    bool do_wakeup = false;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);
    if (!thread_exists(t))
        goto out;

grab_locks:
    spinlock_lock(&t->lock);
    if ((wq = t->sleep_queue)) {        /* assignment */
        if (!spinlock_trylock(&wq->lock)) {
            spinlock_unlock(&t->lock);
            goto grab_locks;    /* avoid deadlock */
        }

        list_remove(&t->wq_link);
        t->saved_context = t->sleep_interruption_context;
        do_wakeup = true;

        spinlock_unlock(&wq->lock);
        t->sleep_queue = NULL;
    }
    spinlock_unlock(&t->lock);

    if (do_wakeup)
        thread_ready(t);

out:
    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
}


/** Sleep until either wakeup, timeout or interruption occurs
 *
 * This is a sleep implementation which allows itself to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
 *
 * This function is the basic building block on which waitq_sleep()
 * and all the *_timeout() functions are implemented.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param nonblocking Blocking vs. non-blocking operation mode switch.
 *
 * If @usec is greater than zero, regardless of the value of @nonblocking,
 * the call will not return until either timeout or wakeup comes.
 *
 * If @usec is zero and @nonblocking is zero (false), the call
 * will not return until wakeup comes.
 *
 * If @usec is zero and @nonblocking is non-zero (true), the call will
 * immediately return, reporting either success or failure.
 *
 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
 *         ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
 *
 * ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
 * of the call there was no pending wakeup.
 *
 * ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
 *
 * ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * asleep at all.
 *
 * ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
{
    volatile ipl_t ipl; /* must be live after context_restore() */

restart:
    ipl = interrupts_disable();

    /*
     * Busy waiting for a delayed timeout.
     * This is an important fix for the race condition between
     * a delayed timeout and a next call to waitq_sleep_timeout().
     * Simply, the thread is not allowed to go to sleep if
     * there are timeouts in progress.
     */
    spinlock_lock(&THREAD->lock);
    if (THREAD->timeout_pending) {
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
        goto restart;
    }
    spinlock_unlock(&THREAD->lock);

    spinlock_lock(&wq->lock);

    /* Check whether to go to sleep at all. */
    if (wq->missed_wakeups) {
        wq->missed_wakeups--;
        spinlock_unlock(&wq->lock);
        interrupts_restore(ipl);
        return ESYNCH_OK_ATOMIC;
    } else if (nonblocking && (usec == 0)) {
        /* Return immediately instead of going to sleep. */
        spinlock_unlock(&wq->lock);
        interrupts_restore(ipl);
        return ESYNCH_WOULD_BLOCK;
    }

    /*
     * Now we are firmly decided to go to sleep.
     */
    spinlock_lock(&THREAD->lock);

    /*
     * Set context that will be restored if the sleep
     * of this thread is ever interrupted.
     */
    if (!context_save(&THREAD->sleep_interruption_context)) {
        /* Short emulation of scheduler() return code. */
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
        return ESYNCH_INTERRUPTED;
    }

    if (usec) {
        /* We use the timeout variant. */
        if (!context_save(&THREAD->sleep_timeout_context)) {
            /* Short emulation of scheduler() return code. */
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(ipl);
            return ESYNCH_TIMEOUT;
        }
        THREAD->timeout_pending = true;
        timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_timeouted_sleep, THREAD);
    }

    list_append(&THREAD->wq_link, &wq->head);

    /*
     * Suspend execution.
     */
    THREAD->state = Sleeping;
    THREAD->sleep_queue = wq;

    spinlock_unlock(&THREAD->lock);

    scheduler();    /* wq->lock is released in scheduler_separated_stack() */
    interrupts_restore(ipl);

    return ESYNCH_OK_BLOCKED;
}
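
/*
 * Example (editor's sketch, not part of the original file): the three
 * calling modes of waitq_sleep_timeout() and the usual interpretation
 * of its return values. The wait queue pointer wq is assumed to be
 * initialized.
 */
#if 0
static void sleep_modes_example(waitq_t *wq)
{
    int rc;

    /* Non-blocking: consume a pending wakeup or fail right away. */
    rc = waitq_sleep_timeout(wq, 0, 1);
    if (rc == ESYNCH_WOULD_BLOCK) {
        /* There was no pending wakeup. */
    }

    /* Block indefinitely until a wakeup or an interruption comes. */
    rc = waitq_sleep_timeout(wq, 0, 0);

    /* Block for at most 1000 microseconds. */
    switch (waitq_sleep_timeout(wq, 1000, 0)) {
    case ESYNCH_OK_ATOMIC:      /* a wakeup was pending; we never slept */
    case ESYNCH_OK_BLOCKED:     /* we slept and were woken up */
        break;
    case ESYNCH_TIMEOUT:        /* 1000 us passed without a wakeup */
        break;
    case ESYNCH_INTERRUPTED:    /* waitq_interrupt_sleep() interrupted us */
        break;
    }
}
#endif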


/** Wake up first thread sleeping in a wait queue
 *
 * Wake up the first thread sleeping in a wait queue.
 * This is the SMP- and IRQ-safe wrapper meant for
 * general use.
 *
 * Besides its 'normal' wakeup operation, it attempts
 * to unregister a possible timeout.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 *        will be woken up and the missed wakeups count will be zeroed.
 */
void waitq_wakeup(waitq_t *wq, bool all)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&wq->lock);

    _waitq_wakeup_unsafe(wq, all);

    spinlock_unlock(&wq->lock);
    interrupts_restore(ipl);
}
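
/*
 * Example (editor's sketch, not part of the original file): a typical
 * sleep/wakeup pairing. The wait queue my_wq and the flag done are
 * hypothetical; done would normally be guarded by the surrounding
 * primitive's own state.
 */
#if 0
static waitq_t my_wq;           /* initialized via waitq_initialize() */
static volatile bool done;

static void consumer(void)
{
    while (!done)
        waitq_sleep_timeout(&my_wq, 0, 0);  /* block until signalled */
}

static void producer(void)
{
    done = true;
    waitq_wakeup(&my_wq, false);    /* wake one sleeper; pass true to wake all */
}
#endif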

/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version
 * of waitq_wakeup(). It assumes wq->lock is already
 * locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 *        will be woken up and the missed wakeups count will be zeroed.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
{
    thread_t *t;

loop:
    if (list_empty(&wq->head)) {
        wq->missed_wakeups++;
        if (all)
            wq->missed_wakeups = 0;
        return;
    }

    t = list_get_instance(wq->head.next, thread_t, wq_link);

    list_remove(&t->wq_link);
    spinlock_lock(&t->lock);
    if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
        t->timeout_pending = false;
    t->sleep_queue = NULL;
    spinlock_unlock(&t->lock);

    thread_ready(t);

    if (all)
        goto loop;
}
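
/*
 * Example (editor's sketch, not part of the original file): wakeups are
 * never lost. A wakeup that arrives while nobody sleeps is recorded in
 * missed_wakeups, so the next sleeper returns at once. The wait queue
 * pointer wq is assumed to be initialized and otherwise idle.
 */
#if 0
static void missed_wakeup_example(waitq_t *wq)
{
    int rc;

    waitq_wakeup(wq, false);            /* no sleepers: missed_wakeups becomes 1 */
    rc = waitq_sleep_timeout(wq, 0, 0); /* consumes it: rc == ESYNCH_OK_ATOMIC */
}
#endif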