Subversion Repositories HelenOS-historic


/*
 * Reader/Writer locks
 */

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * These locks are not recursive.
 * Neither readers nor writers will suffer starvation.
 *
 * If there is a writer followed by a reader waiting for the rwlock
 * and the writer times out, all leading readers are automatically woken up
 * and allowed in.
 */
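
/*
 * Illustrative usage sketch (not part of the original file). The wrapper
 * names rwlock_read_lock()/rwlock_write_lock() are assumed to be the usual
 * non-timeout convenience macros from synch.h built on the *_timeout
 * functions defined below; 'guard' and 'shared_counter' are hypothetical.
 */
#if 0
static rwlock_t guard;
static int shared_counter;

static void example_init(void)
{
    rwlock_initialize(&guard);      /* must be called before first use */
    shared_counter = 0;
}

static void example_reader(void)
{
    rwlock_read_lock(&guard);       /* many readers may hold the lock at once */
    (void) shared_counter;          /* read the shared state */
    rwlock_read_unlock(&guard);
}

static void example_writer(void)
{
    rwlock_write_lock(&guard);      /* a writer gets exclusive access */
    shared_counter++;               /* modify the shared state */
    rwlock_write_unlock(&guard);
}
#endif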

/*
 * NOTE ON rwlock_holder_type
 * This field is set on an attempt to acquire the exclusive mutex
 * to the respective value depending on whether the caller is a reader
 * or a writer. The field is examined only if the thread had been
 * previously blocked on the exclusive mutex. Thus it is safe
 * to store the rwlock type in the thread structure, because
 * each thread can block on only one rwlock at a time.
 */

#include <synch/synch.h>
#include <synch/rwlock.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <synch/waitq.h>

#include <list.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <arch.h>
#include <proc/thread.h>
#include <panic.h>

#define ALLOW_ALL           0
#define ALLOW_READERS_ONLY  1

static void let_others_in(rwlock_t *rwl, int readers_only);
static void release_spinlock(void *arg);

void rwlock_initialize(rwlock_t *rwl)
{
    spinlock_initialize(&rwl->lock);
    mutex_initialize(&rwl->exclusive);
    rwl->readers_in = 0;
}

int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
    pri_t pri;
    int rc;

    pri = cpu_priority_high();
    spinlock_lock(&the->thread->lock);
    the->thread->rwlock_holder_type = RWLOCK_WRITER;
    spinlock_unlock(&the->thread->lock);
    cpu_priority_restore(pri);

    /*
     * Writers take the easy part.
     * They just need to acquire the exclusive mutex.
     */
    rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
    if (SYNCH_FAILED(rc)) {

        /*
         * Lock operation timed out.
         * The state of rwl is UNKNOWN at this point.
         * No claims about its holder can be made.
         */

        pri = cpu_priority_high();
        spinlock_lock(&rwl->lock);
        /*
         * Now that rwl is locked, we can inspect it again.
         * If it is held by some readers already, we can let
         * readers from the head of the wait queue in.
         */
        if (rwl->readers_in)
            let_others_in(rwl, ALLOW_READERS_ONLY);
        spinlock_unlock(&rwl->lock);
        cpu_priority_restore(pri);
    }

    return rc;
}
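
/*
 * Hypothetical caller pattern for the timeout variant above (a sketch, not
 * part of the original file). The arguments are illustrative only: 1000000
 * stands for an arbitrary timeout in microseconds and the final 0 is assumed
 * to request a blocking (non-trylock) attempt.
 */
#if 0
static int example_timed_update(rwlock_t *rwl)
{
    int rc = _rwlock_write_lock_timeout(rwl, 1000000, 0);
    if (SYNCH_FAILED(rc)) {
        /* The lock was not acquired; rwlock_write_unlock() must not be called. */
        return rc;
    }
    /* ... update the protected data ... */
    rwlock_write_unlock(rwl);
    return rc;
}
#endif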

int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
    int rc;
    pri_t pri;

    pri = cpu_priority_high();
    spinlock_lock(&the->thread->lock);
    the->thread->rwlock_holder_type = RWLOCK_READER;
    spinlock_unlock(&the->thread->lock);

    spinlock_lock(&rwl->lock);

    /*
     * Find out whether we can get what we want without blocking.
     */
    rc = mutex_trylock(&rwl->exclusive);
    if (SYNCH_FAILED(rc)) {

        /*
         * The 'exclusive' mutex is being held by someone else.
         * If the holder is a reader and there is no one
         * else waiting for it, we can enter the critical
         * section.
         */

        if (rwl->readers_in) {
            spinlock_lock(&rwl->exclusive.sem.wq.lock);
            if (list_empty(&rwl->exclusive.sem.wq.head)) {
                /*
                 * We can enter.
                 */
                spinlock_unlock(&rwl->exclusive.sem.wq.lock);
                goto shortcut;
            }
            spinlock_unlock(&rwl->exclusive.sem.wq.lock);
        }

        /*
         * In order to prevent a race condition in which a reader
         * could block another reader at the head of the waitq,
         * we register a function to unlock rwl->lock
         * after this thread is put to sleep.
         */
        thread_register_call_me(release_spinlock, &rwl->lock);

        rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
        switch (rc) {
            case ESYNCH_WOULD_BLOCK:
                /*
                 * release_spinlock() wasn't called
                 */
                thread_register_call_me(NULL, NULL);
                spinlock_unlock(&rwl->lock);
                /* Fall through. */
            case ESYNCH_TIMEOUT:
                /*
                 * The sleep timed out.
                 * We just restore the cpu priority.
                 */
                /* Fall through. */
            case ESYNCH_OK_BLOCKED:
                /*
                 * We were woken with rwl->readers_in already incremented.
                 * Note that this arrangement avoids a race condition between
                 * two concurrent readers. (The race is avoided if 'exclusive'
                 * is locked at the same time as 'readers_in' is incremented.
                 * Same time means both events happen atomically when
                 * rwl->lock is held.)
                 */
                cpu_priority_restore(pri);
                break;
            case ESYNCH_OK_ATOMIC:
                panic(PANIC "_mutex_lock_timeout()==ESYNCH_OK_ATOMIC");
                break;
            default:
                panic(PANIC "invalid ESYNCH");
                break;
        }
        return rc;
    }

shortcut:

    /*
     * We can increment readers_in only if we didn't go to sleep.
     * For sleepers, let_others_in() will do the job.
     */
    rwl->readers_in++;

    spinlock_unlock(&rwl->lock);
    cpu_priority_restore(pri);

    return ESYNCH_OK_ATOMIC;
}

void rwlock_write_unlock(rwlock_t *rwl)
{
    pri_t pri;

    pri = cpu_priority_high();
    spinlock_lock(&rwl->lock);
    let_others_in(rwl, ALLOW_ALL);
    spinlock_unlock(&rwl->lock);
    cpu_priority_restore(pri);
}

void rwlock_read_unlock(rwlock_t *rwl)
{
    pri_t pri;

    pri = cpu_priority_high();
    spinlock_lock(&rwl->lock);
    if (!--rwl->readers_in)
        let_others_in(rwl, ALLOW_ALL);
    spinlock_unlock(&rwl->lock);
    cpu_priority_restore(pri);
}

/*
 * Must be called with rwl->lock locked.
 * Must be called with the CPU priority raised via cpu_priority_high().
 */
/*
 * If readers_only is false: (unlock scenario)
 * Let the first sleeper on the 'exclusive' mutex in, no matter
 * whether it is a reader or a writer. If there are more leading
 * readers in line, let each of them in.
 *
 * Otherwise: (timeout scenario)
 * Let all leading readers in.
 */
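/*
 * Example of the wake-up policy (illustrative, not part of the original
 * file). Suppose the 'exclusive' wait queue holds, head first:
 * R1, R2, W1, R3 (R = reader, W = writer).
 *
 * - Unlock scenario (readers_only == ALLOW_ALL): R1 and R2 are woken and
 *   readers_in is incremented for each of them; the loop stops at W1.
 *   Had W1 been at the head, only W1 would have been woken.
 * - Timeout scenario (readers_only == ALLOW_READERS_ONLY): the leading
 *   readers R1 and R2 are let in, but a leading writer is never woken.
 */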
void let_others_in(rwlock_t *rwl, int readers_only)
{
    rwlock_type_t type = RWLOCK_NONE;
    thread_t *t = NULL;
    int one_more = 1;

    spinlock_lock(&rwl->exclusive.sem.wq.lock);

    if (!list_empty(&rwl->exclusive.sem.wq.head))
        t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
    do {
        if (t) {
            spinlock_lock(&t->lock);
            type = t->rwlock_holder_type;
            spinlock_unlock(&t->lock);
        }

        /*
         * If readers_only is true, we wake all leading readers
         * if and only if rwl is locked by another reader.
         * Assumption: readers_only ==> rwl->readers_in
         */
        if (readers_only && (type != RWLOCK_READER))
            break;

        if (type == RWLOCK_READER) {
            /*
             * Waking up a reader.
             * We are responsible for incrementing rwl->readers_in for it.
             */
            rwl->readers_in++;
        }

        /*
         * Only the last iteration through this loop can increment
         * rwl->exclusive.sem.wq.missed_wakeup's. All preceding
         * iterations will wake up a thread.
         */
        /*
         * We call the internal version of waitq_wakeup, which
         * relies on the fact that the waitq is already locked.
         */
        _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);

        t = NULL;
        if (!list_empty(&rwl->exclusive.sem.wq.head)) {
            t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
            if (t) {
                spinlock_lock(&t->lock);
                if (t->rwlock_holder_type != RWLOCK_READER)
                    one_more = 0;
                spinlock_unlock(&t->lock);
            }
        }
    } while ((type == RWLOCK_READER) && t && one_more);

    spinlock_unlock(&rwl->exclusive.sem.wq.lock);
}

void release_spinlock(void *arg)
{
    spinlock_unlock((spinlock_t *) arg);
}