Subversion Repositories HelenOS

Rev 2400

/*
 * Copyright (c) 2007 Jan Hudecek
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup sync
 * @{
 */
/** @file rcu.c
 *  @brief RCU synchronization primitive
 */

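/*
 * Overview (derived from the code below): RCU (read-copy update) lets an
 * updater defer an action, typically freeing memory, until a grace period
 * has elapsed, i.e. until every active CPU has passed through a quiescent
 * state (QS) and thus cannot hold a reference acquired before the update.
 * This file implements the update side: callback registration,
 * grace-period detection and deferred callback invocation.
 */
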
#include <synch/rcu.h>
#include <synch/waitq.h>
#include <arch.h>
#include <config.h>
#include <arch/types.h>
#include <proc/tasklet.h>
#include <synch/spinlock.h>
#include <time/delay.h>
#include <panic.h>
#include <print.h>
#include <mm/slab.h>	/* kernel malloc() and free() */

/** Main data structure of the RCU implementation */
typedef struct {
#ifdef CONFIG_SMP
    /** Flags indicating whether the corresponding CPU has passed a QS for this CPU's current RCU batch */
    bool *cpu_mask;
#endif
    /** RCU batch waiting for the current batch to finish; QS monitoring has not been started for it yet */
    rcu_callback_list_t *next_batch;
    /** RCU batch waiting for all CPUs to pass through a QS */
    rcu_callback_list_t *current_batch;
    /** RCU batch that has passed all QSs and waits for invocation */
    rcu_callback_list_t *done_batch;
} rcu_cpu_data_t;
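
/*
 * Note on the batch life cycle (derived from the code below):
 * rcu_sync_callback() appends new callbacks to next_batch;
 * rcu_run_callbacks() promotes next_batch to current_batch, clears
 * cpu_mask and schedules the RCU tasklet on all active CPUs; the tasklet
 * moves current_batch to done_batch once every active CPU has set its
 * flag in cpu_mask; finally, rcu_run_callbacks() invokes and frees the
 * done_batch entries.
 */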

/** An array of structures holding the callbacks and the progress of QS for each CPU */
rcu_cpu_data_t *rcu_global = NULL;
/** Reference to the RCU tasklet, used for scheduling it */
tasklet_descriptor_t *rcu_tasklet_desc;

/**
 * Initializes data structures needed for RCU
 */
void rcu_init(void)
{
#ifdef CONFIG_SMP
    int i, j;
#endif

    rcu_global = malloc(sizeof(rcu_cpu_data_t) * config.cpu_count, 0);
    rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);

#ifdef CONFIG_SMP
    /*
     * Note: the array is allocated for the case when every CPU present
     * is active; if some CPUs are inactive, only the entries of the
     * active ones are used.
     */
    for (i = 0; i < config.cpu_count; i++) {
        rcu_global[i].done_batch = NULL;
        rcu_global[i].current_batch = NULL;
        rcu_global[i].next_batch = NULL;
        rcu_global[i].cpu_mask = malloc(sizeof(bool) * config.cpu_count, 0);
        for (j = 0; j < config.cpu_count; j++) {
            rcu_global[i].cpu_mask[j] = false;
        }
    }
#else
    rcu_global[CPU->id].done_batch = NULL;
    rcu_global[CPU->id].current_batch = NULL;
    rcu_global[CPU->id].next_batch = NULL;
#endif
}
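
/*
 * Note: rcu_init() uses config.cpu_count and the kernel allocator, so it
 * is presumably called once during kernel startup, after memory management
 * and CPU enumeration are set up and before the first use of the RCU API;
 * the call site is outside this file.
 */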

/**
 * Blocks until the grace period elapses
 */
void rcu_synchronize(void)
{
#ifdef CONFIG_SMP
    waitq_t wq;
    waitq_initialize(&wq);
    rcu_sync_callback_normal_alloc(&rcu_synchronize_callback_function, &wq);
    /* sleep until the end of the grace period */
    waitq_sleep(&wq);
#endif
}
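
/*
 * Usage sketch (hypothetical names shared_item, new_item, old): a writer
 * replaces an element in a shared structure, waits for the grace period,
 * and only then frees the old element:
 *
 *     old = shared_item;       // hypothetical shared pointer
 *     shared_item = new_item;  // publish the replacement
 *     rcu_synchronize();       // wait until no CPU can still use 'old'
 *     free(old);               // safe to reclaim now
 */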

#ifdef CONFIG_SMP
/**
 * Wakeup callback used by rcu_synchronize(); invoked when the grace period
 * has elapsed
 */
void rcu_synchronize_callback_function(void *waitq)
{
    waitq_wakeup((waitq_t *) waitq, WAKEUP_ALL);
}
#endif

/**
 * Appends the callback to the queue of waiting callbacks; the rest is
 * handled in rcu_run_callbacks() and in the tasklet. This is the lock-free
 * variant, which must be supplied with a preallocated rcu_callback_list_t
 * structure that is deallocated after the callback has been called.
 */
void rcu_sync_callback(void (*func)(void *data), void *data, rcu_callback_list_t *rd)
{
#ifndef CONFIG_SMP
    /* on a uniprocessor the callback is invoked immediately */
    func(data);
#else
    ipl_t ipl;

    rd->func = func;
    rd->data = data;

    /*
     * Disabling interrupts removes the need for any other synchronization:
     * the list of callbacks is only ever accessed on the current CPU.
     */
    ipl = interrupts_disable();
    /* append to the list of callbacks waiting for their batch to begin */
    rd->next = rcu_global[CPU->id].next_batch;
    rcu_global[CPU->id].next_batch = rd;
    rcu_passQS();
    interrupts_restore(ipl);
#endif
}
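
/*
 * Usage sketch for this lock-free variant (hypothetical old_item): the
 * caller preallocates the rcu_callback_list_t; rcu_run_callbacks() frees
 * it after the callback has run:
 *
 *     rcu_callback_list_t *rd = malloc(sizeof(rcu_callback_list_t), 0);
 *     rcu_sync_callback(&rcu_callback_free, old_item, rd);
 *
 * The allocating variant, rcu_sync_callback_normal_alloc() (used by
 * rcu_synchronize() above), obtains the structure itself.
 */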

/**
 * RCU tasklet: checks whether all CPUs have passed through a QS and, if so,
 * moves the current batch to the done batch
 */
void rcu_tasklet(void *data)
{
    rcu_callback_list_t *rd;
    bool passed_all_QS;
#ifdef CONFIG_SMP
    int i;
#endif
    ipl_t ipl;
    passed_all_QS = true;

    ipl = interrupts_disable();
    rcu_passQS();

#ifdef CONFIG_SMP
    /* check whether all CPUs have passed through a QS of this CPU's current batch */
    for (i = 0; i < config.cpu_count; i++)
        if (cpus[i].active)
            passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
#endif
    if (passed_all_QS) {
        /*
         * All CPUs have passed through a QS -> the grace period is over,
         * we can schedule the invocation of the RCU callbacks.
         */
        if (rcu_global[CPU->id].done_batch) {
            rd = rcu_global[CPU->id].done_batch;
            while (rd->next)
                rd = rd->next;
            /* append the current batch to the done batch */
            rd->next = rcu_global[CPU->id].current_batch;
        } else
            rcu_global[CPU->id].done_batch = rcu_global[CPU->id].current_batch;
        rcu_global[CPU->id].current_batch = NULL;
    }
    interrupts_restore(ipl);
}
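
/*
 * Note: the tasklet doubles as the QS detector. It is scheduled on every
 * active CPU by rcu_run_callbacks(); when it runs, rcu_passQS() records on
 * all CPUs that this CPU has passed through a QS, and the check above
 * finishes the grace period once every active CPU has done so.
 */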

/**
 * Indicates that the current CPU has gone through a quiescent state
 */
void rcu_passQS(void)
{
#ifdef CONFIG_SMP
    int i;
    for (i = 0; i < config.cpu_count; i++)
        if (cpus[i].active)
            /*
             * On all CPUs indicate that this CPU has gone through a QS;
             * this can overlap with the clearing of these flags in
             * rcu_run_callbacks().
             */
            rcu_global[i].cpu_mask[CPU->id] = true;
#endif
}

/**
 * Moves RCU callbacks from the next batch to the current batch, schedules
 * the RCU tasklet when needed, invokes the callbacks from the done batch
 * and frees their rcu_callback_list_t structures
 */
void rcu_run_callbacks(void)
{
    rcu_callback_list_t *rd, *rd2;
    int i;
    ipl_t ipl;

    ipl = interrupts_disable();
    if (rcu_global[CPU->id].next_batch) {
        /*
         * We cannot append to the current batch, because the callbacks
         * from the next batch haven't passed their QSs yet.
         */
        if (rcu_global[CPU->id].current_batch == NULL) {
            rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
            rcu_global[CPU->id].next_batch = NULL;
#ifdef CONFIG_SMP
            /* initialize our CPU mask */
            for (i = 0; i < config.cpu_count; i++)
                if (cpus[i].active)
                    rcu_global[CPU->id].cpu_mask[i] = false;
#endif
            /* schedule the tasklet on all CPUs */
            for (i = 0; i < config.cpu_count; i++)
                if (cpus[i].active)
                    tasklet_schedule_SMP(rcu_tasklet_desc, i);
        }
    }
    /* this CPU has passed a QS */
    rcu_passQS();
    if (rcu_global[CPU->id].done_batch) {
        rd = rcu_global[CPU->id].done_batch;
        rcu_global[CPU->id].done_batch = NULL;
        /* the callbacks (and free) can block, so interrupts must be restored */
        interrupts_restore(ipl);
        while (rd) {
            /* invoke the callback */
            if (rd->func == NULL)
                panic_printf("RCU callback function is NULL, desc: %p", rd);
            rd->func(rd->data);
            rd2 = rd->next;
            /* free the structure */
            free(rd);
            rd = rd2;
        }
    } else
        interrupts_restore(ipl);
}
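
/*
 * Note: rcu_run_callbacks() drives the whole mechanism and is presumably
 * invoked periodically on each CPU (e.g. from the clock or scheduler
 * path); the call site is outside this file.
 */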

/**
 * Generic RCU callback that frees @p pointer
 * @param pointer memory to be freed
 */
void rcu_callback_free(void *pointer)
{
    free(pointer);
}
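
/*
 * Usage sketch (hypothetical old_item): deferred, non-blocking freeing of
 * a just-unlinked object:
 *
 *     rcu_sync_callback_normal_alloc(&rcu_callback_free, old_item);
 *
 * Unlike the rcu_synchronize() pattern, the writer does not sleep; the
 * memory is freed after the grace period by rcu_run_callbacks().
 */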

/** @}
 */