Subversion Repositories HelenOS

Rev

Rev 2330 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (c) 2007 Jan Hudecek
  3.  * Copyright (c) 2006 Jakub Jermar
  4.  * All rights reserved.
  5.  *
  6.  * Redistribution and use in source and binary forms, with or without
  7.  * modification, are permitted provided that the following conditions
  8.  * are met:
  9.  *
  10.  * - Redistributions of source code must retain the above copyright
  11.  *   notice, this list of conditions and the following disclaimer.
  12.  * - Redistributions in binary form must reproduce the above copyright
  13.  *   notice, this list of conditions and the following disclaimer in the
  14.  *   documentation and/or other materials provided with the distribution.
  15.  * - The name of the author may not be used to endorse or promote products
  16.  *   derived from this software without specific prior written permission.
  17.  *
  18.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  19.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  20.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  21.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  22.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  23.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  24.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  25.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  26.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  27.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  28.  */
  29. /** @addtogroup sync
  30.  * @{
  31.  */
  32. /** @file
  33.  */
  34.  
  35. #include <synch/rcu.h>
  36. #include <synch/waitq.h>
  37. #include <arch.h>
  38. #include <config.h>
  39. #include <arch/types.h>
  40. #include <proc/tasklet.h>
  41. #include <synch/spinlock.h>
  42. #include <time/delay.h>
  43. #include <panic.h>
  44. #include <print.h>
  45.  
  46.  
  47.  
  48.  
/** Per-CPU RCU state: the three callback batches and (on SMP) the
 *  quiescent-state progress mask for the current grace period. */
typedef struct  {
#ifdef CONFIG_SMP
    /** cpu_mask[i] is set to true (by rcu_passQS() running on CPU i) once
     *  CPU i has passed through a quiescent state since this CPU's
     *  current batch began; cleared in rcu_run_callbacks(). */
    bool* cpu_mask;
#endif
    /** next_batch: callbacks whose grace period has not started yet;
     *  current_batch: callbacks whose grace period is in progress;
     *  done_batch: callbacks whose grace period has elapsed and which
     *  may therefore be invoked. */
    rcu_callback_list_t* next_batch, *current_batch, *done_batch;
} rcu_global_t;
  55.  
/** Array holding the callbacks and the progress of QS for each CPU;
 *  indexed by CPU id, config.cpu_count entries, allocated in rcu_init(). */
rcu_global_t* rcu_global=NULL;
/** Reference to the RCU tasklet (registered in rcu_init()), used for
 *  scheduling it on the active CPUs from rcu_run_callbacks(). */
tasklet_descriptor_t* rcu_tasklet_desc;
  60.  
  61.  
  62. /**
  63. * Initializes data structures needed for RCU
  64. */
  65. void rcu_init(void)
  66. {
  67. #ifdef CONFIG_SMP
  68.     int i,j;
  69. #endif
  70.  
  71.     rcu_global = malloc(sizeof(rcu_global_t)*(config.cpu_count),0);
  72.     rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);
  73.  
  74. #ifdef CONFIG_SMP
  75.     /*
  76.      * Note: I allocate the array for a case when every CPU connected will be active
  77.      * In a case when there will be some inactive CPUs, I will use just the first cells.
  78.      */
  79.     for (i=0;i<config.cpu_count;i++) {
  80.         rcu_global[i].done_batch = NULL;
  81.         rcu_global[i].current_batch = NULL;
  82.         rcu_global[i].next_batch = NULL;
  83.         rcu_global[i].cpu_mask = malloc(sizeof(bool)*config.cpu_count,0);
  84.         for (j=0;j<config.cpu_count;j++) {
  85.             rcu_global[i].cpu_mask[j]=false;
  86.         }
  87.     }
  88. #else
  89.     rcu_global[CPU->id].done_batch = NULL;
  90.     rcu_global[CPU->id].current_batch = NULL;
  91.     rcu_global[CPU->id].next_batch = NULL;
  92. #endif
  93. }
  94.  
  95.  
  96. /**
  97. * Blocks until the grace period elapses
  98. */
  99. void rcu_synchronize(void)
  100. {
  101. #ifdef CONFIG_SMP
  102.     waitq_t wq;
  103.     waitq_initialize(&wq);
  104.     rcu_sync_callback_normal_alloc(&rcu_synchronize_callback_function, &wq);
  105.     //sleep until the end of the grace period
  106.     waitq_sleep(&wq);
  107. #endif
  108. }
  109.  
  110. #ifdef CONFIG_SMP
  111. /**
  112. * Just a wakeup for waking up rcu_synchronize when the grace period has elapsed
  113. */
  114. void rcu_synchronize_callback_function(void* waitq)
  115. {
  116.     waitq_wakeup(((waitq_t*)waitq), WAKEUP_ALL);
  117. }
  118. #endif
  119.  
  120.  
  121. /**
  122. * appends this callback func to the queue of waiting callbacks, the rest
  123. * is handled in rcu_run_callbacks and in the tasklet. This is a lock free variant,
  124. * which must be supplied with a preallocated rcu_callback_list_t structure
  125. */
  126. void rcu_sync_callback(void (*func)(void* data), void* data, rcu_callback_list_t* rd)
  127. {
  128. #ifndef CONFIG_SMP
  129.     func(data);
  130. #else
  131.  
  132.     ipl_t ipl;
  133.     rd->func = func;
  134.     rd->data = data;
  135.  
  136.     ipl = interrupts_disable();
  137.     //append to the list of callbacks waiting for their batch to begin
  138.     rd->next = rcu_global[CPU->id].next_batch;
  139.     rcu_global[CPU->id].next_batch = rd;
  140.     interrupts_restore(ipl);
  141.  
  142.     rcu_passQS();
  143. #endif
  144. }
  145.  
/**
* RCU tasklet, tests passing through QSs, moves from current to done
*
* Runs with interrupts disabled. Marks the local CPU as quiescent, then
* checks whether every active CPU has passed a quiescent state for this
* CPU's current batch; if so, the grace period is over and the current
* batch is appended to the done batch (picked up later by
* rcu_run_callbacks()).
*
* @param data unused
*/
void rcu_tasklet(void* data)
{
    rcu_callback_list_t* rd;
    bool passed_all_QS;
#ifdef CONFIG_SMP
    int i;
#endif
    ipl_t ipl;

    ipl = interrupts_disable();

    /* Record that this CPU itself has passed through a quiescent state. */
    rcu_passQS();
    passed_all_QS = true;
#ifdef CONFIG_SMP
    //check whether all CPUs have passed through QS
    //(each flag in this CPU's mask is set by rcu_passQS() on the respective CPU)
    for (i = 0; i < config.cpu_active; i++)
        passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
#endif
    if (passed_all_QS) {
        //all CPUs have passed through QS -> grace period is over, we can schedule the call to RCU callback
        if (rcu_global[CPU->id].done_batch) {
            rd = rcu_global[CPU->id].done_batch;
            //walk to the tail of the done list
            while (rd->next) rd = rd->next;
            //append the current list to done list
            rd->next = rcu_global[CPU->id].current_batch;
        } else
            rcu_global[CPU->id].done_batch = rcu_global[CPU->id].current_batch;
        //current batch is now owned by the done list (or was NULL)
        rcu_global[CPU->id].current_batch = NULL;
    }
    interrupts_restore(ipl);
}
  180.  
  181.  
  182. /**
  183. * This function indicates that the current CPU has gone through the quiescent state
  184. */
  185. void rcu_passQS(void)
  186. {
  187. #ifdef CONFIG_SMP
  188.     int i;
  189.     for (i=0;i<config.cpu_active;i++)
  190.         //on all CPUs indicate that this CPU has gone through QS
  191.         rcu_global[i].cpu_mask[CPU->id]=true;
  192. #endif
  193. }
  194.  
  195.  
/**
* Moves RCUs from next to current, schedules RCU tasklet, calls the callbacks, frees the rcu_callback_list_t
*
* Runs with interrupts disabled while manipulating the per-CPU lists;
* interrupts are re-enabled before the callbacks themselves are invoked,
* since callbacks (e.g. the rcu_synchronize wakeup) may block or take
* locks.
*/
void rcu_run_callbacks(void)
{
    rcu_callback_list_t* rd, *rd2;
    int i;
    ipl_t ipl;

    ipl = interrupts_disable();
    if (rcu_global[CPU->id].next_batch) {
        //we cannot append to the current list because callbacks from next batch
        //haven't passed the QSs
        if (rcu_global[CPU->id].current_batch == NULL) {
            //promote the waiting batch: its grace period starts now
            rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
            rcu_global[CPU->id].next_batch = NULL;
#ifdef CONFIG_SMP
            //initialize our CPU mask
            //(no CPU has passed a QS yet for the new batch)
            for (i=0;i<config.cpu_active;i++)
                rcu_global[CPU->id].cpu_mask[i]=false;
#endif
            //schedule tasklet for all CPUs
            for (i=0;i<config.cpu_active;i++) {
                tasklet_schedule_SMP(rcu_tasklet_desc, i);
            }
        }
    }
    //this CPU has passed QS
    rcu_passQS();
    if (rcu_global[CPU->id].done_batch) {
        //detach the done list, then re-enable interrupts before running
        //the callbacks (they may block)
        rd = rcu_global[CPU->id].done_batch;
        rcu_global[CPU->id].done_batch = NULL;
        interrupts_restore(ipl);
        while (rd) {
            //call the callback
            rd->func(rd->data);
            rd2 = rd->next;
            //free the structure
            free(rd);
            rd = rd2;
        }
    }
    else
        interrupts_restore(ipl);
}
  241.  
  242.  
  243. /**
  244. * Generic callback for RCU, frees @paramref pointer
  245. * @param pointer
  246. */
  247. void rcu_callback_free(void* pointer)
  248. {
  249.     free(pointer);
  250. }
  251.  
  252.