/*
 * Copyright (c) 2007 Jan Hudecek
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */

/** @file rcu.c
 *  @brief RCU synchronization primitive
 */
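
/*
 * Overview (added summary, inferred from the implementation below):
 * readers run without taking any locks. An updater that unlinks a shared
 * item must therefore wait for a grace period, i.e. until every active CPU
 * has passed through a quiescent state (QS), before the item may be freed.
 * Two interfaces are provided: rcu_synchronize() blocks until the grace
 * period elapses, while rcu_sync_callback() queues a callback to be invoked
 * after it, without blocking.
 */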

#include <synch/rcu.h>
#include <synch/waitq.h>
#include <arch.h>
#include <config.h>
#include <arch/types.h>
#include <proc/tasklet.h>
#include <synch/spinlock.h>
#include <time/delay.h>
#include <panic.h>
#include <print.h>
#include <mm/slab.h>	/* malloc()/free(); assumed location of the kernel allocator */
#include <cpu.h>	/* cpus[]; assumed location of the CPU array declaration */

/** Main data structure of the RCU implementation */
typedef struct {
#ifdef CONFIG_SMP
	/** Flags indicating whether the corresponding CPU has passed a quiescent state (QS) for this RCU batch */
	bool *cpu_mask;
#endif
	/** RCU batch waiting for the current batch to finish; QS monitoring has not yet started for this one */
	rcu_callback_list_t *next_batch;
	/** RCU batch that waits for QSs to be passed on all CPUs */
	rcu_callback_list_t *current_batch;
	/** RCU batch that has passed all QSs and waits for invocation */
	rcu_callback_list_t *done_batch;
} rcu_cpu_data_t;
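
/*
 * Batch lifecycle (added summary, inferred from the code below):
 *
 *   rcu_sync_callback()  - queues the callback on next_batch
 *   rcu_run_callbacks()  - promotes next_batch to current_batch and
 *                          schedules the RCU tasklet on all active CPUs
 *   rcu_tasklet()        - once every active CPU has passed a QS, moves
 *                          current_batch to done_batch
 *   rcu_run_callbacks()  - invokes and frees the callbacks on done_batch
 */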

/** Array of structures holding the callbacks and the QS progress for each CPU */
rcu_cpu_data_t *rcu_global = NULL;
/** Reference to the RCU tasklet, used for scheduling it */
tasklet_descriptor_t *rcu_tasklet_desc;

/**
 * Initializes the data structures needed for RCU.
 */
void rcu_init(void)
{
#ifdef CONFIG_SMP
	int i, j;
#endif

	rcu_global = malloc(sizeof(rcu_cpu_data_t) * config.cpu_count, 0);
	rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);

#ifdef CONFIG_SMP
	/*
	 * Note: the array is allocated for the case when every connected CPU
	 * is active. When some CPUs are inactive, only the active ones are used.
	 */
	for (i = 0; i < config.cpu_count; i++) {
		rcu_global[i].done_batch = NULL;
		rcu_global[i].current_batch = NULL;
		rcu_global[i].next_batch = NULL;
		rcu_global[i].cpu_mask = malloc(sizeof(bool) * config.cpu_count, 0);
		for (j = 0; j < config.cpu_count; j++) {
			rcu_global[i].cpu_mask[j] = false;
		}
	}
#else
	rcu_global[CPU->id].done_batch = NULL;
	rcu_global[CPU->id].current_batch = NULL;
	rcu_global[CPU->id].next_batch = NULL;
#endif
}

/**
 * Blocks until the grace period elapses.
 */
void rcu_synchronize(void)
{
#ifdef CONFIG_SMP
	waitq_t wq;

	waitq_initialize(&wq);
	rcu_sync_callback_normal_alloc(&rcu_synchronize_callback_function, &wq);
	/* sleep until the end of the grace period */
	waitq_sleep(&wq);
#endif
}
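
/*
 * Usage sketch (illustrative only; 'shared', 'new_version' and 'old' are
 * hypothetical):
 *
 *   old = shared;
 *   shared = new_version;   // publish the new version
 *   rcu_synchronize();      // wait until pre-existing readers are done
 *   free(old);              // no reader can still reference 'old'
 */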

#ifdef CONFIG_SMP
/**
 * Callback that wakes up rcu_synchronize() when the grace period has elapsed.
 */
void rcu_synchronize_callback_function(void *waitq)
{
	waitq_wakeup((waitq_t *) waitq, WAKEUP_ALL);
}
#endif

/**
 * Appends the callback to the queue of waiting callbacks; the rest is
 * handled in rcu_run_callbacks() and in the RCU tasklet. This is the
 * lock-free variant, which must be supplied with a preallocated
 * rcu_callback_list_t structure that is deallocated after the callback
 * is called.
 */
void rcu_sync_callback(void (*func)(void *data), void *data, rcu_callback_list_t *rd)
{
#ifndef CONFIG_SMP
	func(data);
#else
	ipl_t ipl;

	rd->func = func;
	rd->data = data;

	/*
	 * Disabling interrupts removes the need for any other synchronization:
	 * the list of callbacks is only ever accessed on the current CPU.
	 */
	ipl = interrupts_disable();
	/* append to the list of callbacks waiting for their batch to begin */
	rd->next = rcu_global[CPU->id].next_batch;
	rcu_global[CPU->id].next_batch = rd;
	rcu_passQS();
	interrupts_restore(ipl);
#endif
}
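
/*
 * Usage sketch (illustrative only; 'my_cleanup' and 'my_data' are
 * hypothetical). The descriptor must be heap-allocated, because
 * rcu_run_callbacks() hands it to free() after the callback has run:
 *
 *   rcu_callback_list_t *rd = malloc(sizeof(rcu_callback_list_t), 0);
 *   rcu_sync_callback(&my_cleanup, my_data, rd);
 */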

/**
 * RCU tasklet: checks whether the QSs have been passed and moves the current
 * batch to the done batch.
 */
void rcu_tasklet(void *data)
{
	rcu_callback_list_t *rd;
	bool passed_all_QS;
#ifdef CONFIG_SMP
	int i;
#endif
	ipl_t ipl;

	passed_all_QS = true;

	ipl = interrupts_disable();
	rcu_passQS();

#ifdef CONFIG_SMP
	/* check whether all CPUs have passed through the QSs of this CPU's current batch */
	for (i = 0; i < config.cpu_count; i++)
		if (cpus[i].active)
			passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
#endif
	if (passed_all_QS) {
		/*
		 * All CPUs have passed through a QS -> the grace period is over,
		 * we can schedule the invocation of the RCU callbacks.
		 */
		if (rcu_global[CPU->id].done_batch) {
			rd = rcu_global[CPU->id].done_batch;
			while (rd->next)
				rd = rd->next;
			/* append the current batch to the done batch */
			rd->next = rcu_global[CPU->id].current_batch;
		} else
			rcu_global[CPU->id].done_batch = rcu_global[CPU->id].current_batch;
		rcu_global[CPU->id].current_batch = NULL;
	}
	interrupts_restore(ipl);
}

/**
 * Indicates that the current CPU has gone through a quiescent state.
 */
void rcu_passQS(void)
{
#ifdef CONFIG_SMP
	int i;

	for (i = 0; i < config.cpu_count; i++)
		if (cpus[i].active)
			/*
			 * Indicate on every CPU that this CPU has gone through a QS.
			 * This can overlap with the clearing of the flag in
			 * rcu_run_callbacks().
			 */
			rcu_global[i].cpu_mask[CPU->id] = true;
#endif
}

/**
 * Moves the RCU callbacks from the next batch to the current batch, schedules
 * the RCU tasklet when needed, calls the callbacks from the done batch and
 * frees their rcu_callback_list_t structures.
 */
void rcu_run_callbacks(void)
{
	rcu_callback_list_t *rd, *rd2;
	int i;
	ipl_t ipl;

	ipl = interrupts_disable();
	if (rcu_global[CPU->id].next_batch) {
		/*
		 * We cannot append to the current batch because its callbacks
		 * haven't passed the QSs yet.
		 */
		if (rcu_global[CPU->id].current_batch == NULL) {
			rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
			rcu_global[CPU->id].next_batch = NULL;
#ifdef CONFIG_SMP
			/* initialize our CPU mask */
			for (i = 0; i < config.cpu_count; i++)
				if (cpus[i].active)
					rcu_global[CPU->id].cpu_mask[i] = false;
#endif
			/* schedule the tasklet on all CPUs */
			for (i = 0; i < config.cpu_count; i++)
				if (cpus[i].active)
					tasklet_schedule_SMP(rcu_tasklet_desc, i);
		}
	}
	/* this CPU has passed a QS */
	rcu_passQS();
	if (rcu_global[CPU->id].done_batch) {
		rd = rcu_global[CPU->id].done_batch;
		rcu_global[CPU->id].done_batch = NULL;
		/* the callbacks (and free) can block, we must restore interrupts */
		interrupts_restore(ipl);
		while (rd) {
			/* call the callback */
			if (rd->func == NULL)
				panic_printf("RCU callback function NULL, desc:%x", rd);
			rd->func(rd->data);
			rd2 = rd->next;
			/* free the callback descriptor */
			free(rd);
			rd = rd2;
		}
	} else
		interrupts_restore(ipl);
}

/**
 * Generic RCU callback that frees the memory passed in @a pointer.
 * @param pointer memory to be freed after the grace period elapses
 */
void rcu_callback_free(void *pointer)
{
	free(pointer);
}
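
/*
 * Typical use (illustrative): free 'pointer' after the grace period without
 * blocking the caller:
 *
 *   rcu_sync_callback_normal_alloc(&rcu_callback_free, pointer);
 */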

/** @} */