Index: rcu.c (diff of revisions 2400 and 2430)
===================================================================
--- rcu.c	(revision 2400)
+++ rcu.c	(revision 2430)
@@ -27,11 +27,12 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 /** @addtogroup sync
  * @{
  */
-/** @file
+/** @file rcu.c
+ * @brief RCU synchronization primitive
  */

 #include <synch/rcu.h>
 #include <synch/waitq.h>
 #include <arch.h>
@@ -43,20 +44,26 @@
 #include <panic.h>
 #include <print.h>



-
+/** Main data structure of the RCU implementation */
 typedef struct {
 #ifdef CONFIG_SMP
+	/** flags indicating whether the corresponding CPU has passed QS for this RCU batch */
 	bool* cpu_mask;
 #endif
-	rcu_callback_list_t* next_batch, *current_batch, *done_batch;
-} rcu_global_t;
+	/** RCU batch waiting for finishing of current batch, QS monitoring hasn't been started for this one */
+	rcu_callback_list_t* next_batch;
+	/** RCU batch that waits for passing of QSs on all CPUs */
+	rcu_callback_list_t *current_batch;
+	/** RCU batch that has passed all QSs and waits for invocation */
+	rcu_callback_list_t *done_batch;
+} rcu_cpu_data_t;

 /** An array of structures holding the callbacks and the progress of QS for each CPU*/
-rcu_global_t* rcu_global=NULL;
+rcu_cpu_data_t* rcu_global=NULL;
 /** reference to the RCU tasklet, for scheduling it */
 tasklet_descriptor_t* rcu_tasklet_desc;


 /**
@@ -66,17 +73,17 @@
 {
 #ifdef CONFIG_SMP
 	int i,j;
 #endif

-	rcu_global = malloc(sizeof(rcu_global_t)*(config.cpu_count),0);
+	rcu_global = malloc(sizeof(rcu_cpu_data_t)*(config.cpu_count),0);
 	rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);

 #ifdef CONFIG_SMP
 	/*
 	 * Note: I allocate the array for a case when every CPU connected will be active
-	 * In a case when there will be some inactive CPUs, I will use just the first cells.
+	 * In a case when there will be some inactive CPUs, I will use just the active ones
 	 */
 	for (i=0;i<config.cpu_count;i++) {
 		rcu_global[i].done_batch = NULL;
 		rcu_global[i].current_batch = NULL;
 		rcu_global[i].next_batch = NULL;
@@ -117,13 +124,14 @@
 	}
 #endif


 /**
- * appends this callback func to the queue of waiting callbacks, the rest
+ * Appends this callback func to the queue of waiting callbacks, the rest
  * is handled in rcu_run_callbacks and in the tasklet. This is a lock free variant,
  * which must be supplied with a preallocated rcu_callback_list_t structure
+ * which is deallocated after the callback is called
  */
 void rcu_sync_callback(void (*func)(void* data), void* data, rcu_callback_list_t* rd)
 {
 #ifndef CONFIG_SMP
 	func(data);
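Because the structure is deallocated by the RCU machinery itself (rcu_run_callbacks calls free on it after invoking the callback), the caller has to pass in a heap-allocated node. A hypothetical use, deferring destruction of an unlinked item past the grace period; my_item_t and unlink_from_shared_list are illustrative names, not part of this file:

	static void cleanup(void* data)
	{
		free(data);	/* safe: after the grace period no reader can still hold the item */
	}

	void example_remove(my_item_t* item)
	{
		/* freed later by rcu_run_callbacks(), so it must come from the heap */
		rcu_callback_list_t* rd = malloc(sizeof(rcu_callback_list_t), 0);

		unlink_from_shared_list(item);		/* readers may still be traversing the item */
		rcu_sync_callback(&cleanup, item, rd);	/* cleanup() runs once all CPUs pass a QS */
	}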
@@ -131,45 +139,49 @@

 	ipl_t ipl;
 	rd->func = func;
 	rd->data = data;

+	//disabling interrupts removes need for any synchronization - the list of callbacks is
+	//always accessed only on current CPU
 	ipl = interrupts_disable();
 	//append to the list of callbacks waiting for their batch to begin
 	rd->next = rcu_global[CPU->id].next_batch;
 	rcu_global[CPU->id].next_batch = rd;
-	interrupts_restore(ipl);
-
 	rcu_passQS();
+	interrupts_restore(ipl);
 #endif
 }

 /**
- * RCU tasklet, tests passing through QSs, moves from current to done
+ * RCU tasklet, tests passing through QSs, moves from current list to done list
  */
 void rcu_tasklet(void* data)
 {
 	rcu_callback_list_t* rd;
 	bool passed_all_QS;
 #ifdef CONFIG_SMP
 	int i;
 #endif
 	ipl_t ipl;
+	passed_all_QS = true;

-	ipl = interrupts_disable();

+	ipl = interrupts_disable();
 	rcu_passQS();
-	passed_all_QS = true;
+
 #ifdef CONFIG_SMP
-	//check whether all CPUs have passed through QS
-	for (i = 0; i < config.cpu_active; i++)
+	//check whether all CPUs have passed through QS of this CPU's current batch
+	for (i = 0; i < config.cpu_count; i++)
+		if (cpus[i].active)
 			passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
 #endif
 	if (passed_all_QS) {
 		//all CPUs have passed through QS -> grace period is over, we can schedule the call to RCU callback
 		if (rcu_global[CPU->id].done_batch) {
 			rd = rcu_global[CPU->id].done_batch;
+
 			while (rd->next) rd = rd->next;
 			//append the current list to done list
 			rd->next = rcu_global[CPU->id].current_batch;
 		} else
 			rcu_global[CPU->id].done_batch = rcu_global[CPU->id].current_batch;
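The loop-bound fix in this hunk matters: iterating over 0..config.cpu_active-1 silently assumed the active CPUs occupy the first cpu_active slots of cpus[], which need not hold; the new code walks all connected CPUs and filters on cpus[i].active. Restated in isolation, the grace-period test the tasklet performs is (a sketch over the same data layout as above, not a function in this file):

	/* True once every active CPU has marked a quiescent state in the mask of
	 * CPU 'me' - that is, the grace period for me's current batch is over. */
	bool grace_period_over(unsigned int me)
	{
		bool passed = true;
		unsigned int i;

		for (i = 0; i < config.cpu_count; i++)
			if (cpus[i].active)
				passed = passed && rcu_global[me].cpu_mask[i];
		return passed;
	}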
@@ -184,52 +196,59 @@
  */
 void rcu_passQS(void)
 {
 #ifdef CONFIG_SMP
 	int i;
-	for (i=0;i<config.cpu_active;i++)
+	for (i = 0; i < config.cpu_count; i++)
+		if (cpus[i].active)
 			//on all CPUs indicate that this CPU has gone through QS
+			//this can overlap with clearing this flag in rcu_run_callbacks
 			rcu_global[i].cpu_mask[CPU->id]=true;
 #endif
 }


 /**
- * Moves RCUs from next to current, schedules RCU tasklet, calls the callbacks, frees the rcu_callback_list_t
+ * Moves RCU callbacks from next list to current list, schedules the RCU tasklet when needed,
+ * calls the callbacks from done list, frees the rcu_callback_list_t
  */
 void rcu_run_callbacks(void)
 {
 	rcu_callback_list_t* rd, *rd2;
 	int i;
 	ipl_t ipl;

 	ipl = interrupts_disable();
 	if (rcu_global[CPU->id].next_batch) {
-		//we cannot append to the current list because callbacks from next batch
+		//we cannot append to the current list because the callbacks from next batch
 		//haven't passed the QSs
 		if (rcu_global[CPU->id].current_batch == NULL) {
 			rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
 			rcu_global[CPU->id].next_batch = NULL;
 #ifdef CONFIG_SMP
 			//initialize our CPU mask
-			for (i=0;i<config.cpu_active;i++)
+			for (i = 0; i < config.cpu_count; i++)
+				if (cpus[i].active)
 					rcu_global[CPU->id].cpu_mask[i]=false;
 #endif
 			//schedule tasklet for all CPUs
-			for (i=0;i<config.cpu_active;i++) {
+			for (i = 0; i < config.cpu_count; i++)
+				if (cpus[i].active)
 					tasklet_schedule_SMP(rcu_tasklet_desc, i);
-			}
 		}
 	}
 	//this CPU has passed QS
 	rcu_passQS();
 	if (rcu_global[CPU->id].done_batch) {
 		rd = rcu_global[CPU->id].done_batch;
 		rcu_global[CPU->id].done_batch = NULL;
+		//the callbacks (and free) can block, we must restore the interrupts
 		interrupts_restore(ipl);
 		while (rd) {
 			//call the callback
+			if (rd->func == NULL)
+				panic_printf("RCU callback function NULL, desc:%x", rd);
 			rd->func(rd->data);
 			rd2 = rd->next;
 			//free the structure
 			free(rd);
 			rd = rd2;
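Taken together, rcu_run_callbacks and rcu_tasklet drive every batch through a fixed per-CPU lifecycle (interrupts disabled throughout, except while the callbacks themselves run):

	next_batch    --rcu_run_callbacks: cpu_mask cleared, tasklet scheduled on all active CPUs-->  current_batch
	current_batch --rcu_tasklet: every active CPU set its cpu_mask flag, grace period over-->     done_batch
	done_batch    --rcu_run_callbacks: interrupts restored, func(data) invoked, node freed-->     (released)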