Rev 2315 to Rev 2330; the resulting Rev 2330 code of the hunk starting at file line 41 follows.
#include <synch/spinlock.h>
#include <time/delay.h>
#include <panic.h>
#include <print.h>

/*
 * Rev 2330 drops the file-local SPINLOCK_INITIALIZE(rcu_global_lock) and the
 * definition of rcu_callback_list_t, which previously read as follows and is
 * presumably declared in a header now, since the type is still used below:
 *
 *	typedef struct rcu_callback_list {
 *		struct rcu_callback_list* next;
 *		void (*func)(void*);
 *		void* data;
 *	} rcu_callback_list_t;
 */

typedef struct {
#ifdef CONFIG_SMP
	bool* cpu_mask;
#endif
	rcu_callback_list_t* next_batch, *current_batch, *done_batch;
} rcu_global_t;

/** An array of structures holding the callbacks and the progress of the QS for each CPU */
rcu_global_t* rcu_global = NULL;
/** Reference to the RCU tasklet, for scheduling it */
tasklet_descriptor_t* rcu_tasklet_desc;

/**
 * Initializes the data structures needed for RCU
 */
void rcu_init(void)
{
#ifdef CONFIG_SMP
	int i, j;
#endif

	rcu_global = malloc(sizeof(rcu_global_t) * config.cpu_count, 0);
	rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);

#ifdef CONFIG_SMP
	/*
	 * Note: the array is allocated for the case when every connected CPU is
	 * active. If some CPUs stay inactive, only the first cells are used.
	 */
	for (i = 0; i < config.cpu_count; i++) {
		rcu_global[i].done_batch = NULL;
		rcu_global[i].current_batch = NULL;
		rcu_global[i].next_batch = NULL;
		rcu_global[i].cpu_mask = malloc(sizeof(bool) * config.cpu_count, 0);
		for (j = 0; j < config.cpu_count; j++) {
			rcu_global[i].cpu_mask[j] = false;
		}
	}
#else
	rcu_global[CPU->id].done_batch = NULL;
	rcu_global[CPU->id].current_batch = NULL;
	rcu_global[CPU->id].next_batch = NULL;
#endif
}

/**
 * Blocks until the grace period elapses
 */
void rcu_synchronize(void)
{
#ifdef CONFIG_SMP
	waitq_t wq;
	waitq_initialize(&wq);
	rcu_sync_callback(&rcu_synchronize_callback_function, &wq);
	//sleep until the end of the grace period
	waitq_sleep(&wq);
#endif
}
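
/*
 * Writer-side usage sketch (illustrative only: 'example_t' and the list calls
 * are hypothetical, not part of this file). The writer unlinks a node, waits
 * out one grace period so no reader can still be referencing it, and only
 * then frees it:
 *
 *	static void example_remove(example_t* e)
 *	{
 *		list_remove(&e->link);	// unpublish: new readers cannot reach it
 *		rcu_synchronize();	// wait until pre-existing readers finish
 *		free(e);		// now safe: nobody holds a reference
 *	}
 */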

#ifdef CONFIG_SMP
/**
 * Wakes up rcu_synchronize() once the grace period has elapsed
 */
void rcu_synchronize_callback_function(void* waitq)
{
	waitq_wakeup(((waitq_t*)waitq), WAKEUP_ALL);
}
#endif

/**
 * Appends the callback to the queue of waiting callbacks; the rest is handled
 * in rcu_run_callbacks() and in the tasklet. This is a lock-free variant which
 * must be supplied with a preallocated rcu_callback_list_t structure.
 */
void rcu_sync_callback_custom_alloc(void (*func)(void* data), void* data, rcu_callback_list_t* rd)
{
#ifndef CONFIG_SMP
	func(data);
#else
	ipl_t ipl;

	rd->func = func;
	rd->data = data;

	ipl = interrupts_disable();
	//append to the list of callbacks waiting for their batch to begin
	rd->next = rcu_global[CPU->id].next_batch;
	rcu_global[CPU->id].next_batch = rd;
	interrupts_restore(ipl);

	//running this function is itself a pass through a quiescent point
	rcu_passQS();
#endif
}
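
/*
 * rcu_synchronize() above calls a two-argument rcu_sync_callback(), which this
 * hunk does not show. A plausible sketch, based on the Rev 2315 variant that
 * allocated the node itself, is a thin wrapper around the custom-alloc
 * version (hypothetical reconstruction, not part of the diff):
 *
 *	void rcu_sync_callback(void (*func)(void* data), void* data)
 *	{
 *	#ifndef CONFIG_SMP
 *		func(data);
 *	#else
 *		rcu_callback_list_t* rd;
 *		rd = malloc(sizeof(rcu_callback_list_t), 0);
 *		rcu_sync_callback_custom_alloc(func, data, rd);
 *	#endif
 *	}
 */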

/**
 * RCU tasklet: tests whether all CPUs have passed through their quiescent
 * states and, if so, moves the current batch to the done batch
 */
void rcu_tasklet(void* data)
{
	rcu_callback_list_t* rd;
	bool passed_all_QS;
#ifdef CONFIG_SMP
	int i;
#endif
	ipl_t ipl;

	ipl = interrupts_disable();

	rcu_passQS();
	passed_all_QS = true;
#ifdef CONFIG_SMP
	//check whether all CPUs have passed through QS
	for (i = 0; i < config.cpu_active; i++)
		passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
#endif
	if (passed_all_QS) {
		//all CPUs have passed through QS -> the grace period is over and the
		//done batch may be handed over to rcu_run_callbacks()
		if (rcu_global[CPU->id].done_batch) {
			rd = rcu_global[CPU->id].done_batch;
			while (rd->next) rd = rd->next;
			//append the current list to the done list
			rd->next = rcu_global[CPU->id].current_batch;
		} else
			rcu_global[CPU->id].done_batch = rcu_global[CPU->id].current_batch;
		rcu_global[CPU->id].current_batch = NULL;
	}
	interrupts_restore(ipl);
}

/**
 * Indicates that the current CPU has passed through the quiescent state
 */
void rcu_passQS(void)
{
#ifdef CONFIG_SMP
	int i;
	for (i = 0; i < config.cpu_active; i++)
		//indicate on all CPUs that this CPU has gone through its QS
		rcu_global[i].cpu_mask[CPU->id] = true;
#endif
}

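/*
 * Mask protocol in brief: when a CPU passes through a quiescent state, it sets
 * the bit at its own index in *every* CPU's cpu_mask (rcu_passQS() above);
 * each CPU then inspects only its own entry in rcu_tasklet(). A hypothetical
 * helper making the tasklet's grace-period test explicit (same logic as its
 * loop, not part of the diff):
 *
 *	static bool rcu_grace_period_over(void)
 *	{
 *		int i;
 *		bool passed = true;
 *		for (i = 0; i < config.cpu_active; i++)
 *			passed &= rcu_global[CPU->id].cpu_mask[i];
 *		return passed;
 *	}
 */
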
/**
 * Moves the callbacks from the next batch to the current batch, schedules the
 * RCU tasklet, calls the done callbacks and frees their rcu_callback_list_t
 * structures
 */
void rcu_run_callbacks(void)
{
	rcu_callback_list_t* rd, *rd2;
	int i;
	ipl_t ipl;

	ipl = interrupts_disable();
	if (rcu_global[CPU->id].next_batch) {
		//we cannot append to the current list because the callbacks from the
		//next batch haven't passed their QSs yet
		if (rcu_global[CPU->id].current_batch == NULL) {
			rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
			rcu_global[CPU->id].next_batch = NULL;
#ifdef CONFIG_SMP
			//initialize our CPU mask
			for (i = 0; i < config.cpu_active; i++)
				rcu_global[CPU->id].cpu_mask[i] = false;
#endif
			//schedule the tasklet on all CPUs
			for (i = 0; i < config.cpu_active; i++) {
				tasklet_schedule_SMP(rcu_tasklet_desc, i);
			}
		}
	}
	//this CPU has passed its QS
	rcu_passQS();
	if (rcu_global[CPU->id].done_batch) {
		rd = rcu_global[CPU->id].done_batch;
		rcu_global[CPU->id].done_batch = NULL;
		interrupts_restore(ipl);
		while (rd) {
			//call the callback
			rd->func(rd->data);
			rd2 = rd->next;
			//free the structure
			free(rd);
			rd = rd2;
		}
	}
	else
		interrupts_restore(ipl);
}
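
/*
 * Deferred-free usage sketch (illustrative: 'example_t' is hypothetical).
 * Instead of blocking in rcu_synchronize(), a writer may queue the free as a
 * callback that the tasklet machinery runs once the grace period has elapsed:
 *
 *	static void example_free_callback(void* obj)
 *	{
 *		free(obj);
 *	}
 *
 *	static void example_retire(example_t* e)
 *	{
 *		rcu_callback_list_t* rd = malloc(sizeof(rcu_callback_list_t), 0);
 *		rcu_sync_callback_custom_alloc(&example_free_callback, e, rd);
 *	}
 */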