Subversion Repositories HelenOS

Compare Revisions

Ignore whitespace — Rev 2400 → Rev 2430

/branches/rcu/kernel/generic/src/synch/rcu.c
29,7 → 29,8
/** @addtogroup sync
* @{
*/
/** @file
/** @file rcu.c
* @brief RCU synchronization primitive
*/
 
#include <synch/rcu.h>
45,16 → 46,22
 
 
 
 
/** Main data structure of the RCU implementation */
typedef struct {
#ifdef CONFIG_SMP
/** flags indicating whether the corresponding CPU has passed QS for this RCU batch */
bool* cpu_mask;
#endif
rcu_callback_list_t* next_batch, *current_batch, *done_batch;
} rcu_global_t;
/** RCU batch waiting for finishing of current batch, QS monitoring hasn't been started for this one */
rcu_callback_list_t* next_batch;
/** RCU batch that waits for passing of QSs on all CPUs */
rcu_callback_list_t *current_batch;
/** RCU batch that has passed all QSs and waits for invocation */
rcu_callback_list_t *done_batch;
} rcu_cpu_data_t;
 
/** An array of structures holding the callbacks and the progress of QS for each CPU*/
rcu_global_t* rcu_global=NULL;
rcu_cpu_data_t* rcu_global=NULL;
/** reference to the RCU tasklet, for scheduling it */
tasklet_descriptor_t* rcu_tasklet_desc;
 
68,13 → 75,13
int i,j;
#endif
 
rcu_global = malloc(sizeof(rcu_global_t)*(config.cpu_count),0);
rcu_global = malloc(sizeof(rcu_cpu_data_t)*(config.cpu_count),0);
rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);
 
#ifdef CONFIG_SMP
/*
* Note: I allocate the array for a case when every CPU connected will be active
* In a case when there will be some inactive CPUs, I will use just the first cells.
* In a case when there will be some inactive CPUs, I will use just the active ones
*/
for (i=0;i<config.cpu_count;i++) {
rcu_global[i].done_batch = NULL;
119,9 → 126,10
 
 
/**
* appends this callback func to the queue of waiting callbacks, the rest
* Appends this callback func to the queue of waiting callbacks, the rest
* is handled in rcu_run_callbacks and in the tasklet. This is a lock free variant,
* which must be supplied with a preallocated rcu_callback_list_t structure
* which is deallocated after the callback is called
*/
void rcu_sync_callback(void (*func)(void* data), void* data, rcu_callback_list_t* rd)
{
133,18 → 141,19
rd->func = func;
rd->data = data;
 
//disabling interrupts removes need for any synchronization - the list of callbacks is
//always accessed only on current CPU
ipl = interrupts_disable();
//append to the list of callbacks waiting for their batch to begin
rd->next = rcu_global[CPU->id].next_batch;
rcu_global[CPU->id].next_batch = rd;
rcu_passQS();
interrupts_restore(ipl);
 
rcu_passQS();
#endif
}
 
/**
* RCU tasklet, tests passing through QSs, moves from current to done
* RCU tasklet, tests passing through QSs, moves from current list to done list
*/
void rcu_tasklet(void* data)
{
154,20 → 163,23
int i;
#endif
ipl_t ipl;
passed_all_QS = true;
 
 
ipl = interrupts_disable();
 
rcu_passQS();
passed_all_QS = true;
#ifdef CONFIG_SMP
//check whether all CPUs have passed through QS
for (i = 0; i < config.cpu_active; i++)
passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
//check whether all CPUs have passed through QS of this CPU's current batch
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].active)
passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
#endif
if (passed_all_QS) {
//all CPUs have passed through QS -> grace period is over, we can schedule the call to RCU callback
if (rcu_global[CPU->id].done_batch) {
rd = rcu_global[CPU->id].done_batch;
 
while (rd->next) rd = rd->next;
//append the current list to done list
rd->next = rcu_global[CPU->id].current_batch;
186,15 → 198,18
{
#ifdef CONFIG_SMP
int i;
for (i=0;i<config.cpu_active;i++)
//on all CPUs indicate that this CPU has gone through QS
rcu_global[i].cpu_mask[CPU->id]=true;
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].active)
//on all CPUs indicate that this CPU has gone through QS
//this can overlap with clearing this flag in rcu_run_callbacks
rcu_global[i].cpu_mask[CPU->id]=true;
#endif
}
 
 
/**
* Moves RCUs from next to current, schedules RCU tasklet, calls the callbacks, frees the rcu_callback_list_t
* Moves RCU callbacks from next list to current list, schedules the RCU tasklet when needed,
* calls the callbacks from done list, frees the rcu_callback_list_t
*/
void rcu_run_callbacks(void)
{
204,7 → 219,7
 
ipl = interrupts_disable();
if (rcu_global[CPU->id].next_batch) {
//we cannot append to the current list because callbacks from next batch
//we cannot append to the current list because the callbacks from next batch
//haven't passed the QSs
if (rcu_global[CPU->id].current_batch == NULL) {
rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
211,13 → 226,14
rcu_global[CPU->id].next_batch = NULL;
#ifdef CONFIG_SMP
//initialize our CPU mask
for (i=0;i<config.cpu_active;i++)
rcu_global[CPU->id].cpu_mask[i]=false;
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].active)
rcu_global[CPU->id].cpu_mask[i]=false;
#endif
//schedule tasklet for all CPUs
for (i=0;i<config.cpu_active;i++) {
tasklet_schedule_SMP(rcu_tasklet_desc, i);
}
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].active)
tasklet_schedule_SMP(rcu_tasklet_desc, i);
}
}
//this CPU has passed QS
225,9 → 241,12
if (rcu_global[CPU->id].done_batch) {
rd = rcu_global[CPU->id].done_batch;
rcu_global[CPU->id].done_batch = NULL;
//the callbacks (and free) can block, we must restore the interrupts
interrupts_restore(ipl);
while (rd) {
//call the callback
if (rd->func == NULL)
panic_printf("RCU callback function NULL, desc:%x", rd);
rd->func(rd->data);
rd2 = rd->next;
//free the structure