Subversion Repositories: HelenOS

Compare Revisions

Rev 2329 → Rev 2330

/branches/rcu/kernel/test/synch/rcu1.c
36,7 → 36,7
#include <arch.h>
#include <preemption.h>
bool gquiet;
-bool called;
+volatile bool called;
 
static void callback(void* data)
{
57,7 → 57,7
rcu_sync_callback(&callback, NULL);
if (!quiet)
printf("Callback scheduled\n");
-while(!called);
+// while(!called);
rcu_synchronize();
if (!quiet)
printf("Synchronized\n");
/branches/rcu/kernel/generic/include/synch/rcu.h
41,7 → 41,13
#include <arch.h>
#include <preemption.h>
 
+typedef struct rcu_callback_list {
+struct rcu_callback_list* next;
+void (*func)(void*);
+void* data;
+} rcu_callback_list_t;
 
 
/** Read lock for RCU protected pointer */
#define rcu_read_lock() preemption_disable()
 
54,12 → 60,17
/** Assigning a value to an RCU protected pointer */
#define rcu_assign_pointer(p, newp) {write_barrier(); (p)=(newp);}
 
+/** RCU sync callback for those who don't need custom allocation */
+#define rcu_sync_callback(func, data) {\
+rcu_callback_list_t* rd = malloc(sizeof(rcu_callback_list_t),0);\
+rcu_sync_callback_custom_alloc(func, data, rd);}
 
void rcu_init(void);
void rcu_synchronize(void);
void rcu_synchronize_callback_function(void* waitq);
-void rcu_sync_callback(void (*func)(void* data), void* data);
+void rcu_sync_callback_custom_alloc(void (*func)(void* data), void* data, rcu_callback_list_t* rcu_struct);
void rcu_tasklet(void* data);
-inline void rcu_passQS(void);
+void rcu_passQS(void);
void rcu_run_callbacks(void);
 
#endif
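Taken together, the declarations above give the usual RCU usage pattern: readers run in a preemption-disabled section, while updaters publish a new pointer behind a write barrier and defer reclamation until after the grace period. A minimal sketch built on this header (rcu_read_unlock(), presumably a preemption_enable() counterpart, is not shown in this hunk, and config_t/cfg are hypothetical):

    config_t* cfg;  /* hypothetical RCU-protected pointer */

    int reader(void)
    {
        int val;
        rcu_read_lock();        /* preemption_disable() */
        val = cfg->value;       /* cfg cannot be reclaimed while we read it */
        rcu_read_unlock();      /* assumed counterpart: preemption_enable() */
        return val;
    }

    void updater(config_t* newcfg)
    {
        config_t* old = cfg;
        rcu_assign_pointer(cfg, newcfg);  /* write_barrier(), then store */
        rcu_synchronize();                /* wait out the grace period */
        free(old);                        /* no reader can still see it */
    }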
/branches/rcu/kernel/generic/src/synch/rcu.c
43,14 → 43,9
#include <panic.h>
#include <print.h>
 
-SPINLOCK_INITIALIZE(rcu_global_lock);
-
-typedef struct rcu_callback_list {
-struct rcu_callback_list* next;
-void (*func)(void*);
-void* data;
-} rcu_callback_list_t;
 
 
typedef struct {
#ifdef CONFIG_SMP
bool* cpu_mask;
58,101 → 53,99
rcu_callback_list_t* next_batch, *current_batch, *done_batch;
} rcu_global_t;
 
 
-rcu_global_t* _rcu_global;
+/** An array of structures holding the callbacks and the progress of QS for each CPU */
+rcu_global_t* rcu_global=NULL;
+/** Reference to the RCU tasklet, for scheduling it */
tasklet_descriptor_t* rcu_tasklet_desc;
 
 
+/**
+* Initializes data structures needed for RCU
+*/
void rcu_init(void)
{
#ifdef CONFIG_SMP
-int i;
+int i,j;
#endif
 
-_rcu_global = malloc(sizeof(rcu_global_t),0);
-_rcu_global->done_batch = NULL;
-_rcu_global->current_batch = NULL;
-_rcu_global->next_batch = NULL;
-spinlock_initialize(&rcu_global_lock, "rcu_global_lock");
 
+rcu_global = malloc(sizeof(rcu_global_t)*(config.cpu_count),0);
rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);
tasklet_disable(rcu_tasklet_desc);
 
#ifdef CONFIG_SMP
-_rcu_global->cpu_mask = malloc (sizeof(bool)*config.cpu_count,0);
+/*
+* Note: the array is allocated for the case when every connected CPU is active.
+* If some CPUs are inactive, only the first cells will be used.
+*/
for (i=0;i<config.cpu_count;i++) {
-_rcu_global->cpu_mask[i]=false;
+rcu_global[i].done_batch = NULL;
+rcu_global[i].current_batch = NULL;
+rcu_global[i].next_batch = NULL;
+rcu_global[i].cpu_mask = malloc(sizeof(bool)*config.cpu_count,0);
+for (j=0;j<config.cpu_count;j++) {
+rcu_global[i].cpu_mask[j]=false;
+}
}
#else
tasklet_schedule(rcu_tasklet_desc);
 
+rcu_global[CPU->id].done_batch = NULL;
+rcu_global[CPU->id].current_batch = NULL;
+rcu_global[CPU->id].next_batch = NULL;
#endif
tasklet_enable(rcu_tasklet_desc);
}
 
 
+/**
+* Blocks until the grace period elapses
+*/
void rcu_synchronize(void)
{
#ifdef CONFIG_SMP
-waitq_t *wq = malloc(sizeof(waitq_t),0);
-waitq_initialize(wq);
-rcu_sync_callback(&rcu_synchronize_callback_function, wq);
-printf("going to sleep, tlock:%x, wqlock:%x\n", THREAD->lock.val, wq->lock.val);
-waitq_sleep(wq);
-printf("woken up\n");
-free(wq);
+waitq_t wq;
+waitq_initialize(&wq);
+rcu_sync_callback(&rcu_synchronize_callback_function, &wq);
+//sleep until the end of the grace period
+waitq_sleep(&wq);
#endif
}
 
#ifdef CONFIG_SMP
+/**
+* Wakes up rcu_synchronize when the grace period has elapsed
+*/
void rcu_synchronize_callback_function(void* waitq)
{
printf("waking up, wq:%x, wq->head:%x, next:%x, tlock:%x, wqlock:%x\n",
waitq,
((waitq_t*)waitq)->head,
((link_t)((waitq_t*)waitq)->head).next,
THREAD->lock.val,
((waitq_t*)waitq)->lock.val );
waitq_wakeup(((waitq_t*)waitq), WAKEUP_ALL);
}
#endif
 
-void rcu_sync_callback(void (*func)(void* data), void* data)
+
+/**
+* Appends this callback function to the queue of waiting callbacks; the rest
+* is handled in rcu_run_callbacks and in the tasklet. This is a lock-free
+* variant, which must be supplied with a preallocated rcu_callback_list_t structure.
+*/
+void rcu_sync_callback_custom_alloc(void (*func)(void* data), void* data, rcu_callback_list_t* rd)
{
#ifndef CONFIG_SMP
func(data);
#else
int i;
-rcu_callback_list_t *rd;
-rd = malloc(sizeof(rcu_callback_list_t), 0);
 
+ipl_t ipl;
rd->func = func;
rd->data = data;
rd->next = NULL;
 
printf("synccallback locking \n");
spinlock_lock(&rcu_global_lock);
ipl = interrupts_disable();
//append to the list of callbacks waiting for their batch to begin
rd->next = rcu_global[CPU->id].next_batch;
rcu_global[CPU->id].next_batch = rd;
interrupts_restore(ipl);
 
-rd->next = _rcu_global->next_batch;
-_rcu_global->next_batch = rd;
-
-if (_rcu_global->current_batch == NULL) {
-_rcu_global->current_batch = _rcu_global->next_batch;
-_rcu_global->next_batch = NULL;
-printf("setting callback %x as current\n",&rd->func);
-for (i=0;i<config.cpu_count;i++)
-_rcu_global->cpu_mask[i]=false;
-
-//we've surely passed the quiescent point just by running this method
-rcu_passQS();
-}
-for (i=0;i<config.cpu_count;i++) {
-tasklet_schedule_SMP(rcu_tasklet_desc, i);
-}
-spinlock_unlock(&rcu_global_lock);
-printf ("sync callback called,unlocking, state:%x \n",rcu_tasklet_desc->state);
+rcu_passQS();
#endif
}
 
-//TODO: polishing, comments
 
+/**
+* RCU tasklet: tests passing through QSs, moves the batches from current to done
+*/
void rcu_tasklet(void* data)
{
rcu_callback_list_t* rd;
160,63 → 153,90
#ifdef CONFIG_SMP
int i;
#endif
+ipl_t ipl;

+ipl = interrupts_disable();
 
rcu_passQS();
passed_all_QS = true;
printf("tasklet locking \n");
spinlock_lock(&rcu_global_lock);
#ifdef CONFIG_SMP
 
+//check whether all CPUs have passed through QS
for (i = 0; i < config.cpu_active; i++)
-passed_all_QS &= _rcu_global->cpu_mask[i];
+passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
#endif
if (passed_all_QS) {
-if (_rcu_global->done_batch) {
-rd = _rcu_global->done_batch;
+//all CPUs have passed through QS -> the grace period is over; we can schedule the call to the RCU callback
+if (rcu_global[CPU->id].done_batch) {
+rd = rcu_global[CPU->id].done_batch;
while (rd->next) rd = rd->next;
 
+//append the current list to done list
-rd->next = _rcu_global->current_batch;
+rd->next = rcu_global[CPU->id].current_batch;
} else
-_rcu_global->done_batch = _rcu_global->current_batch;
-printf("setting callback %x as done\n",&_rcu_global->current_batch->func);
-_rcu_global->current_batch = _rcu_global->next_batch;
-_rcu_global->next_batch = NULL;
-#ifdef CONFIG_SMP
-
-for (i=0;i<config.cpu_count;i++)
-_rcu_global->cpu_mask[i]=false;
-#endif
-//we've surely passed the quiescent point just by running this method
-rcu_passQS();
+rcu_global[CPU->id].done_batch = rcu_global[CPU->id].current_batch;
+rcu_global[CPU->id].current_batch = NULL;
}
 
-spinlock_unlock(&rcu_global_lock);
-printf("tasklet unlocking \n");
+interrupts_restore(ipl);
}
 
-inline void rcu_passQS(void)
+
+/**
+* Indicates that the current CPU has gone through the quiescent state
+*/
+void rcu_passQS(void)
{
#ifdef CONFIG_SMP
-_rcu_global->cpu_mask[CPU->id] = true;
+int i;
+for (i=0;i<config.cpu_active;i++)
+//on all CPUs indicate that this CPU has gone through QS
+rcu_global[i].cpu_mask[CPU->id]=true;
#endif
}
 
 
+/**
+* Moves RCU batches from next to current, schedules the RCU tasklet, calls the callbacks and frees the rcu_callback_list_t structures
+*/
void rcu_run_callbacks(void)
{
-rcu_callback_list_t* rd;
+rcu_callback_list_t* rd, *rd2;
+int i;
+ipl_t ipl;
 
+ipl = interrupts_disable();
+if (rcu_global[CPU->id].next_batch) {
+//we cannot append to the current list because the callbacks from the next batch
+//haven't passed the QSs yet
+if (rcu_global[CPU->id].current_batch == NULL) {
+rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
+rcu_global[CPU->id].next_batch = NULL;
+#ifdef CONFIG_SMP
+//initialize our CPU mask
+for (i=0;i<config.cpu_active;i++)
+rcu_global[CPU->id].cpu_mask[i]=false;
+#endif
+//schedule the tasklet for all CPUs
+for (i=0;i<config.cpu_active;i++) {
+tasklet_schedule_SMP(rcu_tasklet_desc, i);
+}
+}
+}
+//this CPU has passed QS
rcu_passQS();
-if (_rcu_global->done_batch) {
-printf("run callbacks locking\n");
-spinlock_lock(&rcu_global_lock);
-rd = _rcu_global->done_batch;
-_rcu_global->done_batch = NULL;
-spinlock_unlock(&rcu_global_lock);
-printf("run callbacks unlocking\n");
-for (; rd; rd=rd->next) {
-printf("calling %x \n",&rd->func);
-rd->func(&rd->data);
+if (rcu_global[CPU->id].done_batch) {
+rd = rcu_global[CPU->id].done_batch;
+rcu_global[CPU->id].done_batch = NULL;
+interrupts_restore(ipl);
+while (rd) {
+//call the callback
+rd->func(rd->data);
+rd2 = rd->next;
+//free the structure
+free(rd);
+rd = rd2;
}
}
+else
+interrupts_restore(ipl);
}
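A note on the allocation contract in the new API: the rcu_sync_callback macro in rcu.h is a convenience wrapper that heap-allocates the rcu_callback_list_t node and delegates to rcu_sync_callback_custom_alloc(), and rcu_run_callbacks() above passes each finished node to free(). A caller using the custom-allocation entry point directly therefore has to hand over a node that stays valid until its callback runs and that is safe to free() afterwards. A sketch (destroy and obj are hypothetical):

    static void destroy(void* data)
    {
        /* invoked from rcu_run_callbacks() once the grace period is over */
        free(data);
    }

    void retire(void* obj)
    {
        /* equivalent to rcu_sync_callback(&destroy, obj): allocate the
         * node ourselves; rcu_run_callbacks() will free() it later */
        rcu_callback_list_t* rd = malloc(sizeof(rcu_callback_list_t), 0);
        rcu_sync_callback_custom_alloc(&destroy, obj, rd);
    }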
 
 
/branches/rcu/kernel/generic/src/proc/tasklet.c
120,7 → 120,7
if (config.cpu_active>1) {
current_cpu = CPU->id;
//find the first cpu with nonempty tasklet_list
-for (new_cpu = (current_cpu + 1) % config.cpu_active; new_cpu!=current_cpu && tasklet_list[new_cpu]==0;
+for (new_cpu = (current_cpu + 1) % config.cpu_active; new_cpu!=current_cpu && tasklet_list[new_cpu]==0 && cpus[new_cpu].active;
new_cpu=(new_cpu + 1)% config.cpu_active);
 
if (new_cpu!=current_cpu) {
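The amended condition makes the round-robin scan consider CPU activity as well. As written, cpus[new_cpu].active is part of the continuation test, so the scan stops at the first inactive CPU it meets; a stand-alone model of a "first active CPU with a nonempty tasklet list" search would instead keep scanning past inactive ones (names here are hypothetical):

    #include <stdbool.h>

    /* Return the first CPU after current_cpu, in round-robin order, that is
     * active and has a nonempty tasklet list; -1 if there is none. */
    int pick_cpu(int current_cpu, int cpu_active,
                 const int list_len[], const bool active[])
    {
        int cpu;
        for (cpu = (current_cpu + 1) % cpu_active;
             cpu != current_cpu && (list_len[cpu] == 0 || !active[cpu]);
             cpu = (cpu + 1) % cpu_active)
            ;
        return (cpu == current_cpu) ? -1 : cpu;
    }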