Subversion Repositories HelenOS

Compare Revisions

Ignore whitespace Rev 2429 → Rev 2430

/branches/rcu/kernel/generic/include/proc/tasklet.h
30,7 → 30,8
/** @addtogroup genericddi
* @{
*/
/** @file
/** @file tasklet.h
* @brief Tasklets declarations
*/
 
#ifndef KERN_TASKLET_H_
/branches/rcu/kernel/generic/include/synch/rcu.h
29,7 → 29,8
/** @addtogroup sync
* @{
*/
/** @file
/** @file rcu.h
* @brief declarations for RCU
*/
 
#ifndef KERN_RCU_H_
41,9 → 42,14
#include <arch.h>
#include <preemption.h>
 
 
/** Node in a singly-linked list of RCU callbacks awaiting invocation. */
typedef struct rcu_callback_list {
	/** Link to the next pending callback; NULL at the tail of the list. */
	struct rcu_callback_list *next;
	/** Function invoked once the grace period has elapsed. */
	void (*func)(void *);
	/** Opaque argument passed to func at invocation time. */
	void *data;
} rcu_callback_list_t;
 
/branches/rcu/kernel/generic/src/synch/rcu.c
29,7 → 29,8
/** @addtogroup sync
* @{
*/
/** @file
/** @file rcu.c
* @brief RCU synchronization primitive
*/
 
#include <synch/rcu.h>
45,16 → 46,22
 
 
 
 
/** Main data structure of the RCU implementation */
typedef struct {
#ifdef CONFIG_SMP
/** flags indicating whether the corresponding CPU has passed QS for this RCU batch */
bool* cpu_mask;
#endif
rcu_callback_list_t* next_batch, *current_batch, *done_batch;
} rcu_global_t;
/** RCU batch waiting for finishing of current batch, QS monitoring hasn't been started for this one */
rcu_callback_list_t* next_batch;
/** RCU batch that waits for passing of QSs on all CPUs */
rcu_callback_list_t *current_batch;
/** RCU batch that has passed all QSs and waits for invocation */
rcu_callback_list_t *done_batch;
} rcu_cpu_data_t;
 
/** An array of structures holding the callbacks and the progress of QS for each CPU */
rcu_global_t* rcu_global=NULL;
rcu_cpu_data_t* rcu_global=NULL;
/** reference to the RCU tasklet, for scheduling it */
tasklet_descriptor_t* rcu_tasklet_desc;
 
68,13 → 75,13
int i,j;
#endif
 
rcu_global = malloc(sizeof(rcu_global_t)*(config.cpu_count),0);
rcu_global = malloc(sizeof(rcu_cpu_data_t)*(config.cpu_count),0);
rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);
 
#ifdef CONFIG_SMP
/*
* Note: I allocate the array for a case when every CPU connected will be active
* In a case when there will be some inactive CPUs, I will use just the first cells.
* In a case when there will be some inactive CPUs, I will use just the active ones
*/
for (i=0;i<config.cpu_count;i++) {
rcu_global[i].done_batch = NULL;
119,9 → 126,10
 
 
/**
* appends this callback func to the queue of waiting callbacks, the rest
* Appends this callback func to the queue of waiting callbacks, the rest
* is handled in rcu_run_callbacks and in the tasklet. This is a lock free variant,
* which must be supplied with a preallocated rcu_callback_list_t structure
* which is deallocated after the callback is called
*/
void rcu_sync_callback(void (*func)(void* data), void* data, rcu_callback_list_t* rd)
{
133,18 → 141,19
rd->func = func;
rd->data = data;
 
//disabling interrupts removes the need for any synchronization - the list of callbacks is
//always accessed only on the current CPU
ipl = interrupts_disable();
//append to the list of callbacks waiting for their batch to begin
rd->next = rcu_global[CPU->id].next_batch;
rcu_global[CPU->id].next_batch = rd;
rcu_passQS();
interrupts_restore(ipl);
 
rcu_passQS();
#endif
}
 
/**
* RCU tasklet, tests passing through QSs, moves from current to done
* RCU tasklet, tests passing through QSs, moves from current list to done list
*/
void rcu_tasklet(void* data)
{
154,20 → 163,23
int i;
#endif
ipl_t ipl;
passed_all_QS = true;
 
 
ipl = interrupts_disable();
 
rcu_passQS();
passed_all_QS = true;
#ifdef CONFIG_SMP
//check whether all CPUs have passed through QS
for (i = 0; i < config.cpu_active; i++)
passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
//check whether all CPUs have passed through QS of this CPU's current batch
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].active)
passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
#endif
if (passed_all_QS) {
//all CPUs have passed through QS -> grace period is over, we can schedule the call to RCU callback
if (rcu_global[CPU->id].done_batch) {
rd = rcu_global[CPU->id].done_batch;
 
while (rd->next) rd = rd->next;
//append the current list to done list
rd->next = rcu_global[CPU->id].current_batch;
186,15 → 198,18
{
#ifdef CONFIG_SMP
int i;
for (i=0;i<config.cpu_active;i++)
//on all CPUs indicate that this CPU has gone through QS
rcu_global[i].cpu_mask[CPU->id]=true;
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].active)
//on all CPUs indicate that this CPU has gone through QS
//this can overlap with clearing this flag in rcu_run_callbacks
rcu_global[i].cpu_mask[CPU->id]=true;
#endif
}
 
 
/**
* Moves RCUs from next to current, schedules RCU tasklet, calls the callbacks, frees the rcu_callback_list_t
* Moves RCU callbacks from next list to current list, schedules the RCU tasklet when needed,
* calls the callbacks from done list, frees the rcu_callback_list_t
*/
void rcu_run_callbacks(void)
{
204,7 → 219,7
 
ipl = interrupts_disable();
if (rcu_global[CPU->id].next_batch) {
//we cannot append to the current list because callbacks from next batch
//we cannot append to the current list because the callbacks from next batch
//haven't passed the QSs
if (rcu_global[CPU->id].current_batch == NULL) {
rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
211,13 → 226,14
rcu_global[CPU->id].next_batch = NULL;
#ifdef CONFIG_SMP
//initialize our CPU mask
for (i=0;i<config.cpu_active;i++)
rcu_global[CPU->id].cpu_mask[i]=false;
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].active)
rcu_global[CPU->id].cpu_mask[i]=false;
#endif
//schedule tasklet for all CPUs
for (i=0;i<config.cpu_active;i++) {
tasklet_schedule_SMP(rcu_tasklet_desc, i);
}
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].active)
tasklet_schedule_SMP(rcu_tasklet_desc, i);
}
}
//this CPU has passed QS
225,9 → 241,12
if (rcu_global[CPU->id].done_batch) {
rd = rcu_global[CPU->id].done_batch;
rcu_global[CPU->id].done_batch = NULL;
//the callbacks (and free) can block, we must restore the interrupts
interrupts_restore(ipl);
while (rd) {
//call the callback
if (rd->func == NULL)
panic_printf("RCU callback function NULL, desc:%x", rd);
rd->func(rd->data);
rd2 = rd->next;
//free the structure
/branches/rcu/kernel/generic/src/proc/tasklet.c
30,7 → 30,8
/** @addtogroup genericddi
* @{
*/
/** @file
/** @file tasklet.c
* @brief Tasklet implementation
*/
 
#include <arch.h>
91,10 → 92,10
//create the tasklet_thread, it's wired to the current cpu, we'll migrate it ourselves
thread_t* t= thread_create(&tasklet_thread, NULL, kernel_task, THREAD_FLAG_WIRED, "tasklet_thread", false);
if (t==NULL) {
//wtf?
panic_printf("tasklet thread not created\n");
} else {
spinlock_lock(&t->lock);
spinlock_lock(&t->lock);
//we'll default on the first CPU
t->cpu = &cpus[0];
t->priority = TASKLET_THREAD_PRIORITY;
spinlock_unlock(&t->lock);
114,21 → 115,25
waitq_t wq;
waitq_initialize(&wq);
//the infinite loop
while (true) {
//execute any scheduled tasklets
tasklet_do();
#ifdef CONFIG_SMP
//check whether other CPUs have tasklets to execute
if (config.cpu_active>1) {
current_cpu = CPU->id;
//find the first cpu with nonempty tasklet_list
for (new_cpu = (current_cpu + 1) % config.cpu_active; new_cpu!=current_cpu && tasklet_list[new_cpu]==0 && cpus[new_cpu].active;
new_cpu=(new_cpu + 1)% config.cpu_active);
for (new_cpu = (current_cpu + 1) % config.cpu_count; new_cpu!=current_cpu && tasklet_list[new_cpu]==0 ;
new_cpu=(new_cpu + 1)% config.cpu_count);
 
if (new_cpu!=current_cpu) {
//if we found a CPU with a pending tasklet, schedule this thread to run there. The CPU must be active!
if (new_cpu!=current_cpu && cpus[new_cpu].active) {
//we need to migrate this thread to CPU with id new_cpu
cpu = &cpus[new_cpu];
 
spinlock_lock(&THREAD->lock);
//put tasklet_thread on the new_cpu
//move tasklet_thread on the new_cpu
THREAD->cpu = cpu;
spinlock_unlock(&THREAD->lock);
}
203,12 → 208,12
 
 
 
/** Executes scheduled enabled tasklets on current CPU */
/** Executes scheduled enabled tasklets on current CPU
* this function may be called from other parts of the kernel */
void tasklet_do(void)
{
spinlock_lock(&tasklet_lock);
tasklet_descriptor_t* t = tasklet_list[CPU->id];
//printf(".");
if (t) {
//empty the tasklet_list
tasklet_list[CPU->id]=0;
218,8 → 223,7
if (t->func) {
t->state = TASKLET_STATE_RUNNING;
t->func(t->data);
//clear running flag, set not active - the tasklet can disable itself
//thats why we don't just set it as not active
//clear running flag, set not active
t->state &= ~TASKLET_STATE_RUNNING;
t->state |= TASKLET_STATE_NOTACTIVE;
} else
240,7 → 244,7
}
 
/** Frees the tasklet structure when no longer needed. The function doesn't provide
* any synchronization, the caller must be sure, the tasklet is not scheduled.
* any synchronization, the caller must be sure, that the tasklet is not scheduled.
*
* @param tasklet to be freed
*/