/*
* Copyright (c) 2007 Jan Hudecek
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** @addtogroup sync
* @{
*/
/** @file rcu.c
* @brief RCU synchronization primitive
*/
#include <synch/rcu.h>
#include <synch/waitq.h>
#include <arch.h>
#include <config.h>
#include <arch/types.h>
#include <proc/tasklet.h>
#include <synch/spinlock.h>
#include <time/delay.h>
#include <panic.h>
#include <print.h>
/** Per-CPU data structure of the RCU implementation.
 *
 * Each CPU owns three singly-linked batches of callbacks that advance
 * through the pipeline: next_batch -> current_batch -> done_batch
 * (see rcu_run_callbacks() and rcu_tasklet()).
 */
typedef struct {
#ifdef CONFIG_SMP
/** flags indicating whether the corresponding CPU has passed QS for this RCU batch */
bool* cpu_mask;
#endif
/** RCU batch waiting for finishing of current batch, QS monitoring hasn't been started for this one */
rcu_callback_list_t* next_batch;
/** RCU batch that waits for passing of QSs on all CPUs */
rcu_callback_list_t *current_batch;
/** RCU batch that has passed all QSs and waits for invocation */
rcu_callback_list_t *done_batch;
} rcu_cpu_data_t;
/** An array of structures holding the callbacks and the progress of QS for each CPU.
 * Allocated in rcu_init(); indexed by CPU id. */
rcu_cpu_data_t* rcu_global=NULL;
/** Reference to the RCU tasklet, for scheduling it (set in rcu_init()). */
tasklet_descriptor_t* rcu_tasklet_desc;
/**
 * Initializes the data structures needed for RCU.
 *
 * Allocates the per-CPU bookkeeping array sized for every configured CPU,
 * registers the RCU tasklet and resets all batch lists (and, on SMP, the
 * per-CPU quiescent-state masks) to their empty state.
 *
 * NOTE(review): the malloc() results are used unchecked — presumably the
 * kernel allocator with flags 0 cannot fail here; confirm against the
 * allocator's contract.
 */
void rcu_init(void)
{
#ifdef CONFIG_SMP
	int i, j;
#endif
	rcu_global = malloc(sizeof(rcu_cpu_data_t) * (config.cpu_count), 0);
	rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);
#ifdef CONFIG_SMP
	/*
	 * The arrays are sized for the case when every CPU connected will be
	 * active. If some CPUs stay inactive, only the active subset is used.
	 */
	for (i = 0; i < config.cpu_count; i++) {
		rcu_global[i].done_batch = NULL;
		rcu_global[i].current_batch = NULL;
		rcu_global[i].next_batch = NULL;
		rcu_global[i].cpu_mask = malloc(sizeof(bool) * config.cpu_count, 0);
		for (j = 0; j < config.cpu_count; j++) {
			rcu_global[i].cpu_mask[j] = false;
		}
	}
#else
	rcu_global[CPU->id].done_batch = NULL;
	rcu_global[CPU->id].current_batch = NULL;
	rcu_global[CPU->id].next_batch = NULL;
#endif
}
/**
 * Blocks the calling thread until a full grace period has elapsed.
 *
 * On SMP this enqueues a wakeup callback and sleeps on a stack-local wait
 * queue until the callback fires at the end of the grace period. On
 * uniprocessor builds a grace period is implicit, so this is a no-op.
 */
void rcu_synchronize(void)
{
#ifdef CONFIG_SMP
	waitq_t wait_queue;

	waitq_initialize(&wait_queue);
	rcu_sync_callback_normal_alloc(&rcu_synchronize_callback_function, &wait_queue);
	/* Sleep here; rcu_synchronize_callback_function() wakes us up. */
	waitq_sleep(&wait_queue);
#endif
}
#ifdef CONFIG_SMP
/**
 * Grace-period completion callback: wakes every thread sleeping in
 * rcu_synchronize() on the given wait queue.
 *
 * @param waitq Pointer to a waitq_t, passed as void* through the generic
 *              RCU callback interface.
 */
void rcu_synchronize_callback_function(void* waitq)
{
	waitq_wakeup((waitq_t *) waitq, WAKEUP_ALL);
}
#endif
/**
 * Enqueues a callback to run after the current grace period.
 *
 * The callback descriptor is appended to this CPU's queue of waiting
 * callbacks; the rest is handled in rcu_run_callbacks() and in the RCU
 * tasklet. This is the lock-free variant: the caller supplies a
 * preallocated rcu_callback_list_t, which is deallocated after the
 * callback has been invoked.
 *
 * @param func Callback to invoke once the grace period has elapsed.
 * @param data Opaque argument handed to @p func.
 * @param rd   Preallocated descriptor linking the callback into the queue.
 */
void rcu_sync_callback(void (*func)(void* data), void* data, rcu_callback_list_t* rd)
{
#ifndef CONFIG_SMP
	/* Uniprocessor: no concurrent readers exist, invoke immediately. */
	func(data);
#else
	ipl_t saved_ipl;

	rd->func = func;
	rd->data = data;

	/*
	 * Disabling interrupts removes the need for any other
	 * synchronization: the callback lists are only ever accessed on the
	 * owning CPU.
	 */
	saved_ipl = interrupts_disable();
	/* Push onto the batch waiting for its grace period to begin. */
	rd->next = rcu_global[CPU->id].next_batch;
	rcu_global[CPU->id].next_batch = rd;
	rcu_passQS();
	interrupts_restore(saved_ipl);
#endif
}
/**
 * RCU tasklet: records a quiescent state for this CPU and, once every
 * active CPU has passed a QS, moves the current batch onto the done list.
 *
 * @param data Unused tasklet argument.
 */
void rcu_tasklet(void* data)
{
	rcu_callback_list_t* tail;
	bool grace_period_over = true;
#ifdef CONFIG_SMP
	int cpu;
#endif
	ipl_t saved_ipl;

	saved_ipl = interrupts_disable();
	rcu_passQS();
#ifdef CONFIG_SMP
	/* The grace period ends once all active CPUs have acknowledged QS. */
	for (cpu = 0; cpu < config.cpu_count; cpu++) {
		if (cpus[cpu].active && !rcu_global[CPU->id].cpu_mask[cpu]) {
			grace_period_over = false;
			break;
		}
	}
#endif
	if (grace_period_over) {
		/*
		 * Grace period is over: append the current batch to the tail
		 * of the done list so its callbacks can be invoked.
		 */
		if (rcu_global[CPU->id].done_batch == NULL) {
			rcu_global[CPU->id].done_batch = rcu_global[CPU->id].current_batch;
		} else {
			tail = rcu_global[CPU->id].done_batch;
			while (tail->next)
				tail = tail->next;
			tail->next = rcu_global[CPU->id].current_batch;
		}
		rcu_global[CPU->id].current_batch = NULL;
	}
	interrupts_restore(saved_ipl);
}
/**
 * Announces on every active CPU that the current CPU has gone through a
 * quiescent state.
 */
void rcu_passQS(void)
{
#ifdef CONFIG_SMP
	int cpu;

	/*
	 * Set our flag in each active CPU's mask. This can overlap with the
	 * mask being cleared in rcu_run_callbacks(); per the original
	 * design that race is tolerated.
	 */
	for (cpu = 0; cpu < config.cpu_count; cpu++) {
		if (cpus[cpu].active)
			rcu_global[cpu].cpu_mask[CPU->id] = true;
	}
#endif
}
/**
 * Advances this CPU's RCU pipeline: promotes callbacks from the next list
 * to the current list (scheduling the RCU tasklet on all active CPUs when
 * a new batch starts), records a QS for this CPU, and invokes the
 * callbacks on the done list.
 *
 * Runs with interrupts disabled while manipulating the lists; interrupts
 * are restored before the callbacks are called because they may block.
 */
void rcu_run_callbacks(void)
{
rcu_callback_list_t* rd, *rd2;
int i;
ipl_t ipl;
ipl = interrupts_disable();
if (rcu_global[CPU->id].next_batch) {
//we cannot append to the current list because the callbacks from next batch
//haven't passed the QSs
if (rcu_global[CPU->id].current_batch == NULL) {
rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
rcu_global[CPU->id].next_batch = NULL;
#ifdef CONFIG_SMP
//initialize our CPU mask - no CPU has passed a QS for this new batch yet
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].active)
rcu_global[CPU->id].cpu_mask[i]=false;
#endif
//schedule tasklet for all CPUs
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].active)
tasklet_schedule_SMP(rcu_tasklet_desc, i);
}
}
//this CPU has passed QS
rcu_passQS();
if (rcu_global[CPU->id].done_batch) {
rd = rcu_global[CPU->id].done_batch;
rcu_global[CPU->id].done_batch = NULL;
//the callbacks (and free) can block, we must restore the interrupts
interrupts_restore(ipl);
while (rd) {
//call the callback
if (rd->func == NULL)
panic_printf("RCU callback function NULL, desc:%x", rd);
rd->func(rd->data);
rd2 = rd->next;
//NOTE(review): the descriptor is NOT freed here despite the original
//"free the structure" comment; presumably the callback itself is
//expected to deallocate it (cf. rcu_sync_callback's doc), otherwise
//this leaks one rcu_callback_list_t per callback - verify ownership.
rd = rd2;
}
}
else
interrupts_restore(ipl);
}
/**
 * Generic RCU callback that deallocates @paramref pointer.
 *
 * The original body was empty even though the function is documented to
 * free its argument; without the free() every object retired through this
 * callback would leak.
 *
 * @param pointer Heap object to release; ownership is taken by this call.
 */
void rcu_callback_free(void* pointer)
{
	free(pointer);
}