Subversion Repositories HelenOS

Compare Revisions

Ignore whitespace Rev 2308 → Rev 2309

/branches/rcu/kernel/test/test.c
58,6 → 58,7
#include <thread/thread1.def>
#include <sysinfo/sysinfo1.def>
#include <tasklet/tasklet1.def>
#include <synch/rcu1.def>
{NULL, NULL, NULL}
};
 
/branches/rcu/kernel/test/synch/rcu1.c
0,0 → 1,64
/*
* Copyright (c) 2007 Jan Hudecek
* Copyright (c) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <synch/rcu.h>
#include <print.h>
#include <test.h>
#include <arch/types.h>
#include <proc/tasklet.h>
#include <arch/barrier.h>
#include <arch.h>
#include <preemption.h>
/* Global copy of the test's quiet flag, consulted by callback(). */
bool gquiet;

/** RCU grace-period callback used by the basic test; announces its
 * invocation unless quiet mode was requested.
 *
 * @param data unused callback payload
 */
static void callback(void* data)
{
	if (gquiet)
		return;
	printf("callback called\n");
}
/** Very basic smoke test of the RCU primitives.
 *
 * Exercises read-side lock/unlock, RCU pointer assignment and
 * dereference, and both synchronization flavours (callback-based and
 * blocking).
 *
 * @param quiet suppress informational output when true
 * @return NULL (test-framework convention: a failure would be reported
 *         by returning a message string; this test only checks that the
 *         calls complete)
 */
char * test_rcu1(bool quiet)
{
/* publish the quiet flag for callback() */
gquiet = quiet;
int* p;
/* enter and immediately leave a read-side critical section */
rcu_read_lock();
rcu_read_unlock();
/* publish a freshly allocated integer through the RCU-protected pointer */
rcu_assign_pointer(p,malloc(sizeof(int),0));
if (!quiet)
/* NOTE(review): this prints the *value* behind p, which is
 * uninitialized heap memory -- presumably only meant to show the
 * macro expands and the call chain works; confirm intent */
printf("p:%x\n",rcu_dereference_pointer(p));
/* schedule callback() to run once a grace period has elapsed */
rcu_sync_callback(&callback, NULL);
if (!quiet)
printf("Callback scheduled\n");
/* block until a full grace period has elapsed */
rcu_synchronize();
if (!quiet)
printf("Synchronized\n");
/* NOTE(review): p is never freed -- harmless in a one-shot kernel
 * test, but worth confirming it is intentional */
return NULL;
 
}
 
 
/branches/rcu/kernel/test/synch/rcu1.def
0,0 → 1,6
/* Test-suite registration record for the basic RCU test: name,
 * description, entry point and a trailing flag (presumably the "safe"
 * field of test_t -- TODO confirm against the test_t declaration in
 * test.h). */
{
"rcu1",
"RCU test (very basic)",
&test_rcu1,
true
},
/branches/rcu/kernel/test/test.h
70,6 → 70,7
extern char * test_thread1(bool quiet);
extern char * test_sysinfo1(bool quiet);
extern char * test_tasklet1(bool quiet);
extern char * test_rcu1(bool quiet);
 
extern test_t tests[];
 
/branches/rcu/kernel/generic/include/proc/tasklet.h
82,6 → 82,13
*/
void tasklet_schedule(tasklet_descriptor_t* t);
 
 
/** Schedules the tasklet for execution on id CPU
* @param t tasklet to be scheduled
* @param id CPU id on which the tasklet will be scheduled
*/
void tasklet_schedule_SMP(tasklet_descriptor_t* t, uint32_t id);
 
/** Tasklet will not be run, even if scheduled
* @param t tasklet to be disabled
*/
/branches/rcu/kernel/generic/include/synch/rcu.h
38,24 → 38,28
#include <arch/types.h>
#include <proc/tasklet.h>
#include <arch/barrier.h>
#include <arch.h>
#include <preemption.h>
 
 
/** Read lock for RCU protected pointer */
#define rcu_read_lock preemption_disable()
#define rcu_read_lock() preemption_disable()
 
/** Release of read lock for RCU protected pointer */
#define rcu_read_unlock preemption_enable()
#define rcu_read_unlock() preemption_enable()
 
/** Dereferencing of an RCU protected pointer */
#define rcu_dereference_pointer(p) (*(p))
 
/** Assigning a value to an RCU protected pointer */
#define rcu_assign_pointer(p, newp) {write_barrier(); (p)=(newp)}
#define rcu_assign_pointer(p, newp) {write_barrier(); (p)=(newp);}
 
void rcu_init(void);
void rcu_synchronize(void);
void rcu_synchronize_callback_function(void* waitq);
void rcu_sync_callback(void (*func)(void* data), void* data);
void rcu_tasklet(void* data);
inline void rcu_passQS(void);
void rcu_run_callbacks(void);
 
#endif
/branches/rcu/kernel/generic/src/synch/rcu.c
43,53 → 43,77
#include <panic.h>
#include <print.h>
 
typedef struct {
uint32_t current_batch;
uint32_t completed_batch;
bool next_batch_waiting;
} rcu_global_t;
SPINLOCK_INITIALIZE(rcu_global_lock);
 
typedef struct rcu_callback_list {
struct rcu_callback_list* next;
void (*func)(void*);
void* data;
bool* cpu_mask;
} rcu_callback_list_t;
 
 
typedef struct {
uint32_t current_batch_number;
uint32_t QS_passed;
bool QS_pending;
typedef struct {
#ifdef CONFIG_SMP
bool* cpu_mask;
#endif
rcu_callback_list_t* next_batch, *current_batch, *done_batch;
} rcu_percpu_t;
} rcu_global_t;
 
rcu_global_t _rcu_global;
rcu_percpu_t* _rcu_cpu_lists;
 
rcu_global_t* _rcu_global;
tasklet_descriptor_t* rcu_tasklet_desc;
 
/** Initializes the global RCU state, registers the RCU tasklet and (on
 * non-SMP configurations) schedules it once.
 *
 * NOTE(review): this hunk of the diff viewer superimposes pre- and
 * post-revision lines without markers; the statements flagged below use
 * the OLD by-value _rcu_global / _rcu_cpu_lists layout and cannot
 * coexist with the new pointer-based code that follows -- confirm
 * against the clean rev 2309 source.
 */
void rcu_init(void)
{
/* NOTE(review): the next four statements appear to be stale rev 2308
 * residue (dot access on what is now a pointer, and _rcu_cpu_lists,
 * which the new revision drops); the real initialization starts below */
_rcu_cpu_lists = malloc(sizeof(rcu_percpu_t)*config.cpu_count,0);
_rcu_global.completed_batch = -1;
_rcu_global.current_batch = -1;
_rcu_global.next_batch_waiting = -1;
#ifdef CONFIG_SMP
int i;
#endif
 
/* allocate and clear the global batch lists */
_rcu_global = malloc(sizeof(rcu_global_t),0);
_rcu_global->done_batch = NULL;
_rcu_global->current_batch = NULL;
_rcu_global->next_batch = NULL;
spinlock_initialize(&rcu_global_lock, "rcu_global_lock");
 
/* register the grace-period tasklet; keep it disabled while the
 * remaining state is set up */
rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);
tasklet_disable(rcu_tasklet_desc);
 
#ifdef CONFIG_SMP
/* one quiescent-state flag per CPU, all initially not-passed */
_rcu_global->cpu_mask = malloc (sizeof(bool)*config.cpu_count,0);
for (i=0;i<config.cpu_count;i++) {
_rcu_global->cpu_mask[i]=false;
}
#else
/* uniprocessor: no QS tracking needed, run the tasklet right away */
tasklet_schedule(rcu_tasklet_desc);
 
#endif
tasklet_enable(rcu_tasklet_desc);
}
 
/** Blocks the caller until a full grace period has elapsed, by
 * registering a wakeup callback and sleeping on a wait queue.
 *
 * NOTE(review): the diff viewer superimposes the old rev 2308 body
 * (stack-allocated wq, first four statements) on the new SMP-guarded
 * body; as shown, wq is declared twice and this cannot compile --
 * confirm against the clean rev 2309 source.
 */
void rcu_synchronize(void)
{
/* NOTE(review): stale rev 2308 residue begins here */
waitq_t wq;
waitq_initialize(&wq);
rcu_sync_callback(rcu_synchronize_callback_function, &wq);
waitq_sleep(&wq);
#ifdef CONFIG_SMP
/* new body: heap-allocated queue so the callback can outlive this
 * frame safely; NOTE(review): wq is never freed -- presumably a leak,
 * verify */
waitq_t *wq = malloc(sizeof(waitq_t),0);
waitq_initialize(wq);
rcu_sync_callback(&rcu_synchronize_callback_function, wq);
printf("going to sleep\n");
waitq_sleep(wq);
printf("woken up\n");
#endif
}
 
#ifdef CONFIG_SMP
/** Grace-period callback used by rcu_synchronize(): wakes up the
 * sleeping caller.
 *
 * @param waitq the waitq_t the caller sleeps on, passed as void*
 *
 * NOTE(review): the diff viewer shows both the old rev 2308 wakeup call
 * (boolean `true` argument) and the new one (WAKEUP_ALL_INC_MISSED);
 * only the latter belongs to rev 2309 -- confirm against clean source.
 */
void rcu_synchronize_callback_function(void* waitq)
{
waitq_wakeup(((waitq_t*)waitq), true);
printf("waking up\n");
waitq_wakeup(((waitq_t*)waitq), WAKEUP_ALL_INC_MISSED);
}
#endif
 
void rcu_sync_callback(void (*func)(void* data), void* data)
{
#ifndef CONFIG_SMP
func(data);
#else
int i;
rcu_callback_list_t *rd;
rd = malloc(sizeof(rcu_callback_list_t), 0);
96,20 → 120,95
rd->func = func;
rd->data = data;
rd->next = NULL;
printf("synccallback locking \n");
spinlock_lock(&rcu_global_lock);
rd->next = _rcu_global->next_batch;
_rcu_global->next_batch = rd;
 
rd->cpu_mask = malloc (sizeof(bool)*config.cpu_count,0);
for (i=0;i<config.cpu_count;i++)
rd->cpu_mask[i]=false;
i = ++(_rcu_global.current_batch);
_rcu_global.next_batch_waiting = true;
if (_rcu_global->current_batch == NULL) {
_rcu_global->current_batch = _rcu_global->next_batch;
_rcu_global->next_batch = NULL;
printf("setting callback %x as current\n",&rd->func);
for (i=0;i<config.cpu_count;i++)
_rcu_global->cpu_mask[i]=false;
 
rd->next = _rcu_cpu_lists[0].next_batch;
//we've surely passed the quiescent point just by running this method
rcu_passQS();
}
for (i=0;i<config.cpu_count;i++) {
_rcu_cpu_lists[i].next_batch = rd;
_rcu_cpu_lists[i].QS_pending = true;
tasklet_schedule_SMP(rcu_tasklet_desc, i);
}
spinlock_unlock(&rcu_global_lock);
printf ("sync callback called,unlocking, state:%x \n",rcu_tasklet_desc->state);
#endif
}
 
//TODO:tasklet, after_thread_ran, polishing
//TODO: polishing, comments
 
/** Periodic grace-period detector, run as a tasklet.
 *
 * Checks whether every active CPU has passed a quiescent state; if so,
 * moves the current callback batch onto the done list (to be invoked by
 * rcu_run_callbacks()) and promotes the next batch to current.
 *
 * @param data unused tasklet payload
 */
void rcu_tasklet(void* data)
{
rcu_callback_list_t* rd;
bool passed_all_QS;
#ifdef CONFIG_SMP
int i;
#endif
/* running this tasklet is itself a quiescent state for this CPU */
rcu_passQS();
passed_all_QS = true;
printf("tasklet locking \n");
spinlock_lock(&rcu_global_lock);
#ifdef CONFIG_SMP
 
/* the grace period is over only if every active CPU has checked in */
for (i = 0; i < config.cpu_active; i++)
passed_all_QS &= _rcu_global->cpu_mask[i];
#endif
if (passed_all_QS) {
if (_rcu_global->done_batch) {
/* walk to the tail of the done list */
rd = _rcu_global->done_batch;
while (rd->next) rd = rd->next;
 
//append the current list to done list
rd->next = _rcu_global->current_batch;
} else
_rcu_global->done_batch = _rcu_global->current_batch;
/* NOTE(review): if current_batch is NULL here this prints a bogus
 * near-zero offset (&NULL->func); harmless but misleading -- verify */
printf("setting callback %x as done\n",&_rcu_global->current_batch->func);
_rcu_global->current_batch = NULL;
}
 
/* NOTE(review): this promotion is unconditional -- if the grace period
 * has NOT completed, the still-pending current batch is overwritten and
 * its callbacks are lost; also the per-CPU QS mask is not cleared here
 * for the newly promoted batch. Both look like bugs -- confirm against
 * later revisions. */
_rcu_global->current_batch = _rcu_global->next_batch;
_rcu_global->next_batch = NULL;
 
if (_rcu_global->current_batch == NULL) {
//there are no rcu callbacks registered, there is no need to monitor QS
printf("tasklet idle disabling \n");
// tasklet_disable(rcu_tasklet_desc);
spinlock_unlock(&rcu_global_lock);
} else
/* note: both branches currently just release the lock; the branch
 * structure only matters if the disable call above is reinstated */
spinlock_unlock(&rcu_global_lock);
printf("tasklet unlocking \n");
}
 
/** Records that the executing CPU has passed through a quiescent state
 * for the current grace period (no-op on uniprocessor builds).
 *
 * NOTE(review): plain `inline` at file scope has compiler-dependent
 * linkage (gnu89 vs C99 semantics) -- confirm the build flags make the
 * out-of-line definition available to other translation units.
 */
inline void rcu_passQS(void)
{
#ifdef CONFIG_SMP
/* flag this CPU in the global grace-period mask */
_rcu_global->cpu_mask[CPU->id] = true;
#endif
}
 
void rcu_run_callbacks(void)
{
rcu_callback_list_t* rd;
rcu_passQS();
if (_rcu_global->done_batch) {
printf(".");
spinlock_lock(&rcu_global_lock);
for (rd = _rcu_global->done_batch; rd; rd=rd->next) {
printf("calling %x \n",&rd->func);
rd->func(&rd->data);
}
_rcu_global->done_batch = NULL;
spinlock_unlock(&rcu_global_lock);
printf(":");
}
}
 
 
/branches/rcu/kernel/generic/src/proc/tasklet.c
159,13 → 159,22
*/
/** Schedules the tasklet for execution on the current CPU.
 * @param t tasklet to be scheduled
 */
void tasklet_schedule(tasklet_descriptor_t* t)
{
/* delegate to the SMP variant, pinning to the executing CPU */
tasklet_schedule_SMP(t, CPU->id);
}
 
/** Schedules the tasklet for execution on the CPU given by id.
 *
 * Links the tasklet at the head of that CPU's pending list under
 * tasklet_lock and marks it scheduled.
 *
 * @param t tasklet to be scheduled
 * @param id CPU id on which the tasklet will be scheduled
 *
 * NOTE(review): there is no guard against scheduling a tasklet that is
 * already queued -- re-linking a queued node overwrites t->next and
 * would corrupt the list; confirm callers never double-schedule.
 */
void tasklet_schedule_SMP(tasklet_descriptor_t* t, uint32_t id)
{
spinlock_lock(&tasklet_lock);
//clear notactive, running and scheduled flags
t->state &= TASKLET_STATE_DISABLED;
//set the scheduled flag
t->state |= TASKLET_STATE_SCHEDULED;
/* push onto the head of the target CPU's pending list */
t->next=tasklet_list[id];
tasklet_list[id]=t;
spinlock_unlock(&tasklet_lock);
}
 
203,24 → 212,31
if (t) {
//empty the tasklet_list
tasklet_list[CPU->id]=0;
spinlock_unlock(&tasklet_lock);
do {
if (!(t->state & TASKLET_STATE_DISABLED)) {
if (t->func) {
t->state = TASKLET_STATE_RUNNING;
t->func(t->data);
t->state = TASKLET_STATE_NOTACTIVE;
//clear running flag, set not active - the tasklet can disable itself
//thats why we don't just set it as not active
t->state &= ~TASKLET_STATE_RUNNING;
t->state |= TASKLET_STATE_NOTACTIVE;
} else
panic_printf("tasklet func NULL\n");
} else {
//return it back to the queue of scheduled tasklets
spinlock_lock(&tasklet_lock);
t->next = tasklet_list[CPU->id];
tasklet_list[CPU->id] = t;
spinlock_unlock(&tasklet_lock);
}
t=t->next;
}
while (t);
}
spinlock_unlock(&tasklet_lock);
else
spinlock_unlock(&tasklet_lock);
}
 
/** Frees the tasklet structure when no longer needed. The function doesn't provide