Subversion Repositories HelenOS

Rev

Rev 2330 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2260 hudecek 1
/*
2
 * Copyright (c) 2007 Jan Hudecek
3
 * Copyright (c) 2006 Jakub Jermar
4
 * All rights reserved.
5
 *
6
 * Redistribution and use in source and binary forms, with or without
7
 * modification, are permitted provided that the following conditions
8
 * are met:
9
 *
10
 * - Redistributions of source code must retain the above copyright
11
 *   notice, this list of conditions and the following disclaimer.
12
 * - Redistributions in binary form must reproduce the above copyright
13
 *   notice, this list of conditions and the following disclaimer in the
14
 *   documentation and/or other materials provided with the distribution.
15
 * - The name of the author may not be used to endorse or promote products
16
 *   derived from this software without specific prior written permission.
17
 *
18
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
 */
29
/** @addtogroup sync
30
 * @{
31
 */
32
/** @file
33
 */
34
 
35
#include <synch/rcu.h>
36
#include <synch/waitq.h>
37
#include <arch.h>
38
#include <config.h>
39
#include <arch/types.h>
2265 hudecek 40
#include <proc/tasklet.h>
2260 hudecek 41
#include <synch/spinlock.h>
42
#include <time/delay.h>
43
#include <panic.h>
44
#include <print.h>
45
 
46
 
47
 
2330 hudecek 48
 
2309 hudecek 49
/** Per-CPU RCU bookkeeping: the callback batches in the three stages of
 * their lifetime and (on SMP) a mask tracking which CPUs have passed
 * through a quiescent state for the batch in progress.
 */
typedef struct  {
#ifdef CONFIG_SMP 
    /* cpu_mask[i] == true iff CPU i has passed through a quiescent state
     * since the current batch started; allocated in rcu_init() */
    bool* cpu_mask;
#endif
    /* next_batch:    callbacks waiting for their grace period to begin
     * current_batch: callbacks whose grace period is in progress
     * done_batch:    callbacks whose grace period elapsed, ready to run */
    rcu_callback_list_t* next_batch, *current_batch, *done_batch;
} rcu_global_t;
2260 hudecek 55
 
2330 hudecek 56
/** An array of structures holding the callbacks and the progress of QS for each CPU*/
/* One cell per CPU; allocated and initialized in rcu_init(). */
rcu_global_t* rcu_global=NULL;
/** reference to the RCU tasklet, for scheduling it */
tasklet_descriptor_t* rcu_tasklet_desc;
60
 
2330 hudecek 61
 
62
/**
63
* Initializes data structures needed for RCU
64
*/
2265 hudecek 65
void rcu_init(void)
2260 hudecek 66
{
2309 hudecek 67
#ifdef CONFIG_SMP 
2330 hudecek 68
    int i,j;
2309 hudecek 69
#endif
70
 
2330 hudecek 71
    rcu_global = malloc(sizeof(rcu_global_t)*(config.cpu_count),0);
2309 hudecek 72
    rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);
73
 
74
#ifdef CONFIG_SMP 
2330 hudecek 75
    /*
76
     * Note: I allocate the array for a case when every CPU connected will be active
77
     * In a case when there will be some inactive CPUs, I will use just the first cells.
78
     */
2309 hudecek 79
    for (i=0;i<config.cpu_count;i++) {
2330 hudecek 80
        rcu_global[i].done_batch = NULL;
81
        rcu_global[i].current_batch = NULL;
82
        rcu_global[i].next_batch = NULL;
83
        rcu_global[i].cpu_mask = malloc(sizeof(bool)*config.cpu_count,0);
84
        for (j=0;j<config.cpu_count;j++) {
85
            rcu_global[i].cpu_mask[j]=false;
86
        }
2309 hudecek 87
    }
88
#else
2330 hudecek 89
    rcu_global[CPU->id].done_batch = NULL;
90
    rcu_global[CPU->id].current_batch = NULL;
91
    rcu_global[CPU->id].next_batch = NULL;
2309 hudecek 92
#endif
2260 hudecek 93
}
94
 
2330 hudecek 95
 
96
/**
 * Block the calling thread until a full RCU grace period has elapsed.
 */
void rcu_synchronize(void)
{
#ifdef CONFIG_SMP
    waitq_t wq;

    waitq_initialize(&wq);
    /* The callback wakes us once every CPU has passed a quiescent state. */
    rcu_sync_callback_normal_alloc(&rcu_synchronize_callback_function, &wq);
    waitq_sleep(&wq);
#endif
}
109
 
2309 hudecek 110
#ifdef CONFIG_SMP
/**
 * Grace-period callback: wake everyone sleeping in rcu_synchronize()
 * on the wait queue passed as @a waitq.
 */
void rcu_synchronize_callback_function(void* waitq)
{
    waitq_t *wq = (waitq_t *) waitq;

    waitq_wakeup(wq, WAKEUP_ALL);
}
#endif
2260 hudecek 119
 
2330 hudecek 120
 
121
/**
122
* appends this callback func to the queue of waiting callbacks, the rest
123
* is handled in rcu_run_callbacks and in the tasklet. This is a lock free variant,
124
* which must be supplied with a preallocated rcu_callback_list_t structure
125
*/
2400 hudecek 126
void rcu_sync_callback(void (*func)(void* data), void* data, rcu_callback_list_t* rd)
2260 hudecek 127
{
2309 hudecek 128
#ifndef CONFIG_SMP
129
    func(data);
130
#else
2330 hudecek 131
 
132
    ipl_t ipl;
2260 hudecek 133
    rd->func = func;
134
    rd->data = data;
2315 hudecek 135
 
2330 hudecek 136
    ipl = interrupts_disable();
137
    //append to the list of callbacks waiting for their batch to begin
138
    rd->next = rcu_global[CPU->id].next_batch;
139
    rcu_global[CPU->id].next_batch = rd;
140
    interrupts_restore(ipl);
2315 hudecek 141
 
2330 hudecek 142
    rcu_passQS();
2309 hudecek 143
#endif
144
}
2260 hudecek 145
 
2330 hudecek 146
/**
 * RCU tasklet: records that this CPU passed a quiescent state and, once
 * every active CPU has done so, splices the current batch onto the done
 * list -- the grace period for those callbacks is over.
 */
void rcu_tasklet(void* data)
{
    rcu_callback_list_t* rd;
    bool passed_all_QS;
#ifdef CONFIG_SMP 
    int i;
#endif
    ipl_t ipl;

    ipl = interrupts_disable();

    rcu_passQS();
    passed_all_QS = true;
#ifdef CONFIG_SMP 
    //check whether all CPUs have passed through QS
    for (i = 0; i < config.cpu_active; i++)
        passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
#endif
    if (passed_all_QS) {
        //all CPUs have passed through QS -> grace period is over, we can schedule the call to RCU callback
        if (rcu_global[CPU->id].done_batch) {
            rd = rcu_global[CPU->id].done_batch;
            while (rd->next) rd = rd->next;
            //append the current list to done list
            rd->next = rcu_global[CPU->id].current_batch;
        } else
            rcu_global[CPU->id].done_batch = rcu_global[CPU->id].current_batch;
        rcu_global[CPU->id].current_batch = NULL;
    }
    interrupts_restore(ipl);
}
180
 
2330 hudecek 181
 
182
/**
 * Announce that the executing CPU has gone through a quiescent state:
 * set this CPU's bit in the mask kept by every active CPU.
 */
void rcu_passQS(void)
{
#ifdef CONFIG_SMP 
    int cpu;

    for (cpu = 0; cpu < config.cpu_active; cpu++)
        rcu_global[cpu].cpu_mask[CPU->id] = true;
#endif
}
194
 
2330 hudecek 195
 
196
/**
 * Promote waiting callbacks (next -> current), start a new grace period
 * by scheduling the RCU tasklet on all active CPUs, then invoke and free
 * the callbacks whose grace period has already elapsed (the done batch).
 */
void rcu_run_callbacks(void)
{
    rcu_callback_list_t* rd, *rd2;
    int i;
    ipl_t ipl;

    ipl = interrupts_disable();
    if (rcu_global[CPU->id].next_batch) {
        //we cannot append to the current list because callbacks from next batch
        //haven't passed the QSs
        if (rcu_global[CPU->id].current_batch == NULL) {
            rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
            rcu_global[CPU->id].next_batch = NULL;
#ifdef CONFIG_SMP
            //initialize our CPU mask
            for (i=0;i<config.cpu_active;i++)
                rcu_global[CPU->id].cpu_mask[i]=false;
#endif
            //schedule tasklet for all CPUs
            for (i=0;i<config.cpu_active;i++) {
                tasklet_schedule_SMP(rcu_tasklet_desc, i);
            }
        }
    }
    //this CPU has passed QS
    rcu_passQS();
    if (rcu_global[CPU->id].done_batch) {
        rd = rcu_global[CPU->id].done_batch;
        rcu_global[CPU->id].done_batch = NULL;
        //detach the list first, then re-enable interrupts -- the callbacks
        //may take long and must not run with interrupts disabled
        interrupts_restore(ipl);
        while (rd) {
            //call the callback
            rd->func(rd->data);
            rd2 = rd->next;
            //free the structure
            free(rd);
            rd = rd2;
        }
    }
    else
        interrupts_restore(ipl);
}
241
 
242
 
2400 hudecek 243
/**
 * Generic RCU callback that simply releases @a pointer with free().
 *
 * @param pointer heap memory to dispose of once the grace period is over
 */
void rcu_callback_free(void* pointer)
{
    free(pointer);
}
251