Subversion Repositories HelenOS

rcu.c at Rev 2430 (diff base: Rev 2400)
/*
 * Copyright (c) 2007 Jan Hudecek
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */
/** @file rcu.c
 *  @brief RCU synchronization primitive
 */

#include <synch/rcu.h>
#include <synch/waitq.h>
#include <arch.h>
#include <config.h>
#include <arch/types.h>
#include <proc/tasklet.h>
#include <synch/spinlock.h>
#include <time/delay.h>
#include <panic.h>
#include <print.h>

/** Main data structure of the RCU implementation */
typedef struct {
#ifdef CONFIG_SMP
    /** flags indicating whether the corresponding CPU has passed QS for this RCU batch */
    bool* cpu_mask;
#endif
    /** RCU batch waiting for the current batch to finish; QS monitoring hasn't been started for this one */
    rcu_callback_list_t* next_batch;
    /** RCU batch that waits for passing of QSs on all CPUs */
    rcu_callback_list_t* current_batch;
    /** RCU batch that has passed all QSs and waits for invocation */
    rcu_callback_list_t* done_batch;
} rcu_cpu_data_t;
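
/*
 * Batch lifecycle (summary of the code below): a callback registered by
 * rcu_sync_callback() first waits in next_batch. rcu_run_callbacks()
 * promotes next_batch to current_batch and schedules the tasklet on all
 * active CPUs; once every active CPU has passed a quiescent state,
 * rcu_tasklet() moves current_batch to done_batch, whose callbacks
 * rcu_run_callbacks() then invokes and frees.
 */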

/** An array of structures holding the callbacks and the progress of QS for each CPU */
rcu_cpu_data_t* rcu_global = NULL;
/** Reference to the RCU tasklet, used for scheduling it */
tasklet_descriptor_t* rcu_tasklet_desc;

/**
* Initializes the data structures needed for RCU
*/
void rcu_init(void)
{
#ifdef CONFIG_SMP
    int i, j;
#endif

    rcu_global = malloc(sizeof(rcu_cpu_data_t) * config.cpu_count, 0);
    rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);

#ifdef CONFIG_SMP
    /*
     * Note: the array is allocated for the case when every connected CPU
     * is active. If some CPUs are inactive, only the entries of the
     * active ones are used.
     */
    for (i = 0; i < config.cpu_count; i++) {
        rcu_global[i].done_batch = NULL;
        rcu_global[i].current_batch = NULL;
        rcu_global[i].next_batch = NULL;
        rcu_global[i].cpu_mask = malloc(sizeof(bool) * config.cpu_count, 0);
        for (j = 0; j < config.cpu_count; j++) {
            rcu_global[i].cpu_mask[j] = false;
        }
    }
#else
    rcu_global[CPU->id].done_batch = NULL;
    rcu_global[CPU->id].current_batch = NULL;
    rcu_global[CPU->id].next_batch = NULL;
#endif
}

/**
* Blocks until the grace period elapses
*/
void rcu_synchronize(void)
{
#ifdef CONFIG_SMP
    waitq_t wq;
    waitq_initialize(&wq);
    rcu_sync_callback_normal_alloc(&rcu_synchronize_callback_function, &wq);
    //sleep until the end of the grace period
    waitq_sleep(&wq);
#endif
}
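
/*
 * Usage sketch (illustrative only; 'shared_ptr', 'new_version' and foo_t
 * are hypothetical): an updater unpublishes an element, waits out the
 * grace period and only then reclaims the element.
 *
 *   foo_t* old = shared_ptr;
 *   shared_ptr = new_version;  // readers can no longer reach 'old'
 *   rcu_synchronize();         // blocks until pre-existing readers finish
 *   free(old);                 // now safe to reclaim
 */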

#ifdef CONFIG_SMP
/**
* Just wakes up rcu_synchronize once the grace period has elapsed
*/
void rcu_synchronize_callback_function(void* waitq)
{
    waitq_wakeup(((waitq_t*)waitq), WAKEUP_ALL);
}
#endif

/**
* Appends the callback to the queue of waiting callbacks; the rest is
* handled in rcu_run_callbacks and in the tasklet. This is the lock-free
* variant, which must be supplied with a preallocated rcu_callback_list_t
* structure; the structure is deallocated after the callback is called.
*/
void rcu_sync_callback(void (*func)(void* data), void* data, rcu_callback_list_t* rd)
{
#ifndef CONFIG_SMP
    func(data);
#else
    ipl_t ipl;
    rd->func = func;
    rd->data = data;

    //disabling interrupts removes the need for any synchronization - the
    //list of callbacks is only ever accessed on the current CPU
    ipl = interrupts_disable();
    //append to the list of callbacks waiting for their batch to begin
    rd->next = rcu_global[CPU->id].next_batch;
    rcu_global[CPU->id].next_batch = rd;
    rcu_passQS();
    interrupts_restore(ipl);
#endif
}
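
/*
 * Usage sketch (illustrative only; 'old_data' is hypothetical): the caller
 * preallocates the rcu_callback_list_t node, which rcu_run_callbacks()
 * frees after the callback fires; pairing with rcu_callback_free thus
 * reclaims 'old_data' without blocking the caller.
 *
 *   rcu_callback_list_t* rd = malloc(sizeof(rcu_callback_list_t), 0);
 *   rcu_sync_callback(&rcu_callback_free, old_data, rd);
 *   // returns immediately; 'old_data' is freed after the grace period
 */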

/**
* RCU tasklet: checks whether all CPUs have passed through their QSs and,
* if so, moves the current batch to the done batch.
*/
void rcu_tasklet(void* data)
{
    rcu_callback_list_t* rd;
    bool passed_all_QS;
#ifdef CONFIG_SMP
    int i;
#endif
    ipl_t ipl;
    passed_all_QS = true;

    ipl = interrupts_disable();
    rcu_passQS();

#ifdef CONFIG_SMP
    //check whether all CPUs have passed through the QS of this CPU's current batch
    for (i = 0; i < config.cpu_count; i++)
        if (cpus[i].active)
            passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
#endif
    if (passed_all_QS) {
        //all CPUs have passed through QS -> the grace period is over, we can
        //schedule the invocation of the RCU callbacks
        if (rcu_global[CPU->id].done_batch) {
            rd = rcu_global[CPU->id].done_batch;

            while (rd->next) rd = rd->next;
            //append the current list to the done list
            rd->next = rcu_global[CPU->id].current_batch;
        } else
            rcu_global[CPU->id].done_batch = rcu_global[CPU->id].current_batch;
        rcu_global[CPU->id].current_batch = NULL;
    }
    interrupts_restore(ipl);
}

/**
* Indicates that the current CPU has gone through a quiescent state
*/
void rcu_passQS(void)
{
#ifdef CONFIG_SMP
    int i;
    for (i = 0; i < config.cpu_count; i++)
        if (cpus[i].active)
            //on all CPUs indicate that this CPU has gone through QS;
            //this can overlap with the clearing of this flag in rcu_run_callbacks
            rcu_global[i].cpu_mask[CPU->id] = true;
#endif
}

/**
* Moves RCU callbacks from the next batch to the current batch, schedules
* the RCU tasklet when needed, calls the callbacks from the done batch and
* frees their rcu_callback_list_t structures.
*/
void rcu_run_callbacks(void)
{
    rcu_callback_list_t* rd, *rd2;
    int i;
    ipl_t ipl;

    ipl = interrupts_disable();
    if (rcu_global[CPU->id].next_batch) {
        //we cannot append to the current list because the callbacks from
        //the next batch haven't passed their QSs yet
        if (rcu_global[CPU->id].current_batch == NULL) {
            rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
            rcu_global[CPU->id].next_batch = NULL;
#ifdef CONFIG_SMP
            //initialize our CPU mask
            for (i = 0; i < config.cpu_count; i++)
                if (cpus[i].active)
                    rcu_global[CPU->id].cpu_mask[i] = false;
#endif
            //schedule the tasklet on all CPUs
            for (i = 0; i < config.cpu_count; i++)
                if (cpus[i].active)
                    tasklet_schedule_SMP(rcu_tasklet_desc, i);
        }
    }
    //this CPU has passed its QS
    rcu_passQS();
    if (rcu_global[CPU->id].done_batch) {
        rd = rcu_global[CPU->id].done_batch;
        rcu_global[CPU->id].done_batch = NULL;
        //the callbacks (and free) can block, so we must restore interrupts
        interrupts_restore(ipl);
        while (rd) {
            //call the callback
            if (rd->func == NULL)
                panic_printf("RCU callback function NULL, desc:%x", rd);
            rd->func(rd->data);
            rd2 = rd->next;
            //free the structure
            free(rd);
            rd = rd2;
        }
    }
    else
        interrupts_restore(ipl);
}

/**
* Generic RCU callback: frees the memory it is given
* @param pointer memory to be freed
*/
void rcu_callback_free(void* pointer)
{
    free(pointer);
}
