Subversion Repositories HelenOS

Rev

Rev 2330 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2330 Rev 2400
1
/*
1
/*
2
 * Copyright (c) 2007 Jan Hudecek
2
 * Copyright (c) 2007 Jan Hudecek
3
 * Copyright (c) 2006 Jakub Jermar
3
 * Copyright (c) 2006 Jakub Jermar
4
 * All rights reserved.
4
 * All rights reserved.
5
 *
5
 *
6
 * Redistribution and use in source and binary forms, with or without
6
 * Redistribution and use in source and binary forms, with or without
7
 * modification, are permitted provided that the following conditions
7
 * modification, are permitted provided that the following conditions
8
 * are met:
8
 * are met:
9
 *
9
 *
10
 * - Redistributions of source code must retain the above copyright
10
 * - Redistributions of source code must retain the above copyright
11
 *   notice, this list of conditions and the following disclaimer.
11
 *   notice, this list of conditions and the following disclaimer.
12
 * - Redistributions in binary form must reproduce the above copyright
12
 * - Redistributions in binary form must reproduce the above copyright
13
 *   notice, this list of conditions and the following disclaimer in the
13
 *   notice, this list of conditions and the following disclaimer in the
14
 *   documentation and/or other materials provided with the distribution.
14
 *   documentation and/or other materials provided with the distribution.
15
 * - The name of the author may not be used to endorse or promote products
15
 * - The name of the author may not be used to endorse or promote products
16
 *   derived from this software without specific prior written permission.
16
 *   derived from this software without specific prior written permission.
17
 *
17
 *
18
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
 */
28
 */
29
/** @addtogroup sync
29
/** @addtogroup sync
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
#include <synch/rcu.h>
35
#include <synch/rcu.h>
36
#include <synch/waitq.h>
36
#include <synch/waitq.h>
37
#include <arch.h>
37
#include <arch.h>
38
#include <config.h>
38
#include <config.h>
39
#include <arch/types.h>
39
#include <arch/types.h>
40
#include <proc/tasklet.h>
40
#include <proc/tasklet.h>
41
#include <synch/spinlock.h>
41
#include <synch/spinlock.h>
42
#include <time/delay.h>
42
#include <time/delay.h>
43
#include <panic.h>
43
#include <panic.h>
44
#include <print.h>
44
#include <print.h>
45
 
45
 
46
 
46
 
47
 
47
 
48
 
48
 
49
typedef struct  {
49
typedef struct  {
50
#ifdef CONFIG_SMP 
50
#ifdef CONFIG_SMP 
51
    bool* cpu_mask;
51
    bool* cpu_mask;
52
#endif
52
#endif
53
    rcu_callback_list_t* next_batch, *current_batch, *done_batch;
53
    rcu_callback_list_t* next_batch, *current_batch, *done_batch;
54
} rcu_global_t;
54
} rcu_global_t;
55
 
55
 
56
/** An array of structures holding the callbacks and the progress of QS for each CPU*/
56
/** An array of structures holding the callbacks and the progress of QS for each CPU*/
57
rcu_global_t* rcu_global=NULL;
57
rcu_global_t* rcu_global=NULL;
58
/** reference to the RCU tasklet, for scheduling it */
58
/** reference to the RCU tasklet, for scheduling it */
59
tasklet_descriptor_t* rcu_tasklet_desc;
59
tasklet_descriptor_t* rcu_tasklet_desc;
60
 
60
 
61
 
61
 
62
/**
62
/**
63
* Initializes data structures needed for RCU
63
* Initializes data structures needed for RCU
64
*/
64
*/
65
void rcu_init(void)
65
void rcu_init(void)
66
{
66
{
67
#ifdef CONFIG_SMP 
67
#ifdef CONFIG_SMP 
68
    int i,j;
68
    int i,j;
69
#endif
69
#endif
70
 
70
 
71
    rcu_global = malloc(sizeof(rcu_global_t)*(config.cpu_count),0);
71
    rcu_global = malloc(sizeof(rcu_global_t)*(config.cpu_count),0);
72
    rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);
72
    rcu_tasklet_desc = tasklet_register(&rcu_tasklet, NULL);
73
 
73
 
74
#ifdef CONFIG_SMP 
74
#ifdef CONFIG_SMP 
75
    /*
75
    /*
76
     * Note: I allocate the array for a case when every CPU connected will be active
76
     * Note: I allocate the array for a case when every CPU connected will be active
77
     * In a case when there will be some inactive CPUs, I will use just the first cells.
77
     * In a case when there will be some inactive CPUs, I will use just the first cells.
78
     */
78
     */
79
    for (i=0;i<config.cpu_count;i++) {
79
    for (i=0;i<config.cpu_count;i++) {
80
        rcu_global[i].done_batch = NULL;
80
        rcu_global[i].done_batch = NULL;
81
        rcu_global[i].current_batch = NULL;
81
        rcu_global[i].current_batch = NULL;
82
        rcu_global[i].next_batch = NULL;
82
        rcu_global[i].next_batch = NULL;
83
        rcu_global[i].cpu_mask = malloc(sizeof(bool)*config.cpu_count,0);
83
        rcu_global[i].cpu_mask = malloc(sizeof(bool)*config.cpu_count,0);
84
        for (j=0;j<config.cpu_count;j++) {
84
        for (j=0;j<config.cpu_count;j++) {
85
            rcu_global[i].cpu_mask[j]=false;
85
            rcu_global[i].cpu_mask[j]=false;
86
        }
86
        }
87
    }
87
    }
88
#else
88
#else
89
    rcu_global[CPU->id].done_batch = NULL;
89
    rcu_global[CPU->id].done_batch = NULL;
90
    rcu_global[CPU->id].current_batch = NULL;
90
    rcu_global[CPU->id].current_batch = NULL;
91
    rcu_global[CPU->id].next_batch = NULL;
91
    rcu_global[CPU->id].next_batch = NULL;
92
#endif
92
#endif
93
}
93
}
94
 
94
 
95
 
95
 
96
/**
 * Blocks until the grace period elapses.  On UP there are no concurrent
 * readers, so the function is a no-op.
 */
void rcu_synchronize(void)
{
#ifdef CONFIG_SMP
    waitq_t wq;
    waitq_initialize(&wq);
    /* Register a callback that wakes us once the grace period is over. */
    rcu_sync_callback_normal_alloc(&rcu_synchronize_callback_function, &wq);
    /* Sleep until the end of the grace period. */
    waitq_sleep(&wq);
#endif
}
109
 
109
 
110
#ifdef CONFIG_SMP
/**
 * Wakeup callback for rcu_synchronize(): invoked when the grace period
 * has elapsed.
 *
 * @param waitq the waitq_t the synchronizing thread sleeps on
 */
void rcu_synchronize_callback_function(void* waitq)
{
    waitq_wakeup(((waitq_t *) waitq), WAKEUP_ALL);
}
#endif
119
 
119
 
120
 
120
 
121
/**
121
/**
122
* appends this callback func to the queue of waiting callbacks, the rest
122
* appends this callback func to the queue of waiting callbacks, the rest
123
* is handled in rcu_run_callbacks and in the tasklet. This is a lock free variant,
123
* is handled in rcu_run_callbacks and in the tasklet. This is a lock free variant,
124
* which must be supplied with a preallocated rcu_callback_list_t structure
124
* which must be supplied with a preallocated rcu_callback_list_t structure
125
*/
125
*/
126
void rcu_sync_callback_custom_alloc(void (*func)(void* data), void* data, rcu_callback_list_t* rd)
126
void rcu_sync_callback(void (*func)(void* data), void* data, rcu_callback_list_t* rd)
127
{
127
{
128
#ifndef CONFIG_SMP
128
#ifndef CONFIG_SMP
129
    func(data);
129
    func(data);
130
#else
130
#else
131
 
131
 
132
    ipl_t ipl;
132
    ipl_t ipl;
133
    rd->func = func;
133
    rd->func = func;
134
    rd->data = data;
134
    rd->data = data;
135
 
135
 
136
    ipl = interrupts_disable();
136
    ipl = interrupts_disable();
137
    //append to the list of callbacks waiting for their batch to begin
137
    //append to the list of callbacks waiting for their batch to begin
138
    rd->next = rcu_global[CPU->id].next_batch;
138
    rd->next = rcu_global[CPU->id].next_batch;
139
    rcu_global[CPU->id].next_batch = rd;
139
    rcu_global[CPU->id].next_batch = rd;
140
    interrupts_restore(ipl);
140
    interrupts_restore(ipl);
141
 
141
 
142
    rcu_passQS();
142
    rcu_passQS();
143
#endif
143
#endif
144
}
144
}
145
 
145
 
146
/**
146
/**
147
* RCU tasklet, tests passing through QSs, moves from current to done
147
* RCU tasklet, tests passing through QSs, moves from current to done
148
*/
148
*/
149
void rcu_tasklet(void* data)
149
void rcu_tasklet(void* data)
150
{
150
{
151
    rcu_callback_list_t* rd;
151
    rcu_callback_list_t* rd;
152
    bool passed_all_QS;
152
    bool passed_all_QS;
153
#ifdef CONFIG_SMP 
153
#ifdef CONFIG_SMP 
154
    int i;
154
    int i;
155
#endif
155
#endif
156
    ipl_t ipl;
156
    ipl_t ipl;
157
 
157
 
158
    ipl = interrupts_disable();
158
    ipl = interrupts_disable();
159
 
159
 
160
    rcu_passQS();
160
    rcu_passQS();
161
    passed_all_QS = true;
161
    passed_all_QS = true;
162
#ifdef CONFIG_SMP 
162
#ifdef CONFIG_SMP 
163
    //check whether all CPUs have passed through QS
163
    //check whether all CPUs have passed through QS
164
    for (i = 0; i < config.cpu_active; i++)
164
    for (i = 0; i < config.cpu_active; i++)
165
        passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
165
        passed_all_QS &= rcu_global[CPU->id].cpu_mask[i];
166
#endif
166
#endif
167
    if (passed_all_QS) {
167
    if (passed_all_QS) {
168
        //all CPUs have passed through QS -> grace period is over, we can schedule the call to RCU callback
168
        //all CPUs have passed through QS -> grace period is over, we can schedule the call to RCU callback
169
        if (rcu_global[CPU->id].done_batch) {
169
        if (rcu_global[CPU->id].done_batch) {
170
            rd = rcu_global[CPU->id].done_batch;
170
            rd = rcu_global[CPU->id].done_batch;
171
            while (rd->next) rd = rd->next;
171
            while (rd->next) rd = rd->next;
172
            //append the current list to done list
172
            //append the current list to done list
173
            rd->next = rcu_global[CPU->id].current_batch;
173
            rd->next = rcu_global[CPU->id].current_batch;
174
        } else
174
        } else
175
            rcu_global[CPU->id].done_batch = rcu_global[CPU->id].current_batch;
175
            rcu_global[CPU->id].done_batch = rcu_global[CPU->id].current_batch;
176
        rcu_global[CPU->id].current_batch = NULL;
176
        rcu_global[CPU->id].current_batch = NULL;
177
    }
177
    }
178
    interrupts_restore(ipl);
178
    interrupts_restore(ipl);
179
}
179
}
180
 
180
 
181
 
181
 
182
/**
 * Indicates that the current CPU has gone through a quiescent state, by
 * setting this CPU's bit in every CPU's mask.  No-op on UP.
 */
void rcu_passQS(void)
{
#ifdef CONFIG_SMP 
    /* Unsigned to match the type of config.cpu_active. */
    unsigned int i;
    for (i = 0; i < config.cpu_active; i++) {
        /* On all CPUs indicate that this CPU has gone through a QS. */
        rcu_global[i].cpu_mask[CPU->id] = true;
    }
#endif
}
194
 
194
 
195
 
195
 
196
/**
196
/**
197
* Moves RCUs from next to current, schedules RCU tasklet, calls the callbacks, frees the rcu_callback_list_t
197
* Moves RCUs from next to current, schedules RCU tasklet, calls the callbacks, frees the rcu_callback_list_t
198
*/
198
*/
199
void rcu_run_callbacks(void)
199
void rcu_run_callbacks(void)
200
{
200
{
201
    rcu_callback_list_t* rd, *rd2;
201
    rcu_callback_list_t* rd, *rd2;
202
    int i;
202
    int i;
203
    ipl_t ipl;
203
    ipl_t ipl;
204
 
204
 
205
    ipl = interrupts_disable();
205
    ipl = interrupts_disable();
206
    if (rcu_global[CPU->id].next_batch) {
206
    if (rcu_global[CPU->id].next_batch) {
207
        //we cannot append to the current list because callbacks from next batch
207
        //we cannot append to the current list because callbacks from next batch
208
        //haven't passed the QSs
208
        //haven't passed the QSs
209
        if (rcu_global[CPU->id].current_batch == NULL) {
209
        if (rcu_global[CPU->id].current_batch == NULL) {
210
            rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
210
            rcu_global[CPU->id].current_batch = rcu_global[CPU->id].next_batch;
211
            rcu_global[CPU->id].next_batch = NULL;
211
            rcu_global[CPU->id].next_batch = NULL;
212
#ifdef CONFIG_SMP
212
#ifdef CONFIG_SMP
213
            //initialize our CPU mask
213
            //initialize our CPU mask
214
            for (i=0;i<config.cpu_active;i++)
214
            for (i=0;i<config.cpu_active;i++)
215
                rcu_global[CPU->id].cpu_mask[i]=false;
215
                rcu_global[CPU->id].cpu_mask[i]=false;
216
#endif
216
#endif
217
            //schedule tasklet for all CPUs
217
            //schedule tasklet for all CPUs
218
            for (i=0;i<config.cpu_active;i++) {
218
            for (i=0;i<config.cpu_active;i++) {
219
                tasklet_schedule_SMP(rcu_tasklet_desc, i);
219
                tasklet_schedule_SMP(rcu_tasklet_desc, i);
220
            }
220
            }
221
        }
221
        }
222
    }
222
    }
223
    //this CPU has passed QS
223
    //this CPU has passed QS
224
    rcu_passQS();
224
    rcu_passQS();
225
    if (rcu_global[CPU->id].done_batch) {
225
    if (rcu_global[CPU->id].done_batch) {
226
        rd = rcu_global[CPU->id].done_batch;
226
        rd = rcu_global[CPU->id].done_batch;
227
        rcu_global[CPU->id].done_batch = NULL;
227
        rcu_global[CPU->id].done_batch = NULL;
228
        interrupts_restore(ipl);
228
        interrupts_restore(ipl);
229
        while (rd) {
229
        while (rd) {
230
            //call the callback
230
            //call the callback
231
            rd->func(rd->data);
231
            rd->func(rd->data);
232
            rd2 = rd->next;
232
            rd2 = rd->next;
233
            //free the structure
233
            //free the structure
234
            free(rd);
234
            free(rd);
235
            rd = rd2;
235
            rd = rd2;
236
        }
236
        }
237
    }
237
    }
238
    else
238
    else
239
        interrupts_restore(ipl);
239
        interrupts_restore(ipl);
240
}
240
}
241
 
241
 
242
 
242
 
-
 
243
/**
 * Generic callback for RCU: frees the passed pointer after the grace
 * period elapses.
 *
 * @param pointer memory to be freed; must have been heap-allocated
 */
void rcu_callback_free(void* pointer)
{
    free(pointer);
}
-
 
251
 
243
 
252