/*
 * Reader/Writer locks
 */

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * These locks are not recursive.
 * Neither readers nor writers will suffer starvation.
 *
 * If there is a writer followed by a reader waiting for the rwlock
 * and the writer times out, all leading readers are automatically woken up
 * and allowed in.
 */

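/*
 * Typical usage, as a minimal sketch (the blocking convenience wrappers
 * normally live in synch/rwlock.h; shown here with the raw *_timeout()
 * entry points defined below, assuming that usec == 0 together with
 * trylock == 0 requests an ordinary blocking acquisition):
 *
 *	rwlock_t rwl;
 *
 *	rwlock_initialize(&rwl);
 *
 *	// reader side
 *	if (!SYNCH_FAILED(_rwlock_read_lock_timeout(&rwl, 0, 0))) {
 *		// ... read the shared data ...
 *		rwlock_read_unlock(&rwl);
 *	}
 *
 *	// writer side
 *	if (!SYNCH_FAILED(_rwlock_write_lock_timeout(&rwl, 0, 0))) {
 *		// ... modify the shared data ...
 *		rwlock_write_unlock(&rwl);
 *	}
 */
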
/*
 * NOTE ON rwlock_holder_type
 * This field is set on an attempt to acquire the exclusive mutex
 * to the respective value depending on whether the caller is a reader
 * or a writer. The field is examined only if the thread had been
 * previously blocked on the exclusive mutex. Thus it is safe
 * to store the rwlock type in the thread structure, because
 * each thread can block on only one rwlock at a time.
 */
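/*
 * (The field is written by _rwlock_read_lock_timeout() and
 * _rwlock_write_lock_timeout() below and read back by let_others_in()
 * when it decides which sleepers to wake.)
 */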

#include <synch/synch.h>
#include <synch/rwlock.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <synch/waitq.h>

#include <list.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <arch.h>
#include <proc/thread.h>
#include <panic.h>

#define ALLOW_ALL		0
#define ALLOW_READERS_ONLY	1

static void let_others_in(rwlock_t *rwl, int readers_only);
static void release_spinlock(void *arg);

void rwlock_initialize(rwlock_t *rwl) {
	spinlock_initialize(&rwl->lock);
	mutex_initialize(&rwl->exclusive);
	rwl->readers_in = 0;
}

int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
	pri_t pri;
	int rc;

	pri = cpu_priority_high();
	spinlock_lock(&THREAD->lock);
	THREAD->rwlock_holder_type = RWLOCK_WRITER;
	spinlock_unlock(&THREAD->lock);
	cpu_priority_restore(pri);

	/*
	 * Writers take the easy part.
	 * They just need to acquire the exclusive mutex.
	 */
	rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
	if (SYNCH_FAILED(rc)) {

		/*
		 * The lock operation timed out.
		 * The state of rwl is UNKNOWN at this point.
		 * No claims about its holder can be made.
		 */

		pri = cpu_priority_high();
		spinlock_lock(&rwl->lock);
		/*
		 * Now that rwl is locked, we can inspect it again.
		 * If it is held by some readers already, we can let
		 * readers from the head of the wait queue in.
		 */
		if (rwl->readers_in)
			let_others_in(rwl, ALLOW_READERS_ONLY);
		spinlock_unlock(&rwl->lock);
		cpu_priority_restore(pri);
	}

	return rc;
}
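
/*
 * Example of the timeout scenario handled above, as a sketch (assuming
 * that a non-zero usec is a timeout in microseconds and that a failed
 * acquisition must not be followed by an unlock):
 *
 *	rc = _rwlock_write_lock_timeout(&rwl, 500000, 0);
 *	if (SYNCH_FAILED(rc)) {
 *		// timed out; leading readers (if any) have been let in
 *	} else {
 *		// ... write ...
 *		rwlock_write_unlock(&rwl);
 *	}
 */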

int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
	int rc;
	pri_t pri;

	pri = cpu_priority_high();
	spinlock_lock(&THREAD->lock);
	THREAD->rwlock_holder_type = RWLOCK_READER;
	spinlock_unlock(&THREAD->lock);

	spinlock_lock(&rwl->lock);

	/*
	 * Find out whether we can get what we want without blocking.
	 */
	rc = mutex_trylock(&rwl->exclusive);
	if (SYNCH_FAILED(rc)) {

		/*
		 * The 'exclusive' mutex is being held by someone else.
		 * If the holder is a reader and there is no one
		 * else waiting for it, we can enter the critical
		 * section.
		 */

		if (rwl->readers_in) {
			spinlock_lock(&rwl->exclusive.sem.wq.lock);
			if (list_empty(&rwl->exclusive.sem.wq.head)) {
				/*
				 * We can enter.
				 */
				spinlock_unlock(&rwl->exclusive.sem.wq.lock);
				goto shortcut;
			}
			spinlock_unlock(&rwl->exclusive.sem.wq.lock);
		}

		/*
		 * In order to prevent a race condition in which a reader
		 * could block another reader at the head of the waitq,
		 * we register a function to unlock rwl->lock
		 * after this thread is put to sleep.
		 */
		thread_register_call_me(release_spinlock, &rwl->lock);

		rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
		switch (rc) {
			case ESYNCH_WOULD_BLOCK:
				/*
				 * release_spinlock() wasn't called
				 */
				thread_register_call_me(NULL, NULL);
				spinlock_unlock(&rwl->lock);
			case ESYNCH_TIMEOUT:
				/*
				 * The sleep timed out.
				 * We just restore the cpu priority.
				 */
			case ESYNCH_OK_BLOCKED:
				/*
				 * We were woken up with rwl->readers_in already incremented.
				 * Note that this arrangement avoids a race condition between
				 * two concurrent readers. (The race is avoided if 'exclusive' is
				 * locked at the same time as 'readers_in' is incremented.
				 * Same time means both events happen atomically when
				 * rwl->lock is held.)
				 */
				cpu_priority_restore(pri);
				break;
			case ESYNCH_OK_ATOMIC:
				panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC");
				break;
			default:
				panic("invalid ESYNCH");
				break;
		}
		return rc;
	}

shortcut:

	/*
	 * We can increment readers_in only if we didn't go to sleep.
	 * For sleepers, let_others_in() will do the job.
	 */
	rwl->readers_in++;

	spinlock_unlock(&rwl->lock);
	cpu_priority_restore(pri);

	return ESYNCH_OK_ATOMIC;
}

void rwlock_write_unlock(rwlock_t *rwl)
{
	pri_t pri;

	pri = cpu_priority_high();
	spinlock_lock(&rwl->lock);
	let_others_in(rwl, ALLOW_ALL);
	spinlock_unlock(&rwl->lock);
	cpu_priority_restore(pri);
}

void rwlock_read_unlock(rwlock_t *rwl)
{
	pri_t pri;

	pri = cpu_priority_high();
	spinlock_lock(&rwl->lock);
	if (!--rwl->readers_in)
		let_others_in(rwl, ALLOW_ALL);
	spinlock_unlock(&rwl->lock);
	cpu_priority_restore(pri);
}

/*
 * Must be called with rwl->lock locked.
 * Must be called with cpu_priority_high'ed.
 */
/*
 * If readers_only is false: (unlock scenario)
 * Let the first sleeper on the 'exclusive' mutex in, no matter
 * whether it is a reader or a writer. If there are more leading
 * readers in line, let each of them in.
 *
 * Otherwise: (timeout scenario)
 * Let all leading readers in.
 */
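/*
 * A worked example with hypothetical sleepers R1, R2, W1, R3 queued on
 * 'exclusive' in that order (R* are readers, W* is a writer):
 *
 * - Unlock scenario (ALLOW_ALL): R1 is woken and, because the next sleeper
 *   R2 is also a reader, R2 is woken too; the loop stops at W1, which keeps
 *   waiting. Had W1 been at the head instead, only W1 would have been woken.
 * - Timeout scenario (ALLOW_READERS_ONLY): R1 and R2 are woken, but only
 *   because the rwlock is already held by readers; W1 and R3 keep waiting.
 */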
void let_others_in(rwlock_t *rwl, int readers_only)
{
	rwlock_type_t type = RWLOCK_NONE;
	thread_t *t = NULL;
	int one_more = 1;

	spinlock_lock(&rwl->exclusive.sem.wq.lock);

	if (!list_empty(&rwl->exclusive.sem.wq.head))
		t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
	do {
		if (t) {
			spinlock_lock(&t->lock);
			type = t->rwlock_holder_type;
			spinlock_unlock(&t->lock);
		}

		/*
		 * If readers_only is true, we wake all leading readers
		 * if and only if rwl is locked by another reader.
		 * Assumption: readers_only ==> rwl->readers_in
		 */
		if (readers_only && (type != RWLOCK_READER))
			break;

		if (type == RWLOCK_READER) {
			/*
			 * Waking up a reader.
			 * We are responsible for incrementing rwl->readers_in for it.
			 */
			rwl->readers_in++;
		}

		/*
		 * Only the last iteration through this loop can increment
		 * rwl->exclusive.sem.wq.missed_wakeups. All preceding
		 * iterations will wake up a thread.
		 */
		/*
		 * We call the internal version of waitq_wakeup, which
		 * relies on the fact that the waitq is already locked.
		 */
		_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);

		t = NULL;
		if (!list_empty(&rwl->exclusive.sem.wq.head)) {
			t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
			if (t) {
				spinlock_lock(&t->lock);
				if (t->rwlock_holder_type != RWLOCK_READER)
					one_more = 0;
				spinlock_unlock(&t->lock);
			}
		}
	} while ((type == RWLOCK_READER) && t && one_more);

	spinlock_unlock(&rwl->exclusive.sem.wq.lock);
}

void release_spinlock(void *arg)
{
	spinlock_unlock((spinlock_t *) arg);
}