Rev 385 | Rev 430 | Go to most recent revision | Show entire file | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 385 | Rev 413 | ||
---|---|---|---|
Line 94... | Line 94... | ||
94 | * |
94 | * |
95 | * @return See comment for waitq_sleep_timeout(). |
95 | * @return See comment for waitq_sleep_timeout(). |
96 | */ |
96 | */ |
97 | int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock) |
97 | int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock) |
98 | { |
98 | { |
99 | pri_t pri; |
99 | ipl_t ipl; |
100 | int rc; |
100 | int rc; |
101 | 101 | ||
102 | pri = cpu_priority_high(); |
102 | ipl = interrupts_disable(); |
103 | spinlock_lock(&THREAD->lock); |
103 | spinlock_lock(&THREAD->lock); |
104 | THREAD->rwlock_holder_type = RWLOCK_WRITER; |
104 | THREAD->rwlock_holder_type = RWLOCK_WRITER; |
105 | spinlock_unlock(&THREAD->lock); |
105 | spinlock_unlock(&THREAD->lock); |
106 | cpu_priority_restore(pri); |
106 | interrupts_restore(ipl); |
107 | 107 | ||
108 | /* |
108 | /* |
109 | * Writers take the easy part. |
109 | * Writers take the easy part. |
110 | * They just need to acquire the exclusive mutex. |
110 | * They just need to acquire the exclusive mutex. |
111 | */ |
111 | */ |
Line 116... | Line 116... | ||
116 | * Lock operation timed out. |
116 | * Lock operation timed out. |
117 | * The state of rwl is UNKNOWN at this point. |
117 | * The state of rwl is UNKNOWN at this point. |
118 | * No claims about its holder can be made. |
118 | * No claims about its holder can be made. |
119 | */ |
119 | */ |
120 | 120 | ||
121 | pri = cpu_priority_high(); |
121 | ipl = interrupts_disable(); |
122 | spinlock_lock(&rwl->lock); |
122 | spinlock_lock(&rwl->lock); |
123 | /* |
123 | /* |
124 | * Now when rwl is locked, we can inspect it again. |
124 | * Now when rwl is locked, we can inspect it again. |
125 | * If it is held by some readers already, we can let |
125 | * If it is held by some readers already, we can let |
126 | * readers from the head of the wait queue in. |
126 | * readers from the head of the wait queue in. |
127 | */ |
127 | */ |
128 | if (rwl->readers_in) |
128 | if (rwl->readers_in) |
129 | let_others_in(rwl, ALLOW_READERS_ONLY); |
129 | let_others_in(rwl, ALLOW_READERS_ONLY); |
130 | spinlock_unlock(&rwl->lock); |
130 | spinlock_unlock(&rwl->lock); |
131 | cpu_priority_restore(pri); |
131 | interrupts_restore(ipl); |
132 | } |
132 | } |
133 | 133 | ||
134 | return rc; |
134 | return rc; |
135 | } |
135 | } |
136 | 136 | ||
Line 149... | Line 149... | ||
149 | * @return See comment for waitq_sleep_timeout(). |
149 | * @return See comment for waitq_sleep_timeout(). |
150 | */ |
150 | */ |
151 | int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock) |
151 | int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock) |
152 | { |
152 | { |
153 | int rc; |
153 | int rc; |
154 | pri_t pri; |
154 | ipl_t ipl; |
155 | 155 | ||
156 | pri = cpu_priority_high(); |
156 | ipl = interrupts_disable(); |
157 | spinlock_lock(&THREAD->lock); |
157 | spinlock_lock(&THREAD->lock); |
158 | THREAD->rwlock_holder_type = RWLOCK_READER; |
158 | THREAD->rwlock_holder_type = RWLOCK_READER; |
159 | spinlock_unlock(&THREAD->lock); |
159 | spinlock_unlock(&THREAD->lock); |
160 | 160 | ||
161 | spinlock_lock(&rwl->lock); |
161 | spinlock_lock(&rwl->lock); |
Line 202... | Line 202... | ||
202 | thread_register_call_me(NULL, NULL); |
202 | thread_register_call_me(NULL, NULL); |
203 | spinlock_unlock(&rwl->lock); |
203 | spinlock_unlock(&rwl->lock); |
204 | case ESYNCH_TIMEOUT: |
204 | case ESYNCH_TIMEOUT: |
205 | /* |
205 | /* |
206 | * The sleep timed out. |
206 | * The sleep timed out. |
207 | * We just restore the cpu priority. |
207 | * We just restore interrupt priority level. |
208 | */ |
208 | */ |
209 | case ESYNCH_OK_BLOCKED: |
209 | case ESYNCH_OK_BLOCKED: |
210 | /* |
210 | /* |
211 | * We were woken with rwl->readers_in already incremented. |
211 | * We were woken with rwl->readers_in already incremented. |
212 | * Note that this arrangement avoids race condition between |
212 | * Note that this arrangement avoids race condition between |
213 | * two concurrent readers. (Race is avoided if 'exclusive' is |
213 | * two concurrent readers. (Race is avoided if 'exclusive' is |
214 | * locked at the same time as 'readers_in' is incremented. |
214 | * locked at the same time as 'readers_in' is incremented. |
215 | * Same time means both events happen atomically when |
215 | * Same time means both events happen atomically when |
216 | * rwl->lock is held.) |
216 | * rwl->lock is held.) |
217 | */ |
217 | */ |
218 | cpu_priority_restore(pri); |
218 | interrupts_restore(ipl); |
219 | break; |
219 | break; |
220 | case ESYNCH_OK_ATOMIC: |
220 | case ESYNCH_OK_ATOMIC: |
221 | panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC"); |
221 | panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC"); |
222 | break; |
222 | break; |
223 | default: |
223 | default: |
Line 234... | Line 234... | ||
234 | * For sleepers, rwlock_let_others_in() will do the job. |
234 | * For sleepers, rwlock_let_others_in() will do the job. |
235 | */ |
235 | */ |
236 | rwl->readers_in++; |
236 | rwl->readers_in++; |
237 | 237 | ||
238 | spinlock_unlock(&rwl->lock); |
238 | spinlock_unlock(&rwl->lock); |
239 | cpu_priority_restore(pri); |
239 | interrupts_restore(ipl); |
240 | 240 | ||
241 | return ESYNCH_OK_ATOMIC; |
241 | return ESYNCH_OK_ATOMIC; |
242 | } |
242 | } |
243 | 243 | ||
244 | /** Release reader/writer lock held by writer |
244 | /** Release reader/writer lock held by writer |
Line 249... | Line 249... | ||
249 | * |
249 | * |
250 | * @param rwl Reader/Writer lock. |
250 | * @param rwl Reader/Writer lock. |
251 | */ |
251 | */ |
252 | void rwlock_write_unlock(rwlock_t *rwl) |
252 | void rwlock_write_unlock(rwlock_t *rwl) |
253 | { |
253 | { |
254 | pri_t pri; |
254 | ipl_t ipl; |
255 | 255 | ||
256 | pri = cpu_priority_high(); |
256 | ipl = interrupts_disable(); |
257 | spinlock_lock(&rwl->lock); |
257 | spinlock_lock(&rwl->lock); |
258 | let_others_in(rwl, ALLOW_ALL); |
258 | let_others_in(rwl, ALLOW_ALL); |
259 | spinlock_unlock(&rwl->lock); |
259 | spinlock_unlock(&rwl->lock); |
260 | cpu_priority_restore(pri); |
260 | interrupts_restore(ipl); |
261 | 261 | ||
262 | } |
262 | } |
263 | 263 | ||
264 | /** Release reader/writer lock held by reader |
264 | /** Release reader/writer lock held by reader |
265 | * |
265 | * |
Line 270... | Line 270... | ||
270 | * |
270 | * |
271 | * @param rwl Reader/Writer lock. |
271 | * @param rwl Reader/Writer lock. |
272 | */ |
272 | */ |
273 | void rwlock_read_unlock(rwlock_t *rwl) |
273 | void rwlock_read_unlock(rwlock_t *rwl) |
274 | { |
274 | { |
275 | pri_t pri; |
275 | ipl_t ipl; |
276 | 276 | ||
277 | pri = cpu_priority_high(); |
277 | ipl = interrupts_disable(); |
278 | spinlock_lock(&rwl->lock); |
278 | spinlock_lock(&rwl->lock); |
279 | if (!--rwl->readers_in) |
279 | if (!--rwl->readers_in) |
280 | let_others_in(rwl, ALLOW_ALL); |
280 | let_others_in(rwl, ALLOW_ALL); |
281 | spinlock_unlock(&rwl->lock); |
281 | spinlock_unlock(&rwl->lock); |
282 | cpu_priority_restore(pri); |
282 | interrupts_restore(ipl); |
283 | } |
283 | } |
284 | 284 | ||
285 | 285 | ||
286 | /** Direct handoff |
286 | /** Direct handoff |
287 | * |
287 | * |
288 | * Direct handoff of reader/writer lock ownership |
288 | * Direct handoff of reader/writer lock ownership |
289 | * to waiting readers or a writer. |
289 | * to waiting readers or a writer. |
290 | * |
290 | * |
291 | * Must be called with rwl->lock locked. |
291 | * Must be called with rwl->lock locked. |
292 | * Must be called with cpu_priority_high'ed. |
292 | * Must be called with interrupts_disable()'d. |
293 | * |
293 | * |
294 | * @param rwl Reader/Writer lock. |
294 | * @param rwl Reader/Writer lock. |
295 | * @param readers_only See the description below. |
295 | * @param readers_only See the description below. |
296 | * |
296 | * |
297 | * If readers_only is false: (unlock scenario) |
297 | * If readers_only is false: (unlock scenario) |