Rev 1288 → Rev 1375

```diff
@@ -155,17 +155,16 @@
 out:
     spinlock_unlock(&threads_lock);
     interrupts_restore(ipl);
 }
 
-
 /** Sleep until either wakeup, timeout or interruption occurs
  *
  * This is a sleep implementation which allows itself to be
  * interrupted from the sleep, restoring a failover context.
  *
- * Sleepers are organised in FIFO fashion in a structure called wait queue.
+ * Sleepers are organised in a FIFO fashion in a structure called wait queue.
  *
  * This function is really basic in that other functions such as waitq_sleep()
  * and all the *_timeout() functions use it.
  *
  * @param wq Pointer to wait queue.
```
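The comment above says sleepers are kept FIFO in a structure called a wait queue; the only waitq fields this diff actually touches are `wq->lock` and `wq->missed_wakeups`. Purely for orientation, a `waitq_t` shaped like the sketch below would be consistent with that. This is an inference from the diff, not the definition from the tree.

```c
/* Hypothetical sketch of waitq_t, inferred from the fields used in this
 * diff (wq->lock, wq->missed_wakeups) and the FIFO behaviour described
 * in the doc comment; the real definition may differ. */
typedef struct {
    spinlock_t lock;     /* serializes access to the queue */
    int missed_wakeups;  /* wakeups that found no sleeper to wake */
    link_t head;         /* FIFO list of sleeping threads */
} waitq_t;
```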
```diff
@@ -198,45 +197,99 @@
  * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
  * attempted.
  */
 int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
 {
-    volatile ipl_t ipl; /* must be live after context_restore() */
+    ipl_t ipl;
+    int rc;
 
+    ipl = waitq_sleep_prepare(wq);
+    rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking);
+    waitq_sleep_finish(wq, rc, ipl);
+    return rc;
+}
+
+/** Prepare to sleep in a waitq.
+ *
+ * This function will return holding the lock of the wait queue
+ * and interrupts disabled.
+ *
+ * @param wq Wait queue.
+ *
+ * @return Interrupt level as it existed on entry to this function.
+ */
+ipl_t waitq_sleep_prepare(waitq_t *wq)
+{
+    ipl_t ipl;
 
 restart:
     ipl = interrupts_disable();
 
     /*
     * Busy waiting for a delayed timeout.
     * This is an important fix for the race condition between
     * a delayed timeout and a next call to waitq_sleep_timeout().
     * Simply, the thread is not allowed to go to sleep if
     * there are timeouts in progress.
     */
     spinlock_lock(&THREAD->lock);
     if (THREAD->timeout_pending) {
         spinlock_unlock(&THREAD->lock);
         interrupts_restore(ipl);
         goto restart;
     }
     spinlock_unlock(&THREAD->lock);
 
     spinlock_lock(&wq->lock);
-
+    return ipl;
+}
+
+/** Finish waiting in a wait queue.
+ *
+ * This function restores interrupts to the state that existed prior
+ * to the call to waitq_sleep_prepare(). If necessary, the wait queue
+ * lock is released.
+ *
+ * @param wq Wait queue.
+ * @param rc Return code of waitq_sleep_timeout_unsafe().
+ * @param ipl Interrupt level returned by waitq_sleep_prepare().
+ */
+void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
+{
+    switch (rc) {
+    case ESYNCH_WOULD_BLOCK:
+    case ESYNCH_OK_ATOMIC:
+        spinlock_unlock(&wq->lock);
+        break;
+    default:
+        break;
+    }
+    interrupts_restore(ipl);
+}
+
+/** Internal implementation of waitq_sleep_timeout().
+ *
+ * This function implements the logic of sleeping in a wait queue.
+ * This call must be preceded by a call to waitq_sleep_prepare()
+ * and followed by a call to waitq_sleep_finish().
+ *
+ * @param wq See waitq_sleep_timeout().
+ * @param usec See waitq_sleep_timeout().
+ * @param nonblocking See waitq_sleep_timeout().
+ *
+ * @return See waitq_sleep_timeout().
+ */
+int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking)
+{
     /* checks whether to go to sleep at all */
     if (wq->missed_wakeups) {
         wq->missed_wakeups--;
-        spinlock_unlock(&wq->lock);
-        interrupts_restore(ipl);
         return ESYNCH_OK_ATOMIC;
     }
     else {
         if (nonblocking && (usec == 0)) {
             /* return immediately instead of going to sleep */
-            spinlock_unlock(&wq->lock);
-            interrupts_restore(ipl);
             return ESYNCH_WOULD_BLOCK;
         }
     }
 
     /*
```
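The net effect of this hunk is that waitq_sleep_timeout() becomes a thin wrapper around waitq_sleep_prepare(), waitq_sleep_timeout_unsafe() and waitq_sleep_finish(). Because waitq_sleep_prepare() returns with wq->lock held and interrupts disabled, a caller can now do its own bookkeeping atomically with the decision to sleep. A hypothetical caller might look like the sketch below; `my_primitive_t` and its `waiters` field are illustrative, and only the three waitq_* calls come from this diff.

```c
/* Hypothetical user of the split API. The increment happens under
 * wq->lock, so no wakeup can slip in between the bookkeeping and the
 * sleep itself. */
typedef struct {
    waitq_t wq;
    int waiters;  /* illustrative bookkeeping field */
} my_primitive_t;

int my_primitive_wait(my_primitive_t *p, __u32 usec)
{
    ipl_t ipl;
    int rc;

    ipl = waitq_sleep_prepare(&p->wq);   /* wq->lock held, interrupts off */
    p->waiters++;                        /* race-free: done under wq->lock */
    rc = waitq_sleep_timeout_unsafe(&p->wq, usec, 0);
    waitq_sleep_finish(&p->wq, rc, ipl); /* unlock if needed, restore ipl */
    return rc;
}
```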
```diff
@@ -249,20 +302,18 @@
      * of this thread is ever interrupted.
      */
     if (!context_save(&THREAD->sleep_interruption_context)) {
         /* Short emulation of scheduler() return code. */
         spinlock_unlock(&THREAD->lock);
-        interrupts_restore(ipl);
         return ESYNCH_INTERRUPTED;
     }
 
     if (usec) {
         /* We use the timeout variant. */
         if (!context_save(&THREAD->sleep_timeout_context)) {
             /* Short emulation of scheduler() return code. */
             spinlock_unlock(&THREAD->lock);
-            interrupts_restore(ipl);
             return ESYNCH_TIMEOUT;
         }
         THREAD->timeout_pending = true;
         timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_timeouted_sleep, THREAD);
     }
```
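This hunk leaves the failover-context pattern intact: as the `if (!context_save(...))` tests imply, context_save() returns true on the direct call and false when control re-enters through a restore of the saved context, which is how the interruption and timeout paths "return" ESYNCH_INTERRUPTED and ESYNCH_TIMEOUT. The standalone sketch below illustrates the same idea with standard setjmp()/longjmp(); note the inverted polarity (setjmp() returns 0 on the direct call).

```c
/* Standalone illustration of the failover-context idea using standard C
 * setjmp()/longjmp(); not kernel code. */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf failover;

static void interrupt_sleep(void)
{
    /* Analogous to the interruption path: resume at the saved context
     * instead of letting the "sleep" finish. */
    longjmp(failover, 1);
}

int main(void)
{
    if (setjmp(failover)) {
        /* Reached via longjmp(): the "sleep" was interrupted. */
        puts("interrupted");
        return 1;
    }
    /* Direct path: context saved, now go to "sleep". */
    interrupt_sleep();
    puts("never reached");
    return 0;
}
```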
```diff
@@ -276,11 +327,10 @@
     THREAD->sleep_queue = wq;
 
     spinlock_unlock(&THREAD->lock);
 
     scheduler(); /* wq->lock is released in scheduler_separated_stack() */
-    interrupts_restore(ipl);
 
     return ESYNCH_OK_BLOCKED;
 }
 
 
```
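With the refactoring applied, callers of waitq_sleep_timeout() still dispatch on the ESYNCH_* codes that appear in this diff. A minimal sketch follows; the wrapper name and the one-second timeout are illustrative.

```c
/* Minimal sketch of handling the documented return codes; returns
 * nonzero when the awaited event arrived. */
static int wait_for_event(waitq_t *wq)
{
    switch (waitq_sleep_timeout(wq, 1000000, 0)) {
    case ESYNCH_OK_BLOCKED:  /* full sleep attempted; woken up */
    case ESYNCH_OK_ATOMIC:   /* a missed wakeup was pending; no sleep */
        return 1;
    case ESYNCH_TIMEOUT:     /* 1000000 us elapsed without a wakeup */
    case ESYNCH_INTERRUPTED: /* the sleep was interrupted */
    default:                 /* ESYNCH_WOULD_BLOCK needs nonblocking != 0 */
        return 0;
    }
}
```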