/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */

/**
 * @file
 * @brief Wait queue.
 *
 * The wait queue is the basic synchronization primitive upon which all
 * other synchronization primitives are built.
 *
 * It allows threads to wait for an event in first-come, first-served
 * fashion. Conditional operation as well as timeouts and interruptions
 * are supported.
 */

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>

static void waitq_timeouted_sleep(void *data);

/** Initialize wait queue
 *
 * Initialize wait queue.
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
	spinlock_initialize(&wq->lock, "waitq_lock");
	list_initialize(&wq->head);
	wq->missed_wakeups = 0;
}
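
/*
 * Illustrative sketch (not part of this file's interface): a minimal
 * producer/consumer pairing built directly on the wait queue API. The
 * names example_wq, example_init(), example_consumer() and
 * example_producer() are hypothetical. Passing usec == 0 with no flag
 * bits set makes the consumer wait indefinitely; the producer wakes a
 * single sleeper (all == false). See waitq_sleep_timeout() and
 * waitq_wakeup() below for the full semantics.
 *
 *	waitq_t example_wq;
 *
 *	void example_init(void)
 *	{
 *		waitq_initialize(&example_wq);
 *	}
 *
 *	void example_consumer(void)
 *	{
 *		(void) waitq_sleep_timeout(&example_wq, 0, 0);
 *	}
 *
 *	void example_producer(void)
 *	{
 *		waitq_wakeup(&example_wq, false);
 *	}
 */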

/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It is supposed to try to remove 'its' thread from the wait queue;
 * it may fail to do so when the timeout races with a regular wakeup.
 * In that case it behaves just as though there was no timeout at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_timeouted_sleep(void *data)
{
	thread_t *t = (thread_t *) data;
	waitq_t *wq;
	bool do_wakeup = false;

	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		list_remove(&t->wq_link);
		t->saved_context = t->sleep_timeout_context;
		do_wakeup = true;
		t->sleep_queue = NULL;
		spinlock_unlock(&wq->lock);
	}

	t->timeout_pending = false;
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
}

/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a wait queue.
 * If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
	waitq_t *wq;
	bool do_wakeup = false;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!(t->sleep_interruptible)) {
			/*
			 * The sleep cannot be interrupted.
			 */
			spinlock_unlock(&t->lock);
			goto out;
		}

		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
			t->timeout_pending = false;

		list_remove(&t->wq_link);
		t->saved_context = t->sleep_interruption_context;
		do_wakeup = true;
		t->sleep_queue = NULL;
		spinlock_unlock(&wq->lock);
	}
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}
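
/*
 * Illustrative sketch only; the caller example_cancel_sleep() and its
 * locking discipline are hypothetical, not taken from this file. A code
 * path that wants to abort another thread's interruptible sleep and keep
 * it from sleeping again might first mark the thread as interrupted (so
 * that a later waitq_sleep_timeout() with SYNCH_FLAGS_INTERRUPTIBLE
 * returns ESYNCH_INTERRUPTED right away) and then interrupt the current
 * sleep, if any:
 *
 *	void example_cancel_sleep(thread_t *t)
 *	{
 *		ipl_t ipl = interrupts_disable();
 *
 *		spinlock_lock(&t->lock);
 *		t->interrupted = true;
 *		spinlock_unlock(&t->lock);
 *		interrupts_restore(ipl);
 *
 *		waitq_interrupt_sleep(t);
 *	}
 */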

/** Sleep until either wakeup, timeout or interruption occurs
 *
 * This is a sleep implementation which allows itself to time out or to be
 * interrupted during the sleep, restoring a failover context.
 *
 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
 *
 * This function is the basic building block on which waitq_sleep()
 * and all the *_timeout() functions are built.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of the sleep.
 *
 * The sleep can be interrupted only if the
 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
 *
 * If usec is greater than zero, regardless of the value of the
 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until
 * either timeout, interruption or wakeup comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
 * the call will not return until wakeup or interruption comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags,
 * the call will immediately return, reporting either success or failure.
 *
 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
 *	   ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
 *
 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
 * of the call there was no pending wakeup.
 *
 * @li ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
 *
 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * asleep at all.
 *
 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags)
{
	ipl_t ipl;
	int rc;

	ipl = waitq_sleep_prepare(wq);
	rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
	waitq_sleep_finish(wq, rc, ipl);
	return rc;
}
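
/*
 * Illustrative sketch: an interruptible sleep with a 100000 us (100 ms)
 * timeout and a switch over the return codes documented above. The
 * function example_wait_for_event() is hypothetical; the flag and the
 * ESYNCH_* codes come from <synch/synch.h>. ESYNCH_WOULD_BLOCK cannot
 * occur here because SYNCH_FLAGS_NON_BLOCKING is not passed.
 *
 *	int example_wait_for_event(waitq_t *wq)
 *	{
 *		switch (waitq_sleep_timeout(wq, 100000, SYNCH_FLAGS_INTERRUPTIBLE)) {
 *		case ESYNCH_OK_ATOMIC:
 *		case ESYNCH_OK_BLOCKED:
 *			return 1;
 *		case ESYNCH_TIMEOUT:
 *		case ESYNCH_INTERRUPTED:
 *		default:
 *			return 0;
 *		}
 *	}
 */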

/** Prepare to sleep in a waitq.
 *
 * This function will return holding the lock of the wait queue
 * and interrupts disabled.
 *
 * @param wq Wait queue.
 *
 * @return Interrupt level as it existed on entry to this function.
 */
ipl_t waitq_sleep_prepare(waitq_t *wq)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();

	if (THREAD) {	/* needed during system initialization */
		/*
		 * Busy waiting for a delayed timeout.
		 * This is an important fix for the race condition between
		 * a delayed timeout and the next call to waitq_sleep_timeout().
		 * Simply, the thread is not allowed to go to sleep if
		 * there are timeouts in progress.
		 */
		spinlock_lock(&THREAD->lock);
		if (THREAD->timeout_pending) {
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(ipl);
			goto restart;
		}
		spinlock_unlock(&THREAD->lock);
	}

	spinlock_lock(&wq->lock);
	return ipl;
}

/** Finish waiting in a wait queue.
 *
 * This function restores interrupts to the state that existed prior
 * to the call to waitq_sleep_prepare(). If necessary, the wait queue
 * lock is released.
 *
 * @param wq Wait queue.
 * @param rc Return code of waitq_sleep_timeout_unsafe().
 * @param ipl Interrupt level returned by waitq_sleep_prepare().
 */
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
{
	switch (rc) {
	case ESYNCH_WOULD_BLOCK:
	case ESYNCH_OK_ATOMIC:
		spinlock_unlock(&wq->lock);
		break;
	default:
		break;
	}
	interrupts_restore(ipl);
}
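
/*
 * Illustrative sketch of why the sleep is split into waitq_sleep_prepare(),
 * waitq_sleep_timeout_unsafe() and waitq_sleep_finish(): a higher-level
 * primitive can release one of its own locks only after the wait queue is
 * locked, so no wakeup issued in between is lost. The function
 * example_guarded_wait() and the lock example_lock are hypothetical; only
 * the waitq_* calls are from this file. The caller would typically
 * re-acquire example_lock afterwards as appropriate.
 *
 *	int example_guarded_wait(waitq_t *wq, spinlock_t *example_lock, __u32 usec)
 *	{
 *		ipl_t ipl;
 *		int rc;
 *
 *		ipl = waitq_sleep_prepare(wq);
 *		spinlock_unlock(example_lock);
 *		rc = waitq_sleep_timeout_unsafe(wq, usec, 0);
 *		waitq_sleep_finish(wq, rc, ipl);
 *		return rc;
 *	}
 */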

/** Internal implementation of waitq_sleep_timeout().
 *
 * This function implements the logic of sleeping in a wait queue.
 * This call must be preceded by a call to waitq_sleep_prepare()
 * and followed by a call to waitq_sleep_finish().
 *
 * @param wq See waitq_sleep_timeout().
 * @param usec See waitq_sleep_timeout().
 * @param flags See waitq_sleep_timeout().
 *
 * @return See waitq_sleep_timeout().
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags)
{
	/* checks whether to go to sleep at all */
	if (wq->missed_wakeups) {
		wq->missed_wakeups--;
		return ESYNCH_OK_ATOMIC;
	}
	else {
		if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
			/* return immediately instead of going to sleep */
			return ESYNCH_WOULD_BLOCK;
		}
	}

	/*
	 * Now we are firmly decided to go to sleep.
	 */
	spinlock_lock(&THREAD->lock);

	if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {

		/*
		 * If the thread was already interrupted,
		 * don't go to sleep at all.
		 */
		if (THREAD->interrupted) {
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&wq->lock);
			return ESYNCH_INTERRUPTED;
		}

		/*
		 * Set context that will be restored if the sleep
		 * of this thread is ever interrupted.
		 */
		THREAD->sleep_interruptible = true;
		if (!context_save(&THREAD->sleep_interruption_context)) {
			/* Short emulation of scheduler() return code. */
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_INTERRUPTED;
		}

	} else {
		THREAD->sleep_interruptible = false;
	}

	if (usec) {
		/* We use the timeout variant. */
		if (!context_save(&THREAD->sleep_timeout_context)) {
			/* Short emulation of scheduler() return code. */
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_TIMEOUT;
		}
		THREAD->timeout_pending = true;
		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_timeouted_sleep, THREAD);
	}

	list_append(&THREAD->wq_link, &wq->head);

	/*
	 * Suspend execution.
	 */
	THREAD->state = Sleeping;
	THREAD->sleep_queue = wq;

	spinlock_unlock(&THREAD->lock);

	scheduler();	/* wq->lock is released in scheduler_separated_stack() */

	return ESYNCH_OK_BLOCKED;
}


/** Wake up first thread sleeping in a wait queue
 *
 * Wake up first thread sleeping in a wait queue.
 * This is the SMP- and IRQ-safe wrapper meant for
 * general use.
 *
 * Besides its 'normal' wakeup operation, it attempts
 * to unregister a possible timeout.
 *
 * @param wq Pointer to wait queue.
 * @param all If true, all sleeping threads will be woken up
 *	  and the missed wakeups count will be zeroed.
 */
void waitq_wakeup(waitq_t *wq, bool all)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&wq->lock);

	_waitq_wakeup_unsafe(wq, all);

	spinlock_unlock(&wq->lock);
	interrupts_restore(ipl);
}
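
/*
 * Illustrative note (example_wq and rc are hypothetical): a wakeup
 * delivered while nobody sleeps in the queue is not lost. It is recorded
 * in missed_wakeups by _waitq_wakeup_unsafe() below, and the next caller
 * of waitq_sleep_timeout() consumes it and returns ESYNCH_OK_ATOMIC
 * without blocking:
 *
 *	waitq_wakeup(&example_wq, false);
 *	rc = waitq_sleep_timeout(&example_wq, 0, 0);
 *
 * Here rc is ESYNCH_OK_ATOMIC provided no other sleeper consumed the
 * pending wakeup first. Passing all == true instead wakes every sleeping
 * thread and zeroes the missed wakeups counter.
 */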

/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version
 * of waitq_wakeup(). It assumes wq->lock is already
 * locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param all If true, all sleeping threads will be woken up
 *	  and the missed wakeups count will be zeroed.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
{
	thread_t *t;

loop:
	if (list_empty(&wq->head)) {
		wq->missed_wakeups++;
		if (all)
			wq->missed_wakeups = 0;
		return;
	}

	t = list_get_instance(wq->head.next, thread_t, wq_link);

	/*
	 * Lock the thread prior to removing it from the wq.
	 * This is not necessary for mutual exclusion
	 * (the link belongs to the wait queue), but for
	 * synchronization with waitq_timeouted_sleep()
	 * and waitq_interrupt_sleep().
	 *
	 * In order for these two functions to work, the following
	 * invariant must hold:
	 *
	 * t->sleep_queue != NULL <=> t sleeps in a wait queue
	 *
	 * For an observer who locks the thread, the invariant
	 * holds only when the lock is held prior to removing
	 * it from the wait queue.
	 */
	spinlock_lock(&t->lock);
	list_remove(&t->wq_link);

	if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
		t->timeout_pending = false;
	t->sleep_queue = NULL;
	spinlock_unlock(&t->lock);

	thread_ready(t);

	if (all)
		goto loop;
}

/** @}
 */