waitq.c: differences between Rev 1593 and Rev 1681 (lines removed in Rev 1681 are marked "-", lines added are marked "+")
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file waitq.c
 * @brief Wait queue.
 *
 * Wait queue is the basic synchronization primitive upon which all
 * other synchronization primitives build.
 *
 * It allows threads to wait for an event in first-come, first-served
 * fashion. Conditional operation as well as timeouts and interruptions
 * are supported.
 */

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>

static void waitq_timeouted_sleep(void *data);

/** Initialize wait queue
 *
 * Initialize wait queue.
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
	spinlock_initialize(&wq->lock, "waitq_lock");
	list_initialize(&wq->head);
	wq->missed_wakeups = 0;
}

/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It is supposed to try to remove 'its' thread from the wait queue;
 * it can fail to achieve this goal when the timeout and a wakeup of
 * the same thread overlap. In that case it behaves just as though
 * there was no timeout at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_timeouted_sleep(void *data)
{
	thread_t *t = (thread_t *) data;
	waitq_t *wq;
	bool do_wakeup = false;

	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		list_remove(&t->wq_link);
		t->saved_context = t->sleep_timeout_context;
		do_wakeup = true;
-
-		spinlock_unlock(&wq->lock);
		t->sleep_queue = NULL;
+		spinlock_unlock(&wq->lock);
	}

	t->timeout_pending = false;
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
}

/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a wait queue.
 * If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
	waitq_t *wq;
	bool do_wakeup = false;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!(t->sleep_interruptible)) {
			/*
			 * The sleep cannot be interrupted.
			 */
			spinlock_unlock(&t->lock);
			goto out;
		}

		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
			t->timeout_pending = false;

		list_remove(&t->wq_link);
		t->saved_context = t->sleep_interruption_context;
		do_wakeup = true;
-
-		spinlock_unlock(&wq->lock);
		t->sleep_queue = NULL;
+		spinlock_unlock(&wq->lock);
	}
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}

/** Sleep until either wakeup, timeout or interruption occurs
 *
 * This is a sleep implementation which allows itself to time out or to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
 *
 * This function is really basic in that other functions such as waitq_sleep()
 * and all the *_timeout() functions use it.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of the sleep.
 *
 * The sleep can be interrupted only if the
 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
 *
 * If usec is greater than zero, regardless of the value of the
 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until
 * either timeout, interruption or wakeup comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
 * the call will not return until wakeup or interruption comes.
 *
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
 * call will return immediately, reporting either success or failure.
 *
 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
 *         ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
 *
 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
 * of the call there was no pending wakeup.
 *
 * @li ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
 *
 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * to sleep at all.
 *
 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags)
{
	ipl_t ipl;
	int rc;

	ipl = waitq_sleep_prepare(wq);
	rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
	waitq_sleep_finish(wq, rc, ipl);
	return rc;
}

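A minimal usage sketch of the API above, assuming a hypothetical producer/consumer pair; the names my_wq, my_data_ready, my_consumer() and my_producer() are illustrative and not part of waitq.c, while the waitq calls and ESYNCH_* codes are the ones documented in this file:

    static waitq_t my_wq;                   /* set up once with waitq_initialize(&my_wq) */
    static volatile bool my_data_ready = false;

    void my_consumer(void)
    {
        /* Sleep in 1 s slices; give up if somebody interrupts the sleep. */
        while (!my_data_ready) {
            int rc = waitq_sleep_timeout(&my_wq, 1000000,
                SYNCH_FLAGS_INTERRUPTIBLE);
            if (rc == ESYNCH_INTERRUPTED)
                return;
        }
    }

    void my_producer(void)
    {
        my_data_ready = true;
        waitq_wakeup(&my_wq, false);        /* wake a single sleeper */
    }

Because waitq_wakeup() records a missed wakeup when nobody is sleeping yet, the producer may run before the consumer reaches its sleep without the wakeup being lost.
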
/** Prepare to sleep in a waitq.
 *
 * This function will return holding the lock of the wait queue
 * and interrupts disabled.
 *
 * @param wq Wait queue.
 *
 * @return Interrupt level as it existed on entry to this function.
 */
ipl_t waitq_sleep_prepare(waitq_t *wq)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();

	if (THREAD) {	/* needed during system initialization */
		/*
		 * Busy waiting for a delayed timeout.
		 * This is an important fix for the race condition between
		 * a delayed timeout and a next call to waitq_sleep_timeout().
		 * Simply, the thread is not allowed to go to sleep if
		 * there are timeouts in progress.
		 */
		spinlock_lock(&THREAD->lock);
		if (THREAD->timeout_pending) {
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(ipl);
			goto restart;
		}
		spinlock_unlock(&THREAD->lock);
	}

	spinlock_lock(&wq->lock);
	return ipl;
}

/** Finish waiting in a wait queue.
 *
 * This function restores interrupts to the state that existed prior
 * to the call to waitq_sleep_prepare(). If necessary, the wait queue
 * lock is released.
 *
 * @param wq Wait queue.
 * @param rc Return code of waitq_sleep_timeout_unsafe().
 * @param ipl Interrupt level returned by waitq_sleep_prepare().
 */
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
{
	switch (rc) {
	case ESYNCH_WOULD_BLOCK:
	case ESYNCH_OK_ATOMIC:
		spinlock_unlock(&wq->lock);
		break;
	default:
		break;
	}
	interrupts_restore(ipl);
}

/** Internal implementation of waitq_sleep_timeout().
 *
 * This function implements the logic of sleeping in a wait queue.
 * This call must be preceded by a call to waitq_sleep_prepare()
 * and followed by a call to waitq_sleep_finish().
 *
 * @param wq See waitq_sleep_timeout().
 * @param usec See waitq_sleep_timeout().
 * @param flags See waitq_sleep_timeout().
 *
 * @return See waitq_sleep_timeout().
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags)
{
	/* checks whether to go to sleep at all */
	if (wq->missed_wakeups) {
		wq->missed_wakeups--;
		return ESYNCH_OK_ATOMIC;
	}
	else {
		if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
			/* return immediately instead of going to sleep */
			return ESYNCH_WOULD_BLOCK;
		}
	}

	/*
	 * Now we are firmly decided to go to sleep.
	 */
	spinlock_lock(&THREAD->lock);

	if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {

		/*
		 * If the thread was already interrupted,
		 * don't go to sleep at all.
		 */
		if (THREAD->interrupted) {
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&wq->lock);
			return ESYNCH_INTERRUPTED;
		}

		/*
		 * Set context that will be restored if the sleep
		 * of this thread is ever interrupted.
		 */
		THREAD->sleep_interruptible = true;
		if (!context_save(&THREAD->sleep_interruption_context)) {
			/* Short emulation of scheduler() return code. */
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_INTERRUPTED;
		}

	} else {
		THREAD->sleep_interruptible = false;
	}

	if (usec) {
		/* We use the timeout variant. */
		if (!context_save(&THREAD->sleep_timeout_context)) {
			/* Short emulation of scheduler() return code. */
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_TIMEOUT;
		}
		THREAD->timeout_pending = true;
		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_timeouted_sleep, THREAD);
	}

	list_append(&THREAD->wq_link, &wq->head);

	/*
	 * Suspend execution.
	 */
	THREAD->state = Sleeping;
	THREAD->sleep_queue = wq;

	spinlock_unlock(&THREAD->lock);

	scheduler();	/* wq->lock is released in scheduler_separated_stack() */

	return ESYNCH_OK_BLOCKED;
}


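The waitq_sleep_prepare()/waitq_sleep_timeout_unsafe()/waitq_sleep_finish() split exists so that a caller can do extra work after the wait queue is locked but before actually going to sleep. A sketch of such a use, e.g. a condition-variable style wait that must drop an outer mutex first; my_wq, my_mtx, usec and the mutex_lock()/mutex_unlock() calls are assumptions for illustration, not something this file provides (0 as flags means a plain blocking sleep):

    ipl_t ipl;
    int rc;

    ipl = waitq_sleep_prepare(&my_wq);      /* wq->lock held, interrupts disabled */
    mutex_unlock(&my_mtx);                  /* cannot lose a wakeup now: waitq_wakeup() spins on wq->lock */
    rc = waitq_sleep_timeout_unsafe(&my_wq, usec, 0);
    waitq_sleep_finish(&my_wq, rc, ipl);    /* releases wq->lock if still held, restores interrupts */
    mutex_lock(&my_mtx);
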
/** Wake up first thread sleeping in a wait queue
 *
 * Wake up first thread sleeping in a wait queue.
 * This is the SMP- and IRQ-safe wrapper meant for
 * general use.
 *
 * Besides its 'normal' wakeup operation, it attempts
 * to unregister possible timeout.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 *        will be woken up and missed count will be zeroed.
 */
void waitq_wakeup(waitq_t *wq, bool all)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&wq->lock);

	_waitq_wakeup_unsafe(wq, all);

	spinlock_unlock(&wq->lock);
	interrupts_restore(ipl);
}

/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version
 * of waitq_wakeup(). It assumes wq->lock is already
 * locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 *        will be woken up and missed count will be zeroed.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
{
	thread_t *t;

loop:
	if (list_empty(&wq->head)) {
		wq->missed_wakeups++;
		if (all)
			wq->missed_wakeups = 0;
		return;
	}

	t = list_get_instance(wq->head.next, thread_t, wq_link);

-	list_remove(&t->wq_link);
+	/*
+	 * Lock the thread prior to removing it from the wq.
+	 * This is not necessary because of mutual exclusion
+	 * (the link belongs to the wait queue), but because
+	 * of synchronization with waitq_timeouted_sleep()
+	 * and waitq_interrupt_sleep().
+	 *
+	 * In order for these two functions to work, the following
+	 * invariant must hold:
+	 *
+	 * t->sleep_queue != NULL <=> t sleeps in a wait queue
+	 *
+	 * For an observer who locks the thread, the invariant
+	 * holds only when the lock is held prior to removing
+	 * it from the wait queue.
+	 */
	spinlock_lock(&t->lock);
+	list_remove(&t->wq_link);
+
	if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
		t->timeout_pending = false;
	t->sleep_queue = NULL;
	spinlock_unlock(&t->lock);

	thread_ready(t);

	if (all)
		goto loop;
}
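
The missed_wakeups counter handled above means a wakeup sent to an empty queue is remembered rather than lost; the next sleeper consumes it atomically. A short illustrative sequence (my_wq is a hypothetical queue, the calls and return codes are the ones defined in this file):

    waitq_t my_wq;
    waitq_initialize(&my_wq);

    waitq_wakeup(&my_wq, false);    /* queue is empty: missed_wakeups becomes 1 */

    int rc = waitq_sleep_timeout(&my_wq, 0, SYNCH_FLAGS_NON_BLOCKING);
    /* rc == ESYNCH_OK_ATOMIC: the pending wakeup is consumed without sleeping */

    rc = waitq_sleep_timeout(&my_wq, 0, SYNCH_FLAGS_NON_BLOCKING);
    /* rc == ESYNCH_WOULD_BLOCK: no pending wakeup is left */

Note that a broadcast wakeup, waitq_wakeup(&my_wq, true), zeroes the counter when the queue is empty instead of incrementing it.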