/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file	waitq.c
 * @brief	Wait queue.
 *
 * Wait queue is the basic synchronization primitive upon which all
 * other synchronization primitives are built.
 *
 * It allows threads to wait for an event in first-come, first-served
 * fashion. Conditional operation as well as timeouts and interruptions
 * are supported.
 */

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>

static void waitq_timeouted_sleep(void *data);

/** Initialize wait queue
 *
 * Initialize wait queue.
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
	spinlock_initialize(&wq->lock, "waitq_lock");
	list_initialize(&wq->head);
	wq->missed_wakeups = 0;
}
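
/*
 * Usage sketch (illustrative; my_wq and the surrounding functions are
 * hypothetical, not part of this file): a wait queue is typically a
 * statically allocated or embedded object, initialized once before use.
 *
 *	waitq_t my_wq;
 *
 *	void my_init(void)
 *	{
 *		waitq_initialize(&my_wq);
 *	}
 *
 *	void my_waiter(void)
 *	{
 *		// usec == 0 and nonblocking == 0: block until a wakeup arrives
 *		waitq_sleep_timeout(&my_wq, 0, 0);
 *	}
 */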

/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It is supposed to try to remove 'its' thread from the wait queue;
 * it may fail to achieve this goal when the timeout and a wakeup
 * overlap. In that case it behaves just as though there was no
 * timeout at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_timeouted_sleep(void *data)
{
	thread_t *t = (thread_t *) data;
	waitq_t *wq;
	bool do_wakeup = false;

	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		list_remove(&t->wq_link);
		t->saved_context = t->sleep_timeout_context;
		do_wakeup = true;

		spinlock_unlock(&wq->lock);
		t->sleep_queue = NULL;
	}

	t->timeout_pending = false;
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
}

/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a wait queue.
 * If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
	waitq_t *wq;
	bool do_wakeup = false;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		list_remove(&t->wq_link);
		t->saved_context = t->sleep_interruption_context;
		do_wakeup = true;

		spinlock_unlock(&wq->lock);
		t->sleep_queue = NULL;
	}
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}
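
/*
 * Usage sketch (illustrative; my_wq and both functions are hypothetical,
 * not part of this file): interrupting a sleeper. The interrupted thread
 * resumes via its sleep_interruption_context and its sleep call reports
 * ESYNCH_INTERRUPTED.
 *
 *	void my_sleeper(void)
 *	{
 *		if (waitq_sleep_timeout(&my_wq, 0, 0) == ESYNCH_INTERRUPTED) {
 *			// woken by waitq_interrupt_sleep(), not by a wakeup
 *		}
 *	}
 *
 *	void my_canceller(thread_t *t)
 *	{
 *		waitq_interrupt_sleep(t);	// no-op if t is not sleeping
 *	}
 */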

/** Sleep until either wakeup, timeout or interruption occurs
 *
 * This is a sleep implementation which allows itself to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
 *
 * This function is really basic in that other functions such as waitq_sleep()
 * and all the *_timeout() functions use it.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param nonblocking Blocking vs. non-blocking operation mode switch.
 *
 * If usec is greater than zero, regardless of the value of nonblocking,
 * the call will not return until either timeout or wakeup comes.
 *
 * If usec is zero and nonblocking is zero (false), the call
 * will not return until wakeup comes.
 *
 * If usec is zero and nonblocking is non-zero (true), the call will
 * immediately return, reporting either success or failure.
 *
 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
 *         ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
 *
 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
 * of the call there was no pending wakeup.
 *
 * @li ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
 *
 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * to sleep at all.
 *
 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
{
	ipl_t ipl;
	int rc;

	ipl = waitq_sleep_prepare(wq);
	rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking);
	waitq_sleep_finish(wq, rc, ipl);
	return rc;
}
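
/*
 * Usage sketch (illustrative; my_wq is hypothetical, not part of this
 * file): waiting for an event for at most one second and dispatching
 * on the documented return codes.
 *
 *	switch (waitq_sleep_timeout(&my_wq, 1000000, 0)) {
 *	case ESYNCH_OK_ATOMIC:
 *	case ESYNCH_OK_BLOCKED:
 *		// a wakeup arrived (possibly before we ever blocked)
 *		break;
 *	case ESYNCH_TIMEOUT:
 *		// one second elapsed without a wakeup
 *		break;
 *	case ESYNCH_INTERRUPTED:
 *		// somebody called waitq_interrupt_sleep() on this thread
 *		break;
 *	}
 */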
|
/** Prepare to sleep in a waitq.
 *
 * This function will return holding the lock of the wait queue
 * and interrupts disabled.
 *
 * @param wq Wait queue.
 *
 * @return Interrupt level as it existed on entry to this function.
 */
ipl_t waitq_sleep_prepare(waitq_t *wq)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();

	/*
	 * Busy waiting for a delayed timeout.
	 * This is an important fix for the race condition between
	 * a delayed timeout and a subsequent call to waitq_sleep_timeout().
	 * Simply, the thread is not allowed to go to sleep if
	 * there are timeouts in progress.
	 */
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) {
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}
	spinlock_unlock(&THREAD->lock);

	spinlock_lock(&wq->lock);
	return ipl;
}

/** Finish waiting in a wait queue.
 *
 * This function restores interrupts to the state that existed prior
 * to the call to waitq_sleep_prepare(). If necessary, the wait queue
 * lock is released.
 *
 * @param wq Wait queue.
 * @param rc Return code of waitq_sleep_timeout_unsafe().
 * @param ipl Interrupt level returned by waitq_sleep_prepare().
 */
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
{
	switch (rc) {
	case ESYNCH_WOULD_BLOCK:
	case ESYNCH_OK_ATOMIC:
		spinlock_unlock(&wq->lock);
		break;
	default:
		break;
	}
	interrupts_restore(ipl);
}
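
/*
 * Composition sketch (illustrative; my_cv_t, its wq member and my_cv_wait()
 * are hypothetical, not part of this file): the prepare/unsafe/finish split
 * lets a higher-level primitive release its own lock after the wait queue
 * lock is already held, so no wakeup can be lost in between; a
 * condition-variable style wait might look like this:
 *
 *	int my_cv_wait(my_cv_t *cv, mutex_t *mtx, __u32 usec)
 *	{
 *		ipl_t ipl;
 *		int rc;
 *
 *		ipl = waitq_sleep_prepare(&cv->wq);
 *		mutex_unlock(mtx);	// safe: wakeups now target cv->wq
 *		rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, 0);
 *		waitq_sleep_finish(&cv->wq, rc, ipl);
 *		mutex_lock(mtx);
 *		return rc;
 *	}
 */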
|
/** Internal implementation of waitq_sleep_timeout().
 *
 * This function implements the logic of sleeping in a wait queue.
 * This call must be preceded by a call to waitq_sleep_prepare()
 * and followed by a call to waitq_sleep_finish().
 *
 * @param wq See waitq_sleep_timeout().
 * @param usec See waitq_sleep_timeout().
 * @param nonblocking See waitq_sleep_timeout().
 *
 * @return See waitq_sleep_timeout().
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking)
{
	/* checks whether to go to sleep at all */
	if (wq->missed_wakeups) {
		wq->missed_wakeups--;
		return ESYNCH_OK_ATOMIC;
	}
	else {
		if (nonblocking && (usec == 0)) {
			/* return immediately instead of going to sleep */
			return ESYNCH_WOULD_BLOCK;
		}
	}

	/*
	 * Now we are firmly decided to go to sleep.
	 */
	spinlock_lock(&THREAD->lock);

	/*
	 * Set context that will be restored if the sleep
	 * of this thread is ever interrupted.
	 */
	if (!context_save(&THREAD->sleep_interruption_context)) {
		/* Short emulation of scheduler() return code. */
		spinlock_unlock(&THREAD->lock);
		return ESYNCH_INTERRUPTED;
	}

	if (usec) {
		/* We use the timeout variant. */
		if (!context_save(&THREAD->sleep_timeout_context)) {
			/* Short emulation of scheduler() return code. */
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_TIMEOUT;
		}
		THREAD->timeout_pending = true;
		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_timeouted_sleep, THREAD);
	}

	list_append(&THREAD->wq_link, &wq->head);

	/*
	 * Suspend execution.
	 */
	THREAD->state = Sleeping;
	THREAD->sleep_queue = wq;

	spinlock_unlock(&THREAD->lock);

	scheduler();	/* wq->lock is released in scheduler_separated_stack() */

	return ESYNCH_OK_BLOCKED;
}


/** Wake up first thread sleeping in a wait queue
 *
 * Wake up first thread sleeping in a wait queue.
 * This is the SMP- and IRQ-safe wrapper meant for
 * general use.
 *
 * Besides its 'normal' wakeup operation, it attempts
 * to unregister a possible timeout.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 *        will be woken up and the missed count will be zeroed.
 */
void waitq_wakeup(waitq_t *wq, bool all)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&wq->lock);

	_waitq_wakeup_unsafe(wq, all);

	spinlock_unlock(&wq->lock);
	interrupts_restore(ipl);
}
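
/*
 * Semantics sketch (illustrative; my_wq is hypothetical, not part of this
 * file): a wakeup issued while nobody sleeps is not lost; it is recorded
 * in missed_wakeups and consumed by the next sleeper without blocking.
 *
 *	waitq_wakeup(&my_wq, false);		// queue empty: missed_wakeups++
 *	waitq_sleep_timeout(&my_wq, 0, 0);	// returns ESYNCH_OK_ATOMIC
 */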

/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version
 * of waitq_wakeup(). It assumes wq->lock is already
 * locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 *        will be woken up and the missed count will be zeroed.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
{
	thread_t *t;

loop:
	if (list_empty(&wq->head)) {
		wq->missed_wakeups++;
		if (all)
			wq->missed_wakeups = 0;
		return;
	}

	t = list_get_instance(wq->head.next, thread_t, wq_link);

	list_remove(&t->wq_link);
	spinlock_lock(&t->lock);
	if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
		t->timeout_pending = false;
	t->sleep_queue = NULL;
	spinlock_unlock(&t->lock);

	thread_ready(t);

	if (all)
		goto loop;
}