/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file	waitq.c
 * @brief	Wait queue.
 *
 * Wait queue is the basic synchronization primitive upon which all
 * other synchronization primitives are built.
 *
 * It allows threads to wait for an event in first-come, first-served
 * fashion. Conditional operation as well as timeouts and interruptions
 * are supported.
 */

#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
#include <adt/list.h>

static void waitq_timeouted_sleep(void *data);

/** Initialize wait queue
 *
 * Initialize wait queue: its spinlock, its list of sleeping
 * threads and its counter of missed wakeups.
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
	spinlock_initialize(&wq->lock, "waitq_lock");
	list_initialize(&wq->head);
	wq->missed_wakeups = 0;
}

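/*
 * Example (an illustrative sketch, not part of the kernel proper): a wait
 * queue is typically embedded in a larger synchronization object and
 * initialized together with it. The resource_t type and the function
 * below are hypothetical names used only to illustrate the API.
 */
#if 0
typedef struct {
	waitq_t wq;		/* threads wanting the resource block here */
	volatile int avail;	/* count of available resource units */
} resource_t;

static void resource_initialize(resource_t *r)
{
	waitq_initialize(&r->wq);
	r->avail = 0;
}
#endif
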
/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() times out.
 * Interrupts are disabled.
 *
 * It tries to remove 'its' thread from the wait queue; it may
 * fail to do so when the timeout and a regular wakeup overlap.
 * In that case it behaves just as though there was no timeout
 * at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_timeouted_sleep(void *data)
{
	thread_t *t = (thread_t *) data;
	waitq_t *wq;
	bool do_wakeup = false;

	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		list_remove(&t->wq_link);
		t->saved_context = t->sleep_timeout_context;
		do_wakeup = true;

		spinlock_unlock(&wq->lock);
		t->sleep_queue = NULL;
	}

	t->timeout_pending = false;
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
}

/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a wait queue.
 * If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
	waitq_t *wq;
	bool do_wakeup = false;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		list_remove(&t->wq_link);
		t->saved_context = t->sleep_interruption_context;
		do_wakeup = true;

		spinlock_unlock(&wq->lock);
		t->sleep_queue = NULL;
	}
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}

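/*
 * Example (an illustrative sketch): waitq_interrupt_sleep() lets one thread
 * break another out of its sleep; the sleeper then returns from
 * waitq_sleep_timeout() with ESYNCH_INTERRUPTED. The thread_shutdown()
 * helper below is a hypothetical caller, not an existing kernel function.
 */
#if 0
static void thread_shutdown(thread_t *t)
{
	/* Harmless if t is not sleeping in any wait queue. */
	waitq_interrupt_sleep(t);
}
#endif
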
/** Sleep until either wakeup, timeout or interruption occurs
 *
 * This is a sleep implementation which allows itself to be
 * interrupted from the sleep, restoring a failover context.
 *
 * Sleepers are organised in FIFO fashion in a structure called wait queue.
 *
 * This function is the elementary primitive upon which waitq_sleep()
 * and all the *_timeout() functions are built.
 *
 * @param wq Pointer to wait queue.
 * @param usec Timeout in microseconds.
 * @param nonblocking Blocking vs. non-blocking operation mode switch.
 *
 * If usec is greater than zero, regardless of the value of nonblocking,
 * the call will not return until either timeout or wakeup comes.
 *
 * If usec is zero and nonblocking is zero (false), the call
 * will not return until wakeup comes.
 *
 * If usec is zero and nonblocking is non-zero (true), the call will
 * immediately return, reporting either success or failure.
 *
 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
 * ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
 *
 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
 * of the call there was no pending wakeup.
 *
 * @li ESYNCH_TIMEOUT means that the sleep timed out.
 *
 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
 *
 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
 * a pending wakeup at the time of the call. The caller was not put
 * asleep at all.
 *
 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
 * attempted.
 */
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
{
	volatile ipl_t ipl;	/* must be live after context_restore() */

restart:
	ipl = interrupts_disable();

	/*
	 * Busy waiting for a delayed timeout.
	 * This is an important fix for the race condition between
	 * a delayed timeout and a next call to waitq_sleep_timeout().
	 * Simply, the thread is not allowed to go to sleep if
	 * there are timeouts in progress.
	 */
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) {
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}
	spinlock_unlock(&THREAD->lock);

	spinlock_lock(&wq->lock);

	/* checks whether to go to sleep at all */
	if (wq->missed_wakeups) {
		wq->missed_wakeups--;
		spinlock_unlock(&wq->lock);
		interrupts_restore(ipl);
		return ESYNCH_OK_ATOMIC;
	}
	else {
		if (nonblocking && (usec == 0)) {
			/* return immediately instead of going to sleep */
			spinlock_unlock(&wq->lock);
			interrupts_restore(ipl);
			return ESYNCH_WOULD_BLOCK;
		}
	}

	/*
	 * Now we are firmly decided to go to sleep.
	 */
	spinlock_lock(&THREAD->lock);

	/*
	 * Set context that will be restored if the sleep
	 * of this thread is ever interrupted.
	 */
	if (!context_save(&THREAD->sleep_interruption_context)) {
		/* Short emulation of scheduler() return code. */
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		return ESYNCH_INTERRUPTED;
	}

	if (usec) {
		/* We use the timeout variant. */
		if (!context_save(&THREAD->sleep_timeout_context)) {
			/* Short emulation of scheduler() return code. */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(ipl);
			return ESYNCH_TIMEOUT;
		}
		THREAD->timeout_pending = true;
		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_timeouted_sleep, THREAD);
	}

	list_append(&THREAD->wq_link, &wq->head);

	/*
	 * Suspend execution.
	 */
	THREAD->state = Sleeping;
	THREAD->sleep_queue = wq;

	spinlock_unlock(&THREAD->lock);

	scheduler();	/* wq->lock is released in scheduler_separated_stack() */
	interrupts_restore(ipl);

	return ESYNCH_OK_BLOCKED;
}

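/*
 * Example (an illustrative sketch): the three calling modes of
 * waitq_sleep_timeout(), distinguished by the usec and nonblocking
 * arguments. The sleep_modes() function is hypothetical and assumes
 * a wait queue initialized elsewhere.
 */
#if 0
static void sleep_modes(waitq_t *wq)
{
	int rc;

	/* Non-blocking probe: returns immediately. */
	rc = waitq_sleep_timeout(wq, 0, 1);
	if (rc == ESYNCH_WOULD_BLOCK) {
		/* there was no pending wakeup */
	}

	/* Sleep for at most 1000 microseconds. */
	rc = waitq_sleep_timeout(wq, 1000, 0);
	if (rc == ESYNCH_TIMEOUT) {
		/* no wakeup arrived in time */
	}

	/* Block until a wakeup or an interruption arrives. */
	rc = waitq_sleep_timeout(wq, 0, 0);
	if (rc == ESYNCH_INTERRUPTED) {
		/* somebody called waitq_interrupt_sleep() on this thread */
	}
}
#endif
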
/** Wake up first thread sleeping in a wait queue
 *
 * Wake up first thread sleeping in a wait queue.
 * This is the SMP- and IRQ-safe wrapper meant for
 * general use.
 *
 * Besides its 'normal' wakeup operation, it attempts
 * to unregister a possible timeout.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 * will be woken up and the missed wakeups count will be zeroed.
 */
void waitq_wakeup(waitq_t *wq, bool all)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&wq->lock);

	_waitq_wakeup_unsafe(wq, all);

	spinlock_unlock(&wq->lock);
	interrupts_restore(ipl);
}

/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version
 * of waitq_wakeup(). It assumes wq->lock is already
 * locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 * will be woken up and the missed wakeups count will be zeroed.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
{
	thread_t *t;

loop:
	if (list_empty(&wq->head)) {
		wq->missed_wakeups++;
		if (all)
			wq->missed_wakeups = 0;
		return;
	}

	t = list_get_instance(wq->head.next, thread_t, wq_link);

	list_remove(&t->wq_link);
	spinlock_lock(&t->lock);
	if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
		t->timeout_pending = false;
	t->sleep_queue = NULL;
	spinlock_unlock(&t->lock);

	thread_ready(t);

	if (all)
		goto loop;
}
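
/*
 * Example (an illustrative sketch): a counting semaphore built on top of a
 * wait queue, to illustrate the claim in the file header that the wait queue
 * underlies the other synchronization primitives. This is schematic only
 * and all names are hypothetical; it is not the kernel's actual semaphore
 * implementation.
 */
#if 0
typedef struct {
	waitq_t wq;
} sema_t;

static void sema_initialize(sema_t *s, int count)
{
	waitq_initialize(&s->wq);
	while (count-- > 0)
		waitq_wakeup(&s->wq, false);	/* store count as missed wakeups */
}

static void sema_down(sema_t *s)
{
	/* usec == 0, nonblocking == 0: block until a wakeup arrives */
	(void) waitq_sleep_timeout(&s->wq, 0, 0);
}

static void sema_up(sema_t *s)
{
	waitq_wakeup(&s->wq, false);
}
#endif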