/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <context.h>
#include <proc/thread.h>

#include <synch/synch.h>
#include <synch/waitq.h>
#include <synch/spinlock.h>

#include <arch/asm.h>
#include <arch/types.h>
#include <arch.h>

#include <list.h>

#include <time/timeout.h>

/*
 * Initialize a wait queue: an empty list of sleeping threads
 * and no missed wakeups.
 */
void waitq_initialize(waitq_t *wq)
{
        spinlock_initialize(&wq->lock);
        list_initialize(&wq->head);
        wq->missed_wakeups = 0;
}

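/*
 * Editor's sketch (not part of the original file): a wait queue is
 * typically embedded in, or declared alongside, the object whose
 * events it guards, and initialized once before first use. The names
 * 'event_wq' and 'my_subsystem_init' are hypothetical.
 */
#if 0   /* illustrative only */
static waitq_t event_wq;

void my_subsystem_init(void)
{
        waitq_initialize(&event_wq);    /* empty queue, zero missed wakeups */
}
#endif
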
/*
 * Called with interrupts disabled from clock() when sleep_timeout
 * expires. This function is not allowed to enable interrupts.
 *
 * It attempts to remove 'its' thread from the wait queue; it can
 * fail to achieve this goal when the timeout and a regular wakeup
 * overlap. In that case it behaves just as though there was no
 * timeout at all.
 */
void waitq_interrupted_sleep(void *data)
{
        thread_t *t = (thread_t *) data;
        waitq_t *wq;
        int do_wakeup = 0;

        spinlock_lock(&threads_lock);
        if (!list_member(&t->threads_link, &threads_head))
                goto out;       /* the thread no longer exists */

grab_locks:
        spinlock_lock(&t->lock);
        if ((wq = t->sleep_queue)) {    /* assignment, not comparison */
                if (!spinlock_trylock(&wq->lock)) {
                        spinlock_unlock(&t->lock);
                        goto grab_locks;        /* avoid deadlock */
                }

                list_remove(&t->wq_link);
                t->saved_context = t->sleep_timeout_context;
                do_wakeup = 1;

                spinlock_unlock(&wq->lock);
                t->sleep_queue = NULL;
        }

        t->timeout_pending = 0;
        spinlock_unlock(&t->lock);

        if (do_wakeup) thread_ready(t);

out:
        spinlock_unlock(&threads_lock);
}

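/*
 * Editor's sketch: the trylock-and-retry pattern used above. Two code
 * paths that would otherwise take the thread lock and the wait queue
 * lock in opposite orders cannot deadlock if one of them backs off
 * completely on a failed trylock. Locks 'a' and 'b' are hypothetical.
 */
#if 0   /* illustrative only */
static spinlock_t a, b;

void take_both(void)
{
retry:
        spinlock_lock(&a);
        if (!spinlock_trylock(&b)) {
                spinlock_unlock(&a);    /* release everything held... */
                goto retry;             /* ...and start over */
        }
        /* both locks are held here */
        spinlock_unlock(&b);
        spinlock_unlock(&a);
}
#endif
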
/*
 * This is a sleep implementation which allows itself to be
 * interrupted from the sleep, restoring a failover context.
 *
 * This function is the basic primitive on which waitq_sleep()
 * and all the *_timeout() functions are built.
 *
 * When the second argument (usec) is 0, the third argument controls
 * whether the sleep is conditional (non-blocking).
 *
 * usec | nonblocking | what happens if there is no missed_wakeup
 * -----+-------------+--------------------------------------------
 *   0  |      0      | blocks without timeout until wakeup
 *   0  |    <> 0     | immediately returns ESYNCH_WOULD_BLOCK
 *  > 0 |      x      | blocks with timeout until timeout or wakeup
 *
 * return values:
 *      ESYNCH_WOULD_BLOCK
 *      ESYNCH_TIMEOUT
 *      ESYNCH_OK_ATOMIC
 *      ESYNCH_OK_BLOCKED
 */
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
{
        volatile pri_t pri;     /* must be live after context_restore() */

restart:
        pri = cpu_priority_high();

        /*
         * Busy wait for a delayed timeout.
         * This is an important fix for the race condition between
         * a delayed timeout and the next call to waitq_sleep_timeout().
         * Simply put, the thread is not allowed to go to sleep if
         * there are timeouts in progress.
         */
        spinlock_lock(&THREAD->lock);
        if (THREAD->timeout_pending) {
                spinlock_unlock(&THREAD->lock);
                cpu_priority_restore(pri);
                goto restart;
        }
        spinlock_unlock(&THREAD->lock);

        spinlock_lock(&wq->lock);

        /* check whether to go to sleep at all */
        if (wq->missed_wakeups) {
                wq->missed_wakeups--;
                spinlock_unlock(&wq->lock);
                cpu_priority_restore(pri);
                return ESYNCH_OK_ATOMIC;
        }
        else {
                if (nonblocking && (usec == 0)) {
                        /* return immediately instead of going to sleep */
                        spinlock_unlock(&wq->lock);
                        cpu_priority_restore(pri);
                        return ESYNCH_WOULD_BLOCK;
                }
        }

        /*
         * Now we are firmly decided to go to sleep.
         */
        spinlock_lock(&THREAD->lock);
        if (usec) {
                /* We use the timeout variant. */
                if (!context_save(&THREAD->sleep_timeout_context)) {
                        /*
                         * Short emulation of scheduler() return code.
                         */
                        spinlock_unlock(&THREAD->lock);
                        cpu_priority_restore(pri);
                        return ESYNCH_TIMEOUT;
                }
                THREAD->timeout_pending = 1;
                timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, THREAD);
        }

        list_append(&THREAD->wq_link, &wq->head);

        /*
         * Suspend execution.
         */
        THREAD->state = Sleeping;
        THREAD->sleep_queue = wq;

        spinlock_unlock(&THREAD->lock);

        scheduler();    /* wq->lock is released in scheduler_separated_stack() */
        cpu_priority_restore(pri);

        return ESYNCH_OK_BLOCKED;
}

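/*
 * Editor's sketch: the three calling modes of waitq_sleep_timeout(),
 * using the hypothetical 'event_wq' initialized in the sketch above.
 */
#if 0   /* illustrative only */
void consumer(void)
{
        int rc;

        /* 1. block without timeout until a wakeup arrives */
        rc = waitq_sleep_timeout(&event_wq, 0, 0);

        /* 2. poll: never block, only consume a missed wakeup if any */
        rc = waitq_sleep_timeout(&event_wq, 0, 1);
        if (rc == ESYNCH_WOULD_BLOCK) {
                /* nothing available yet */
        }

        /* 3. block for at most 1000 microseconds */
        rc = waitq_sleep_timeout(&event_wq, 1000, 0);
        if (rc == ESYNCH_TIMEOUT) {
                /* the timeout fired before any wakeup */
        }
}
#endif
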
/*
 * This is the SMP- and IRQ-safe wrapper meant for general use.
 *
 * Besides its 'normal' wakeup operation, it attempts to unregister
 * a possible timeout.
 */
void waitq_wakeup(waitq_t *wq, int all)
{
        pri_t pri;

        pri = cpu_priority_high();
        spinlock_lock(&wq->lock);

        _waitq_wakeup_unsafe(wq, all);

        spinlock_unlock(&wq->lock);
        cpu_priority_restore(pri);
}

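/*
 * Editor's sketch: the producer side paired with the consumer above.
 * A wakeup on an empty queue is recorded in missed_wakeups, so the
 * event is not lost if the sleeper arrives only later.
 */
#if 0   /* illustrative only */
void producer(void)
{
        waitq_wakeup(&event_wq, 0);     /* wake one sleeper (or record a missed wakeup) */

        waitq_wakeup(&event_wq, 1);     /* or: wake all sleepers at once */
}
#endif
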
/*
 * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup().
 * It assumes wq->lock is already locked and interrupts are already
 * disabled.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, int all)
{
        thread_t *t;

loop:
        if (list_empty(&wq->head)) {
                wq->missed_wakeups++;
                if (all) wq->missed_wakeups = 0;
                return;
        }

        t = list_get_instance(wq->head.next, thread_t, wq_link);

        list_remove(&t->wq_link);
        spinlock_lock(&t->lock);
        if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
                t->timeout_pending = 0;
        t->sleep_queue = NULL;
        spinlock_unlock(&t->lock);

        thread_ready(t);

        if (all) goto loop;
}
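
/*
 * Editor's sketch: the unsafe variant is for callers which already
 * hold wq->lock with interrupts disabled, e.g. when other state must
 * be updated under the same lock before the sleepers run.
 */
#if 0   /* illustrative only */
void wake_all_locked(void)
{
        pri_t pri;

        pri = cpu_priority_high();
        spinlock_lock(&event_wq.lock);

        /* ... update state protected by event_wq.lock ... */
        _waitq_wakeup_unsafe(&event_wq, 1);     /* wake all sleepers */

        spinlock_unlock(&event_wq.lock);
        cpu_priority_restore(pri);
}
#endif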