--- Rev 405
+++ Rev 413
@@ -135,15 +135,15 @@
  * ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
  * attempted.
  */
 int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
 {
-	volatile pri_t pri; /* must be live after context_restore() */
+	volatile ipl_t ipl; /* must be live after context_restore() */
 
 
 restart:
-	pri = cpu_priority_high();
+	ipl = interrupts_disable();
 
 	/*
 	 * Busy waiting for a delayed timeout.
 	 * This is an important fix for the race condition between
 	 * a delayed timeout and a next call to waitq_sleep_timeout().
@@ -151,9 +151,9 @@
 	 * there are timeouts in progress.
 	 */
 	spinlock_lock(&THREAD->lock);
 	if (THREAD->timeout_pending) {
 		spinlock_unlock(&THREAD->lock);
-		cpu_priority_restore(pri);
+		interrupts_restore(ipl);
 		goto restart;
 	}
 	spinlock_unlock(&THREAD->lock);
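Two things are worth noting in this hunk. First, the loop restores interrupts before every goto restart, presumably so the pending timeout handler can actually fire; on a uniprocessor, spinning with interrupts masked would keep the clock handler out forever. Second, judging from the hunks shown, the revision changes no behavior at all: it is a mechanical rename of the interrupt-control API, pri_t to ipl_t, cpu_priority_high() to interrupts_disable(), and cpu_priority_restore() to interrupts_restore(). A minimal sketch of the post-rename bracket, with a placeholder body standing in for the real critical section:

	ipl_t ipl;

	ipl = interrupts_disable();	/* mask interrupts, remember the previous level */
	spinlock_lock(&wq->lock);
	/* ... read or modify state shared with interrupt handlers ... */
	spinlock_unlock(&wq->lock);
	interrupts_restore(ipl);	/* drop back to whatever level the caller had */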
@@ -160,20 +160,20 @@
 
 	spinlock_lock(&wq->lock);
 
 	/* checks whether to go to sleep at all */
 	if (wq->missed_wakeups) {
 		wq->missed_wakeups--;
 		spinlock_unlock(&wq->lock);
-		cpu_priority_restore(pri);
+		interrupts_restore(ipl);
 		return ESYNCH_OK_ATOMIC;
 	}
 	else {
 		if (nonblocking && (usec == 0)) {
 			/* return immediately instead of going to sleep */
 			spinlock_unlock(&wq->lock);
-			cpu_priority_restore(pri);
+			interrupts_restore(ipl);
 			return ESYNCH_WOULD_BLOCK;
 		}
 	}
 
 
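The missed_wakeups branch gives the wait queue semaphore-like memory: a wakeup that arrived while nobody was sleeping is remembered and consumed by the next sleeper without blocking. A hypothetical sequence (the increment on the wakeup side is an assumption; it is not visible in these hunks):

	int rc;

	waitq_wakeup(&wq, 0);			/* nobody sleeping: presumably wq->missed_wakeups++ */
	rc = waitq_sleep_timeout(&wq, 0, 1);	/* consumes the missed wakeup: rc == ESYNCH_OK_ATOMIC */

With nonblocking set, usec == 0 and no missed wakeup pending, the same call returns ESYNCH_WOULD_BLOCK instead, which makes it a try-style operation.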
@@ -187,11 +187,11 @@
 			/*
 			 * Short emulation of scheduler() return code.
 			 */
 			before_thread_runs();
 			spinlock_unlock(&THREAD->lock);
-			cpu_priority_restore(pri);
+			interrupts_restore(ipl);
 			return ESYNCH_TIMEOUT;
 		}
 		THREAD->timeout_pending = 1;
 		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, THREAD);
 	}
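Note the ordering: timeout_pending is set before timeout_register(), because the timeout may fire (on another CPU, or as soon as interrupts are re-enabled) immediately after registration, and the restart loop at the top of the function polls exactly this flag. waitq_interrupted_sleep() itself is outside the hunks shown; a hypothetical handler with the shape timeout_register() expects here, its body inferred rather than quoted:

	/* Hypothetical sketch; the real handler is waitq_interrupted_sleep(). */
	static void hypothetical_sleep_timeout(void *arg)
	{
		thread_t *t = (thread_t *) arg;

		spinlock_lock(&t->lock);
		t->timeout_pending = 0;	/* releases the busy wait in waitq_sleep_timeout() */
		/* ... take t off t->sleep_queue and make it runnable again ... */
		spinlock_unlock(&t->lock);
	}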
@@ -205,11 +205,11 @@
 	THREAD->sleep_queue = wq;
 
 	spinlock_unlock(&THREAD->lock);
 
 	scheduler(); /* wq->lock is released in scheduler_separated_stack() */
-	cpu_priority_restore(pri);
+	interrupts_restore(ipl);
 
 	return ESYNCH_OK_BLOCKED;
 }
 
 
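Taken together, the four return codes cover every exit path of waitq_sleep_timeout(). A hypothetical caller willing to wait up to 1000 microseconds might dispatch on them like this:

	switch (waitq_sleep_timeout(&wq, 1000, 0)) {
	case ESYNCH_OK_ATOMIC:		/* a missed wakeup was pending; no sleep took place */
		break;
	case ESYNCH_OK_BLOCKED:		/* slept and was woken by waitq_wakeup() */
		break;
	case ESYNCH_TIMEOUT:		/* the 1000 usec budget expired first */
		break;
	case ESYNCH_WOULD_BLOCK:	/* only reachable with nonblocking != 0 and usec == 0 */
		break;
	}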
@@ -226,15 +226,15 @@
  * @param all If this is non-zero, all sleeping threads
  *        will be woken up and missed count will be zeroed.
  */
 void waitq_wakeup(waitq_t *wq, int all)
 {
-	pri_t pri;
+	ipl_t ipl;
 
-	pri = cpu_priority_high();
+	ipl = interrupts_disable();
 	spinlock_lock(&wq->lock);
 
 	_waitq_wakeup_unsafe(wq, all);
 
 	spinlock_unlock(&wq->lock);
-	cpu_priority_restore(pri);
+	interrupts_restore(ipl);
 }
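The wakeup side is a thin wrapper: the same interrupt-and-spinlock bracket around the unsafe worker. Since waitq_wakeup() takes care of its own protection, a hypothetical producer needs none of its own:

	waitq_wakeup(&wq, 0);	/* wake at most one sleeping thread */
	waitq_wakeup(&wq, 1);	/* wake all sleepers and zero the missed count */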
@@ -241,4 +241,4 @@
 
 /** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
  *
  * This is the internal SMP- and IRQ-unsafe version