/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and the kcpulb kernel thread which
 * performs load balancing of per-CPU run queues.
 */

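/*
 * Overview of the data structures used below: every CPU owns an array of
 * RQ_COUNT run queues (CPU->rq[]), where rq[0] holds the highest-priority
 * threads; each queue is protected by its own spinlock and carries a count
 * of queued threads (r->n). The number of ready threads is tracked per CPU
 * in CPU->nrdy and system-wide in the global nrdy counter, which kcpulb
 * uses on SMP systems to decide when to steal threads from busier CPUs.
 */
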
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;    /**< Number of ready threads in the system. */

/** Carry out actions before a new task runs. */
void before_task_runs(void)
{
    before_task_runs_arch();
}

/** Take actions before a new thread runs.
 *
 * Perform actions that need to be taken
 * before the newly selected thread is
 * passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be taken
 * after the running thread has been
 * preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
    after_thread_ran_arch();
}

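/*
 * Note on FPU handling: with CONFIG_FPU_LAZY, before_thread_runs() only
 * enables the FPU for the thread that already owns its context and disables
 * it for everyone else, so the save/restore work is deferred to
 * scheduler_fpu_lazy_request() below, which is expected to be invoked from
 * the architecture's FPU-disabled trap once the thread actually touches the
 * FPU. Without CONFIG_FPU_LAZY, the context is saved and restored eagerly on
 * every context switch (see scheduler() and before_thread_runs()).
 */
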
#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
restart:
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
        CPU->fpu_owner = NULL;
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists) {
        fpu_context_restore(THREAD->saved_fpu_context);
    } else {
        /* Allocate FPU context */
        if (!THREAD->saved_fpu_context) {
            /* Might sleep */
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&CPU->lock);
            THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
            /* We may have switched CPUs during slab_alloc */
            goto restart;
        }
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
    spinlock_unlock(&THREAD->lock);

    spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there is nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will continue to go to sleep
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
         * when the need for load balancing arises.
         */
        t->flags &= ~THREAD_FLAG_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

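/*
 * A note on the quantum assignment in find_best_thread(): a thread taken
 * from rq[i] gets a time quantum of (i + 1) * 10000 microseconds, i.e.
 * 10 ms when it comes from the highest-priority queue rq[0], 20 ms from
 * rq[1], and so on. Lower-priority threads therefore run less often, but
 * receive longer slices when they finally do run.
 */
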
/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in run queues.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}

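/*
 * Worked example of the relinking above: with start == 2 and the relink
 * threshold exceeded, the loop first moves all threads from rq[3] to rq[2],
 * then from rq[4] to rq[3], and so on up to the last queue. Every thread
 * waiting in rq[start + 1] through rq[RQ_COUNT - 1] is thus promoted by one
 * queue, which guarantees that starved low-priority threads eventually
 * bubble up to queues that find_best_thread() scans earlier.
 */
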
/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(THREAD->saved_fpu_context);
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);

            return;
        }

        /*
         * The interrupt priority level of the preempted thread is recorded
         * here to facilitate scheduler() invocations from
         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and the preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We must not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}

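/*
 * To summarize the switching path: a preempted thread saves its own context
 * in scheduler() above, the CPU then hops onto its private stack via
 * CPU->saved_context, and scheduler_separated_stack() below performs
 * context_restore() on the saved context of whatever thread
 * find_best_thread() selects. When the preempted thread is scheduled again,
 * it resumes right after its context_save() call and simply returns from
 * scheduler().
 */
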
/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the
 * actual context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        /* must be run after the switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
        case Running:
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
repeat:
            if (THREAD->detached) {
                thread_destroy(THREAD);
            } else {
                /*
                 * The thread structure is kept allocated until
                 * somebody calls thread_detach() on it.
                 */
                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
                    /*
                     * Avoid deadlock.
                     */
                    spinlock_unlock(&THREAD->lock);
                    delay(10);
                    spinlock_lock(&THREAD->lock);
                    goto repeat;
                }
                _waitq_wakeup_unsafe(&THREAD->join_wq, false);
                spinlock_unlock(&THREAD->join_wq.lock);

                THREAD->state = Undead;
                spinlock_unlock(&THREAD->lock);
            }
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in
             * waitq_sleep(). The address of wq->lock is kept in
             * THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context
             * invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid,
                thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    /*
     * If the old and the new task are the same, a lot of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one
         * address space.
         */
        if (as1 != as2) {
            /*
             * Both the tasks and the address spaces are different.
             * Replace the old address space with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
        before_task_runs();
    }

    spinlock_lock(&THREAD->lock);
    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
        CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
        atomic_get(&CPU->nrdy));
#endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
     * to the thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load-balancing thread. It supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

    /*
     * Detach kcpulb as nobody will call thread_join_timeout() on it.
     */
    thread_detach(THREAD);

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation can have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

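    /*
     * Example of the arithmetic above: with 4 active CPUs and a system-wide
     * nrdy of 13, average = 13 / 4 + 1 = 4; a CPU that currently has only
     * one ready thread will then try to steal count = 4 - 1 = 3 threads
     * from its busier neighbours.
     */
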
    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * We are not interested in ourselves.
             * Disabling interrupts is not required here because
             * kcpulb has THREAD_FLAG_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor
                 * threads that have already been stolen. The latter
                 * prevents threads from migrating between CPUs
                 * without ever being run. We also don't want to
                 * steal threads whose FPU context is still in the
                 * CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
                    (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on the local CPU.
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
                    CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
                    atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= THREAD_FLAG_STOLEN;
                t->state = Entering;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    for (cpu = 0; cpu < config.cpu_count; cpu++) {

        if (!cpus[cpu].active)
            continue;

        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
            cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
            cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\trq[%d]: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid,
                    thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}

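/*
 * For reference, the output of sched_print_list() on a hypothetical system
 * might look like the following (thread IDs and counts are illustrative
 * only):
 *
 *   cpu0: address=0x..., nrdy=3, needs_relink=0
 *       rq[0]: 4(Ready) 9(Ready)
 *       rq[3]: 12(Ready)
 */
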
/** @}
 */