/*
 * Copyright (c) 2001-2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be taken before
 * the newly selected thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
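	/*
	 * FPU handling: with CONFIG_FPU_LAZY, the FPU is enabled only when
	 * this thread already owns the CPU's FPU state; otherwise it stays
	 * disabled so that the first FPU instruction faults and the switch is
	 * done in scheduler_fpu_lazy_request() below. Without lazy switching,
	 * the thread's FPU context is restored (or initialized) right away.
	 */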
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be taken after
 * the running thread has been preempted by the
 * scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
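/** Service a lazy FPU context switch request.
 *
 * Save the FPU context of the previous owner (if any), then restore, or
 * allocate and initialize, the FPU context of the current thread and make
 * it the new owner of the CPU's FPU.
 */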
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context =
			    (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there is nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This improves energy saving and benefits hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

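	/*
	 * Scan the run queues from the highest priority (index 0) to the
	 * lowest and dequeue the first thread found.
	 */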
	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

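		/*
		 * Give the thread a time quantum proportional to its run
		 * queue index: (i + 1) * 10000 us, i.e. 10 ms per priority
		 * level.
		 */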
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises.
		 */
		t->flags &= ~THREAD_FLAG_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to start are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);

		/* Update thread accounting */
		THREAD->cycles += get_cycle() - THREAD->last_cycle;

#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */

			/* Save current CPU cycle */
			THREAD->last_cycle = get_cycle();

			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded
		 * here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * using the new stack. It handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD);
			} else {
				/*
				 * The thread structure is kept allocated until
				 * somebody calls thread_detach() on it.
				 */
				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock.
					 */
					spinlock_unlock(&THREAD->lock);
					delay(10);
					spinlock_lock(&THREAD->lock);
					goto repeat;
				}
				_waitq_wakeup_unsafe(&THREAD->join_wq, false);
				spinlock_unlock(&THREAD->join_wq.lock);

				THREAD->state = Undead;
				spinlock_unlock(&THREAD->lock);
			}
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). Address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context
			 * invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid,
			    thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

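	/*
	 * Possibly relink the run queues, using the priority of the thread
	 * we are about to run as the relink threshold.
	 */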
	relink_rq(priority);

	/*
	 * If the old and the new task are the same, lots of work is
	 * avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address
		 * space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	spinlock_lock(&THREAD->lock);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
	    CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
	    atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
	 * thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply of
 * threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, j, k = 0;
	unsigned int i;
	ipl_t ipl;

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation can have changed between two
	 * passes. Each time get the most up-to-date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

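			/*
			 * k is bumped after each successful steal so that
			 * subsequent scans start from a different CPU.
			 */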
			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * This does not require interrupt disabling because
			 * kcpulb has THREAD_FLAG_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads
				 * nor threads that have already been stolen.
				 * The latter prevents threads from migrating
				 * between CPUs without ever being run. We
				 * also don't want to steal threads whose FPU
				 * context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (THREAD_FLAG_WIRED |
				    THREAD_FLAG_STOLEN))) &&
				    (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
				    "avg=%nd\n", CPU->id, t->tid, CPU->id,
				    atomic_get(&CPU->nrdy),
				    atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= THREAD_FLAG_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	unsigned int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/* We are going to mess with scheduler structures,
	 * let's not be interrupted */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
		    cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head;
			    cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				    thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}

/** @}
 */