/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file	scheduler.c
 * @brief	Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Carry out actions before a new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before a new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
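	/*
	 * Lazy FPU context switching: enable the FPU only if this thread
	 * already owns it on this CPU. Otherwise keep the FPU disabled so
	 * that the thread's first FPU instruction traps and the context can
	 * be switched on demand (presumably via scheduler_fpu_lazy_request()).
	 */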
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
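/** Switch the FPU context to the current thread on demand.
 *
 * With lazy FPU context switching, this function is presumably called
 * from the architecture's "FPU disabled" trap handler when THREAD uses
 * the FPU without owning it. It saves the previous owner's FPU context,
 * restores (allocating and initializing it first, if necessary) the
 * context of THREAD and makes THREAD the CPU's FPU owner.
 */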
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context =
			    slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This saves energy and benefits hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will still go to sleep,
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

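		/*
		 * Time quantum: a thread taken from run queue i is given
		 * (i + 1) * 10 ms, so threads from lower-priority queues
		 * run less frequently but with longer quanta.
		 */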
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated again
		 * when the need for load balancing emerges.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in run queues.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to @start are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
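		/*
		 * Note: context_save() appears to return a nonzero value on
		 * the direct call and zero when control comes back to it via
		 * context_restore(), so the body of this if statement runs
		 * only when the thread is scheduled again and leaves
		 * scheduler().
		 */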
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * The interrupt priority level of the preempted thread is
		 * recorded here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and the preemption counter. At this point THE could be coming
	 * either from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We must not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
	    (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* Must be run after the switch to the scheduler stack. */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD);
			} else {
				/*
				 * The thread structure is kept allocated until
				 * somebody calls thread_detach() on it.
				 */
				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock.
					 */
					spinlock_unlock(&THREAD->lock);
					delay(10);
					spinlock_lock(&THREAD->lock);
					goto repeat;
				}
				_waitq_wakeup_unsafe(&THREAD->join_wq, false);
				spinlock_unlock(&THREAD->join_wq.lock);

				THREAD->state = Undead;
				spinlock_unlock(&THREAD->lock);
			}
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). Address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context
			 * invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid,
			    thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	/*
	 * If the old and the new task are the same, a lot of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one
		 * address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old address space with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	spinlock_lock(&THREAD->lock);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
	    CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
	    atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter
	 * to the thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load-balancing thread that supervises the supply of
 * threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation can have changed between two
	 * passes. Each time get the most up-to-date counts.
	 */
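	/*
	 * 'average' is the per-CPU share of ready threads (rounded up);
	 * 'count' is how many threads this CPU lacks to reach that share.
	 */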
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * This does not require disabled interrupts,
			 * as kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads,
				 * nor threads that have already been stolen.
				 * The latter prevents threads from migrating
				 * between CPUs without ever being run.
				 * We also don't want to steal threads whose
				 * FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN)) &&
				    !(t->fpu_context_engaged)) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on the local CPU.
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
				    CPU->id, t->tid, CPU->id,
				    atomic_get(&CPU->nrdy),
				    atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures;
	 * let's not be interrupted.
	 */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
		    cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head;
			    cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				    thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}