/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <print.h>
#include <mm/frame.h>
#include <mm/heap.h>
#include <debug.h>

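/** Number of ready threads in the system. */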
volatile count_t nrdy;


/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
	fpu_context_restore(&(THREAD->saved_fpu_context));
}


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

	ASSERT(CPU != NULL);

loop:
	cpu_priority_high();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_low();

	if (n == 0) {
		#ifdef __SMP__
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set CPU-private flag that the kcpulb has been started.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
		#endif /* __SMP__ */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 * On the other hand, several hardware interrupts can be ignored.
		 */
		cpu_sleep();
		goto loop;
	}

	cpu_priority_high();

	i = 0;
retry:
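	/* Scan the run queues from the highest priority (rq[0]) downwards. */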
	for (; i<RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		/* avoid deadlock with relink_rq() */
		if (!spinlock_trylock(&CPU->lock)) {
			/*
			 * Unlock r and try again.
			 */
			spinlock_unlock(&r->lock);
			goto retry;
		}
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		atomic_dec((int *) &nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

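		/*
		 * Give the thread a time quantum proportional to the depth of
		 * the run queue it came from: (i + 1) * 10 ms.
		 */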
		t->ticks = us2ticks((i+1)*10000);
		t->pri = i;	/* eventually correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated when the
		 * need for load balancing arises again.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * 'pri' greater than or equal to 'start' are moved up into
 * higher-priority queues.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

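	/*
	 * Relinking is performed only once in a while, after the CPU's
	 * needs_relink counter exceeds NEEDS_RELINK_MAX.
	 */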
	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i<RQ_COUNT-1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
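	/*
	 * Declared volatile because of the setjmp-like semantics of
	 * context_save() and context_restore() below.
	 */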
	volatile pri_t pri;

	ASSERT(CPU != NULL);

	pri = cpu_priority_high();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
		fpu_context_save(&(THREAD->saved_fpu_context));
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}

		/*
		 * CPU priority of preempted thread is recorded here
		 * to facilitate scheduler() invocations from
		 * cpu_priority_high()'ed code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.pri = pri;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the actual
 * context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

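	/*
	 * Finish bookkeeping for the outgoing thread (if any), according to
	 * the state it is leaving in: requeue it, destroy it or leave it
	 * sleeping.
	 */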
	if (THREAD) {
		switch (THREAD->state) {
		case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

			spinlock_lock(&CPU->lock);
			if (CPU->fpu_owner == THREAD)
				CPU->fpu_owner = NULL;
			spinlock_unlock(&CPU->lock);

			free(THREAD);

			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->pri = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering scheduler() with the thread in this state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}


	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);

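	/*
	 * Give run queues a chance to be relinked; the priority of the
	 * thread we just picked serves as the relink threshold.
	 */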
	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both tasks and vm mappings are different.
			 * Replace the old one with the new one.
			 */
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
#endif

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can have changed between two
	 * passes. Each time get the most up-to-date counts.
	 */
	pri = cpu_priority_high();
	spinlock_lock(&CPU->lock);
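	/* Steal enough threads to bring this CPU up to the average load. */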
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	cpu_priority_restore(pri);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j=RQ_COUNT-1; j >= 0; j--) {
		for (i=0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * No need to disable interrupts, because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:		pri = cpu_priority_high();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads nor threads already stolen.
				 * The latter prevents threads from migrating between CPU's without ever being run.
				 * We also don't want to steal threads whose FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {

					/*
					 * Remove t from r.
					 */

					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					atomic_dec((int *) &nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	}
	else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}

#endif /* __SMP__ */