/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be
 * protected by a spinlock.
 */

spinlock_t nrdylock;
volatile int nrdy;
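
/*
 * Editor's sketch (hypothetical helper, not in the original revision):
 * under the convention above, every update of the global ready-thread
 * count goes through nrdylock, as find_best_thread() and kcpulb() below
 * do by hand:
 *
 *        static void nrdy_add(int delta)
 *        {
 *                spinlock_lock(&nrdylock);
 *                nrdy += delta;
 *                spinlock_unlock(&nrdylock);
 *        }
 */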


/** Initialize context switching
 *
 * Initialize context switching and lazy FPU
 * context switching.
 *
 */
void before_thread_runs(void)
{
        before_thread_runs_arch();
        fpu_context_restore(&(THREAD->saved_fpu_context));
}
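
/*
 * Editor's note (an assumption, not in the original revision): the doc
 * comment above speaks of lazy FPU context switching; the supporting
 * machinery (CPU->fpu_owner, t->fpu_context_engaged) appears in
 * scheduler_separated_stack() and kcpulb() below, but in this revision
 * before_thread_runs() still restores the FPU context unconditionally.
 */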


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
        spinlock_initialize(&nrdylock);
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
        thread_t *t;
        runq_t *r;
        int i, n;

loop:
        cpu_priority_high();

        spinlock_lock(&CPU->lock);
        n = CPU->nrdy;
        spinlock_unlock(&CPU->lock);

        cpu_priority_low();

        if (n == 0) {
#ifdef __SMP__
                /*
                 * If the load balancing thread is not running, wake it up and
                 * set the CPU-private flag indicating that kcpulb has been started.
                 */
                if (test_and_set(&CPU->kcpulbstarted) == 0) {
                        waitq_wakeup(&CPU->kcpulb_wq, 0);
                        goto loop;
                }
#endif /* __SMP__ */

                /*
                 * Since there was nothing to run, the CPU goes to sleep
                 * until a hardware interrupt or an IPI comes.
                 * This improves energy saving and is friendly to hyperthreading.
                 * On the other hand, several hardware interrupts can be ignored.
                 */
                cpu_sleep();
                goto loop;
        }

        cpu_priority_high();

        for (i = 0; i < RQ_COUNT; i++) {
                r = &CPU->rq[i];
                spinlock_lock(&r->lock);
                if (r->n == 0) {
                        /*
                         * If this queue is empty, try a lower-priority queue.
                         */
                        spinlock_unlock(&r->lock);
                        continue;
                }

                spinlock_lock(&nrdylock);
                nrdy--;
                spinlock_unlock(&nrdylock);

                spinlock_lock(&CPU->lock);
                CPU->nrdy--;
                spinlock_unlock(&CPU->lock);

                r->n--;

                /*
                 * Take the first thread from the queue.
                 */
                t = list_get_instance(r->rq_head.next, thread_t, rq_link);
                list_remove(&t->rq_link);

                spinlock_unlock(&r->lock);

                spinlock_lock(&t->lock);
                t->cpu = CPU;

                t->ticks = us2ticks((i + 1) * 10000);
                t->pri = i;     /* eventually correct rq index */

                /*
                 * Clear the X_STOLEN flag so that t can be migrated
                 * when the need for load balancing arises.
                 */
                t->flags &= ~X_STOLEN;
                spinlock_unlock(&t->lock);

                return t;
        }
        goto loop;
}
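
/*
 * Editor's illustration (not in the original revision): the quantum set
 * above grows linearly with the run queue index, so lower-priority
 * queues receive longer time slices:
 *
 *        rq[0]: us2ticks(10000)   -- 10 ms
 *        rq[1]: us2ticks(20000)   -- 20 ms
 *        ...
 *        rq[i]: us2ticks((i + 1) * 10000)
 */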


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * 'pri' greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
        link_t head;
        runq_t *r;
        int i, n;

        list_initialize(&head);
        spinlock_lock(&CPU->lock);
        if (CPU->needs_relink > NEEDS_RELINK_MAX) {
                for (i = start; i < RQ_COUNT - 1; i++) {
                        /* remember and empty rq[i + 1] */
                        r = &CPU->rq[i + 1];
                        spinlock_lock(&r->lock);
                        list_concat(&head, &r->rq_head);
                        n = r->n;
                        r->n = 0;
                        spinlock_unlock(&r->lock);

                        /* append rq[i + 1] to rq[i] */
                        r = &CPU->rq[i];
                        spinlock_lock(&r->lock);
                        list_concat(&r->rq_head, &head);
                        r->n += n;
                        spinlock_unlock(&r->lock);
                }
                CPU->needs_relink = 0;
        }
        spinlock_unlock(&CPU->lock);
}
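
/*
 * Editor's worked example (not in the original revision): assuming
 * RQ_COUNT == 4 and start == 1, the loop above first moves the threads
 * of rq[2] into rq[1] and then the threads of rq[3] into rq[2],
 * promoting the threads of each affected queue by one priority level.
 */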


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
        volatile pri_t pri;

        pri = cpu_priority_high();

        if (haltstate)
                halt();

        if (THREAD) {
                spinlock_lock(&THREAD->lock);
                fpu_context_save(&(THREAD->saved_fpu_context));
                if (!context_save(&THREAD->saved_context)) {
                        /*
                         * This is the place where threads leave scheduler();
                         */
                        before_thread_runs();
                        spinlock_unlock(&THREAD->lock);
                        cpu_priority_restore(THREAD->saved_context.pri);
                        return;
                }
                THREAD->saved_context.pri = pri;
        }

        /*
         * We may not keep the old stack.
         * Reason: If we kept the old stack and got blocked, for instance, in
         * find_best_thread(), the old thread could get rescheduled by another
         * CPU and overwrite the part of its own stack that was also used by
         * the scheduler on this CPU.
         *
         * Moreover, we have to bypass the compiler-generated POP sequence
         * which is fooled by SP being set to the very top of the stack.
         * Therefore the scheduler() function continues in
         * scheduler_separated_stack().
         */
        context_save(&CPU->saved_context);
        context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
        context_restore(&CPU->saved_context);
        /* not reached */
}
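
/*
 * Editor's note (an assumption about context_set() semantics, not in the
 * original revision): context_set() is understood to redirect the saved
 * PC to FADDR(scheduler_separated_stack) and the saved SP to the top of
 * the CPU-private stack, so the context_restore() above "returns" into
 * scheduler_separated_stack() on a fresh stack and never falls through.
 */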


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using a new stack. It handles the actual
 * context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
        int priority;

        if (THREAD) {
                switch (THREAD->state) {
                case Running:
                        THREAD->state = Ready;
                        spinlock_unlock(&THREAD->lock);
                        thread_ready(THREAD);
                        break;

                case Exiting:
                        frame_free((__address) THREAD->kstack);
                        if (THREAD->ustack) {
                                frame_free((__address) THREAD->ustack);
                        }

                        /*
                         * Detach from the containing task.
                         */
                        spinlock_lock(&TASK->lock);
                        list_remove(&THREAD->th_link);
                        spinlock_unlock(&TASK->lock);

                        spinlock_unlock(&THREAD->lock);

                        spinlock_lock(&threads_lock);
                        list_remove(&THREAD->threads_link);
                        spinlock_unlock(&threads_lock);

                        spinlock_lock(&CPU->lock);
                        if (CPU->fpu_owner == THREAD)
                                CPU->fpu_owner = NULL;
                        spinlock_unlock(&CPU->lock);

                        free(THREAD);

                        break;

                case Sleeping:
                        /*
                         * Prefer the thread after it's woken up.
                         */
                        THREAD->pri = -1;

                        /*
                         * We need to release wq->lock which we locked in waitq_sleep().
                         * The address of wq->lock is kept in THREAD->sleep_queue.
                         */
                        spinlock_unlock(&THREAD->sleep_queue->lock);

                        /*
                         * Check for possible requests for out-of-context invocation.
                         */
                        if (THREAD->call_me) {
                                THREAD->call_me(THREAD->call_me_with);
                                THREAD->call_me = NULL;
                                THREAD->call_me_with = NULL;
                        }

                        spinlock_unlock(&THREAD->lock);

                        break;

                default:
                        /*
                         * Entering this state is unexpected.
                         */
                        panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                        break;
                }
                THREAD = NULL;
        }

        THREAD = find_best_thread();

        spinlock_lock(&THREAD->lock);
        priority = THREAD->pri;
        spinlock_unlock(&THREAD->lock);

        relink_rq(priority);

        spinlock_lock(&THREAD->lock);

        /*
         * If both the old and the new task are the same, lots of work is avoided.
         */
        if (TASK != THREAD->task) {
                vm_t *m1 = NULL;
                vm_t *m2;

                if (TASK) {
                        spinlock_lock(&TASK->lock);
                        m1 = TASK->vm;
                        spinlock_unlock(&TASK->lock);
                }

                spinlock_lock(&THREAD->task->lock);
                m2 = THREAD->task->vm;
                spinlock_unlock(&THREAD->task->lock);

                /*
                 * Note that it is possible for two tasks to share one vm mapping.
                 */
                if (m1 != m2) {
                        /*
                         * Both tasks and vm mappings are different.
                         * Replace the old one with the new one.
                         */
                        if (m1) {
                                vm_uninstall(m1);
                        }
                        vm_install(m2);
                }
                TASK = THREAD->task;
        }

        THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
        printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
#endif

        context_restore(&THREAD->saved_context);
        /* not reached */
}
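
/*
 * Editor's summary (not in the original revision) of the switch above,
 * from the outgoing thread's point of view:
 *
 *        Running  -> Ready      requeued via thread_ready()
 *        Exiting  -> destroyed  stacks freed, detached from task, free()'d
 *        Sleeping -> suspended  wait queue lock from waitq_sleep() released
 *
 * Any other state is a kernel bug and triggers panic().
 */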


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread, supervising thread supplies
 * for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
        thread_t *t;
        int count, i, j, k = 0;
        pri_t pri;

loop:
        /*
         * Sleep until there's some work to do.
         */
        waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
        /*
         * Calculate the number of threads that will be migrated/stolen from
         * other CPU's. Note that the situation can change between two
         * passes. Each time, get the most up-to-date counts.
         */
        pri = cpu_priority_high();
        spinlock_lock(&CPU->lock);
        count = nrdy / config.cpu_active;
        count -= CPU->nrdy;
        spinlock_unlock(&CPU->lock);
        cpu_priority_restore(pri);

        if (count <= 0)
                goto satisfied;

        /*
         * Search the lowest-priority queues on all CPU's first and the
         * highest-priority queues on all CPU's last.
         */
        for (j = RQ_COUNT - 1; j >= 0; j--) {
                for (i = 0; i < config.cpu_active; i++) {
                        link_t *l;
                        runq_t *r;
                        cpu_t *cpu;

                        cpu = &cpus[(i + k) % config.cpu_active];
                        r = &cpu->rq[j];

                        /*
                         * Not interested in ourselves.
                         * No interrupt disabling is required because kcpulb is X_WIRED.
                         */
                        if (CPU == cpu)
                                continue;

restart:
                        pri = cpu_priority_high();
                        spinlock_lock(&r->lock);
                        if (r->n == 0) {
                                spinlock_unlock(&r->lock);
                                cpu_priority_restore(pri);
                                continue;
                        }

                        t = NULL;
                        l = r->rq_head.prev;    /* search rq from the back */
                        while (l != &r->rq_head) {
                                t = list_get_instance(l, thread_t, rq_link);
                                /*
                                 * We don't want to steal CPU-wired threads, nor threads
                                 * that have already been stolen. The latter prevents
                                 * threads from migrating between CPU's without ever
                                 * being run. We also don't want to steal threads whose
                                 * FPU context is still in the CPU.
                                 */
                                spinlock_lock(&t->lock);
                                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                                        /*
                                         * Remove t from r.
                                         */

                                        spinlock_unlock(&t->lock);

                                        /*
                                         * Here we have to avoid deadlock with relink_rq(),
                                         * because it locks cpu and r in a different order than we do.
                                         */
                                        if (!spinlock_trylock(&cpu->lock)) {
                                                /* Release all locks and try again. */
                                                spinlock_unlock(&r->lock);
                                                cpu_priority_restore(pri);
                                                goto restart;
                                        }
                                        cpu->nrdy--;
                                        spinlock_unlock(&cpu->lock);

                                        spinlock_lock(&nrdylock);
                                        nrdy--;
                                        spinlock_unlock(&nrdylock);

                                        r->n--;
                                        list_remove(&t->rq_link);

                                        break;
                                }
                                spinlock_unlock(&t->lock);
                                l = l->prev;
                                t = NULL;
                        }
                        spinlock_unlock(&r->lock);

                        if (t) {
                                /*
                                 * Ready t on the local CPU.
                                 */
                                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
#endif
                                t->flags |= X_STOLEN;
                                spinlock_unlock(&t->lock);

                                thread_ready(t);

                                cpu_priority_restore(pri);

                                if (--count == 0)
                                        goto satisfied;

                                /*
                                 * We are not satisfied yet, focus on another CPU next time.
                                 */
                                k++;

                                continue;
                        }
                        cpu_priority_restore(pri);
                }
        }

        if (CPU->nrdy) {
                /*
                 * Be a little bit light-weight and let migrated threads run.
                 */
                scheduler();
        }
        else {
                /*
                 * We failed to migrate a single thread.
                 * Something more sophisticated should be done.
                 */
                scheduler();
        }

        goto not_satisfied;

satisfied:
        /*
         * Tell find_best_thread() to wake us up later again.
         */
        CPU->kcpulbstarted = 0;
        goto loop;
}
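
/*
 * Editor's note (not in the original revision) on the spinlock_trylock()
 * above: relink_rq() takes CPU->lock and only then an rq lock, while
 * kcpulb() already holds a remote rq lock when it needs that CPU's lock.
 * Taking the second lock unconditionally in both orders could deadlock,
 * so kcpulb() backs off with trylock and restarts the scan instead.
 */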

#endif /* __SMP__ */