/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be protected by a spinlock.
 */

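/* Global count of ready threads; protected by nrdylock (see the note above). */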
spinlock_t nrdylock;
volatile int nrdy;

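/*
 * Prepare the current thread for execution: perform architecture-specific
 * setup and restore the thread's saved FPU context.
 */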
void before_thread_runs(void)
{
	before_thread_runs_arch();
	fpu_context_restore(&(THREAD->saved_fpu_context));
}

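/*
 * Initialize the scheduler's global state, i.e. the spinlock protecting
 * the global count of ready threads.
 */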
void scheduler_init(void)
{
	spinlock_initialize(&nrdylock);
}

/* Runs and returns with the CPU priority raised (cpu_priority_high()'d). */
struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

loop:
	cpu_priority_high();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_low();

	if (n == 0) {
#ifdef __SMP__
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set the CPU-private flag that kcpulb has been started.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
#endif /* __SMP__ */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This saves energy and benefits hyperthreading.
		 * On the other hand, several hardware interrupts can be ignored.
		 */
		cpu_sleep();
		goto loop;
	}

	cpu_priority_high();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

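		/*
		 * A runnable thread has been found; account for its removal
		 * from the ready counts, both global and per-CPU.
		 */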
		spinlock_lock(&nrdylock);
		nrdy--;
		spinlock_unlock(&nrdylock);

		spinlock_lock(&CPU->lock);
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

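		/*
		 * The time quantum grows with the run queue index:
		 * a thread taken from queue i gets (i+1)*10 ms to run.
		 */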
		t->ticks = us2ticks((i + 1) * 10000);
		t->pri = i;	/* record the possibly corrected rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated again
		 * when the need for load balancing emerges.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;
}

/*
 * This function prevents low-priority threads from starving in rq's.
 * When it decides to relink rq's, it reconnects the respective pointers
 * so that, as a result, threads with 'pri' greater than or equal to
 * 'start' are moved to a higher-priority queue.
 */
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);
}

/*
 * The scheduler.
 */
void scheduler(void)
{
	volatile pri_t pri;

	pri = cpu_priority_high();

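	/*
	 * If the kernel has entered the halt state, stop this CPU for good.
	 */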
	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
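		/*
		 * Save the outgoing thread's FPU context; before_thread_runs()
		 * restores it when the thread is scheduled again.
		 */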
		fpu_context_save(&(THREAD->saved_fpu_context));
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler().
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}
		THREAD->saved_context.pri = pri;
	}

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE - 8];
	CPU->saved_context.pc = (__address) scheduler_separated_stack;
	context_restore(&CPU->saved_context);
	/* not reached */
}

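/*
 * Part two of the scheduler, running on the per-CPU stack.
 * It disposes of the previous thread according to its state and then
 * switches to the best ready thread returned by find_best_thread().
 */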
void scheduler_separated_stack(void)
{
	int priority;

	if (THREAD) {
		switch (THREAD->state) {
		case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

			free(THREAD);

			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->pri = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * The address of wq is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering this state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, a lot of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both the tasks and the vm mappings are different.
			 * Replace the old mapping with the new one.
			 */
			if (m1) {
				vm_uninstall(m1);
			}
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
#endif

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It supervises the supply of threads for the CPU it is wired to.
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation can change between two
	 * passes; each time, get the most up-to-date counts.
	 */
	pri = cpu_priority_high();
	spinlock_lock(&CPU->lock);
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	cpu_priority_restore(pri);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

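			/*
			 * Start each scan at a CPU offset by k so that successive
			 * unsatisfied passes focus on different CPUs first.
			 */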
			cpu = &cpus[(i + k) % config.cpu_active];
			r = &cpu->rq[j];

			/*
			 * Not interested in ourselves.
			 * This doesn't require interrupt disabling because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:		pri = cpu_priority_high();
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads
				 * that have already been stolen. The latter prevents
				 * threads from migrating between CPUs without ever being run.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN))) {
					/*
					 * Remove t from r.
					 */

					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					spinlock_lock(&nrdylock);
					nrdy--;
					spinlock_unlock(&nrdylock);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on the local CPU.
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let the migrated threads run.
		 */
		scheduler();
	}
	else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}

#endif /* __SMP__ */