/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be protected by a spinlock.
 */
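
/*
 * The locked access pattern the note above mandates (a minimal sketch;
 * the same sequence recurs throughout this file whenever the ready-thread
 * counters are updated):
 *
 *     spinlock_lock(&nrdylock);
 *     nrdy--;
 *     spinlock_unlock(&nrdylock);
 */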

spinlock_t nrdylock;    /* protects nrdy */
volatile int nrdy;      /* number of ready threads in the whole system */

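/*
 * Run pre-dispatch housekeeping for THREAD: architecture-specific
 * preparation followed by restoring the thread's saved FPU context.
 */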
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore(&(THREAD->saved_fpu_context));
}

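/*
 * One-time scheduler initialization: set up the spinlock that guards the
 * global ready-thread counter.
 */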
void scheduler_init(void)
{
    spinlock_initialize(&nrdylock);
}

/*
 * Find a thread to run next. Returns with interrupts disabled
 * (cpu_priority_high()'d).
 */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
#ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag that the kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
#endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         * On the other hand, several hardware interrupts can be missed.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        spinlock_lock(&nrdylock);
        nrdy--;
        spinlock_unlock(&nrdylock);

        spinlock_lock(&CPU->lock);
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        /* Quantum grows with the queue index: 10 ms for rq[0], 20 ms for rq[1], ... */
        t->ticks = us2ticks((i + 1) * 10000);
        t->pri = i; /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated when the need
         * for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;
}

/*
 * This function prevents low priority threads from starving in rq's.
 * When it decides to relink rq's, it reconnects the respective pointers
 * so that, as a result, threads with 'pri' greater than or equal to
 * 'start' are moved to a higher-priority queue.
 */
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);
}
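
/*
 * Worked example of the relink above (a sketch, assuming RQ_COUNT == 4 and
 * start == 0): each pass of the loop hoists an entire queue one priority
 * level, so rq[0] receives the old rq[1], rq[1] the old rq[2], rq[2] the
 * old rq[3], and rq[3] ends up empty.
 */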

/*
 * The scheduler.
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        fpu_context_save(&(THREAD->saved_fpu_context));
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler().
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        THREAD->saved_context.pri = pri;
    }

    /*
     * We must not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE - 8];
    CPU->saved_context.pc = (__address) scheduler_separated_stack;
    context_restore(&CPU->saved_context);
    /* not reached */
}

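/*
 * Second half of scheduler(), running on the CPU-private stack: put the
 * old THREAD away according to its state, pick a new thread via
 * find_best_thread(), switch address spaces if the task changed, and
 * restore the new thread's context.
 */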
void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            /*
             * Drop the CPU's FPU ownership record if this thread still holds it.
             */
            spinlock_lock(&THREAD->cpu->lock);
            if (THREAD->cpu->arch.fpu_owner == THREAD)
                THREAD->cpu->arch.fpu_owner = NULL;
            spinlock_unlock(&THREAD->cpu->lock);

            free(THREAD);

            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->pri = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * The address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering any other state here is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, a lot of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both tasks and vm mappings are different.
             * Replace the old one with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
#endif

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It supervises the supply of threads for the CPU it is wired to.
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation can have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);
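
    /*
     * Example (a sketch): with nrdy == 8, config.cpu_active == 2 and
     * CPU->nrdy == 1, the system average is 4 ready threads per CPU,
     * so count == 3 and we will try to steal three threads from the
     * busier CPUs.
     */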

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];
            r = &cpu->rq[j];

            /*
             * Not interested in ourselves.
             * This doesn't require interrupt disabling because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;

restart:
            pri = cpu_priority_high();
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev; /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads that
                 * have already been stolen. The latter prevents threads from
                 * migrating between CPUs without ever being run.
                 * We also don't want to steal threads whose FPU context is
                 * still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if (!(t->flags & (X_WIRED | X_STOLEN)) && !t->fpu_context_engaged) {
                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    spinlock_lock(&nrdylock);
                    nrdy--;
                    spinlock_unlock(&nrdylock);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on the local CPU.
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */