/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be protected by a spinlock.
 */

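/*
 * nrdy is the system-wide count of ready threads, summed over all CPUs.
 * It is decremented with atomic_dec() in find_best_thread() and kcpulb();
 * the matching increment presumably happens when a thread becomes ready
 * (in thread_ready(), which is defined elsewhere).
 */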
volatile int nrdy;


/** Take actions before a thread runs
 *
 * Perform the actions that need to be taken before
 * a thread starts running: architecture-specific
 * preparation and restoring the thread's FPU context.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore(&(THREAD->saved_fpu_context));
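    /*
     * The restore above pairs with the fpu_context_save() call in
     * scheduler(): the FPU state saved when the thread was switched
     * out is reloaded just before the thread resumes execution.
     */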
}


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
#ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag that the kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
#endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This saves energy and benefits hyperthreaded CPUs.
         * On the other hand, several hardware interrupts can be ignored.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&nrdy);

        spinlock_lock(&CPU->lock);
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i+1)*10000);
        t->pri = i;    /* correct pri to the rq index the thread was actually taken from */
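        /*
         * Note: the time quantum scales with the queue index. A thread
         * taken from rq[i] receives (i + 1) * 10000 us, i.e. (i + 1) * 10 ms,
         * worth of ticks, so lower-priority threads are scheduled less
         * often but run longer once scheduled.
         */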

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * when the need for load balancing arises again.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * 'pri' greater than or equal to 'start' are moved to the next
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
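    /*
     * Illustrative example: with start == 1 and RQ_COUNT == 4, the loop
     * below first moves the contents of rq[2] into rq[1], then the
     * contents of rq[3] into rq[2], so every affected thread climbs
     * exactly one queue. (RQ_COUNT == 4 is assumed here only for the
     * sake of the example.)
     */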
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        fpu_context_save(&(THREAD->saved_fpu_context));
        if (!context_save(&THREAD->saved_context)) {
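            /*
             * Note on the context_save() convention: when called
             * directly, it saves the context and returns true, so this
             * branch is skipped. When the thread is later resumed via
             * context_restore(), execution continues here as if
             * context_save() had returned false (a setjmp()/longjmp()
             * style convention with the return values inverted). This
             * branch is therefore the resume path.
             */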
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        THREAD->saved_context.pri = pri;
    }

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
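    /*
     * How the jump works: context_save() above fills CPU->saved_context
     * with the current register state, and context_set() then overrides
     * its program counter and stack pointer. The context_restore() below
     * thus continues execution in scheduler_separated_stack() on the
     * CPU's private stack.
     */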
    context_restore(&CPU->saved_context);
    /* not reached */
}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the
 * actual context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }
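            /*
             * Freeing THREAD->kstack above is safe only because we are
             * no longer running on it: scheduler() switched execution
             * to the CPU's private stack before calling this function.
             */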

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if (CPU->fpu_owner == THREAD)
                CPU->fpu_owner = NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->pri = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering this state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);
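    /*
     * relink_rq() is passed the chosen thread's priority, presumably
     * because find_best_thread() scans the queues from index 0 upwards:
     * every queue with a lower index than 'priority' was found empty,
     * so relinking below that point would accomplish nothing.
     */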

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, a lot of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both the tasks and their vm mappings differ.
             * Replace the old mapping with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
#endif

    context_restore(&THREAD->saved_context);
    /* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load-balancing thread; it supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);
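    /*
     * We are woken up here by find_best_thread() when it finds its CPU's
     * run queues empty; CPU->kcpulbstarted (reset at the 'satisfied'
     * label below) keeps it from issuing repeated wakeups while we are
     * already working.
     */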

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes; each pass therefore works with the most up-to-date counts.
     */
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);
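    /*
     * Illustrative arithmetic: with nrdy == 12 ready threads system-wide,
     * config.cpu_active == 4 CPUs and CPU->nrdy == 1, the average load is
     * 12 / 4 == 3, so count == 3 - 1 == 2 and this CPU tries to steal two
     * threads to catch up with the average. (The numbers are made up for
     * the example.)
     */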

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];
            r = &cpu->rq[j];

            /*
             * Not interested in ourselves.
             * No interrupt disabling is required, because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
restart:
            pri = cpu_priority_high();
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter rule prevents
                 * threads from migrating between CPUs without ever being
                 * run. We also don't want to steal threads whose FPU
                 * context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let the migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */