/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>

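/** Number of threads ready to run in the whole system; each CPU also keeps its own count in CPU->nrdy. */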
volatile int nrdy;


/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
	fpu_context_restore(&(THREAD->saved_fpu_context));
}


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

loop:
	cpu_priority_high();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_low();

	if (n == 0) {
#ifdef __SMP__
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set the CPU-private flag that the kcpulb has been started.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
#endif /* __SMP__ */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 * On the other hand, several hardware interrupts can be ignored.
		 */
		cpu_sleep();
		goto loop;
	}

	cpu_priority_high();

	i = 0;
retry:
	for (; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		/* avoid deadlock with relink_rq() */
		if (!spinlock_trylock(&CPU->lock)) {
			/*
			 * Unlock r and try again.
			 */
			spinlock_unlock(&r->lock);
			goto retry;
		}
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

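		/*
		 * The time quantum grows with the run queue index:
		 * (i+1)*10000 us, i.e. an extra 10 ms per priority level.
		 */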
		t->ticks = us2ticks((i+1)*10000);
		t->pri = i;	/* update to the correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated when load balancing needs arise.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that threads with 'pri'
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
	volatile pri_t pri;

	pri = cpu_priority_high();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
		fpu_context_save(&(THREAD->saved_fpu_context));
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}
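		/*
		 * Remember the interrupt priority level returned by
		 * cpu_priority_high() so that it can be restored by
		 * cpu_priority_restore() when this thread resumes above.
		 */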
		THREAD->saved_context.pri = pri;
	}

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on a new stack. It handles the actual
 * context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
	int priority;

	if (THREAD) {
		switch (THREAD->state) {
		case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

			spinlock_lock(&CPU->lock);
			if (CPU->fpu_owner == THREAD)
				CPU->fpu_owner = NULL;
			spinlock_unlock(&CPU->lock);

			free(THREAD);

			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->pri = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * The thread is in an unexpected state.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}
347 | 348 | ||
348 | THREAD = find_best_thread(); |
349 | THREAD = find_best_thread(); |
349 | 350 | ||
350 | spinlock_lock(&THREAD->lock); |
351 | spinlock_lock(&THREAD->lock); |
351 | priority = THREAD->pri; |
352 | priority = THREAD->pri; |
352 | spinlock_unlock(&THREAD->lock); |
353 | spinlock_unlock(&THREAD->lock); |
353 | 354 | ||
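	/*
	 * Occasionally relink the run queues so that threads in
	 * lower-priority queues do not starve; see relink_rq().
	 */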
	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, a lot of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both tasks and vm mappings are different.
			 * Replace the old one with the new one.
			 */
			if (m1) {
				vm_uninstall(m1);
			}
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
#endif

	context_restore(&THREAD->saved_context);
	/* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread; it supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can have changed between two
	 * passes. Each time get the most up-to-date counts.
	 */
	pri = cpu_priority_high();
	spinlock_lock(&CPU->lock);
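	/*
	 * Compute how many threads this CPU would have to steal to reach
	 * the average number of ready threads per active CPU.
	 */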
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	cpu_priority_restore(pri);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * No interrupt disabling is required because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:
			pri = cpu_priority_high();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads nor threads that have
				 * already been stolen. The latter prevents threads from migrating
				 * between CPU's without ever being run.
				 * We also don't want to steal threads whose FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {

					/*
					 * Remove t from r.
					 */

					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	}
	else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}

#endif /* __SMP__ */