Rev 258 → Rev 309
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <print.h>
#include <mm/frame.h>
#include <mm/heap.h>
#include <debug.h>

volatile count_t nrdy;


/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
        before_thread_runs_arch();
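        /*
         * With FPU_LAZY, the FPU stays enabled only for its current owner;
         * a different thread that touches the FPU should end up (presumably
         * via the FPU-disabled trap) in scheduler_fpu_lazy_request(), which
         * switches the FPU context on demand.
         */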
#ifdef FPU_LAZY
        if (THREAD == CPU->fpu_owner)
                fpu_enable();
        else
                fpu_disable();
#else
        fpu_enable();
        if (THREAD->fpu_context_exists)
                fpu_context_restore(&(THREAD->saved_fpu_context));
        else {
                fpu_init();
                THREAD->fpu_context_exists = 1;
        }
#endif
}

#ifdef FPU_LAZY
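/** Request lazy FPU context switch
 *
 * Save the FPU context of the previous owner (if any), restore or
 * initialize the context of the running thread and make that thread
 * the new FPU owner. Presumably invoked from architecture code when
 * a thread touches the FPU while FPU_LAZY is in effect; the call site
 * is not shown in this file.
 *
 */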
void scheduler_fpu_lazy_request(void)
{
        fpu_enable();
        if (CPU->fpu_owner != NULL) {
                fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
                /* don't prevent migration */
                CPU->fpu_owner->fpu_context_engaged = 0;
        }
        if (THREAD->fpu_context_exists)
                fpu_context_restore(&THREAD->saved_fpu_context);
        else {
                fpu_init();
                THREAD->fpu_context_exists = 1;
        }
        CPU->fpu_owner = THREAD;
        THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
        thread_t *t;
        runq_t *r;
        int i, n;

        ASSERT(CPU != NULL);

loop:
        cpu_priority_high();

        spinlock_lock(&CPU->lock);
        n = CPU->nrdy;
        spinlock_unlock(&CPU->lock);

        cpu_priority_low();

        if (n == 0) {
#ifdef __SMP__
                /*
                 * If the load balancing thread is not running, wake it up and
                 * set the CPU-private flag that the kcpulb has been started.
                 */
                if (test_and_set(&CPU->kcpulbstarted) == 0) {
                        waitq_wakeup(&CPU->kcpulb_wq, 0);
                        goto loop;
                }
#endif /* __SMP__ */

                /*
                 * Since there was nothing to run, the CPU goes to sleep
                 * until a hardware interrupt or an IPI arrives.
                 * This improves energy saving and hyperthreading.
                 * On the other hand, several hardware interrupts can be ignored.
                 */
                cpu_sleep();
                goto loop;
        }

        cpu_priority_high();

        i = 0;
retry:
        for (; i < RQ_COUNT; i++) {
                r = &CPU->rq[i];
                spinlock_lock(&r->lock);
                if (r->n == 0) {
                        /*
                         * If this queue is empty, try a lower-priority queue.
                         */
                        spinlock_unlock(&r->lock);
                        continue;
                }

                /* avoid deadlock with relink_rq() */
                if (!spinlock_trylock(&CPU->lock)) {
                        /*
                         * Unlock r and try again.
                         */
                        spinlock_unlock(&r->lock);
                        goto retry;
                }
                CPU->nrdy--;
                spinlock_unlock(&CPU->lock);

                atomic_dec((int *) &nrdy);
                r->n--;

                /*
                 * Take the first thread from the queue.
                 */
                t = list_get_instance(r->rq_head.next, thread_t, rq_link);
                list_remove(&t->rq_link);

                spinlock_unlock(&r->lock);

                spinlock_lock(&t->lock);
                t->cpu = CPU;

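                /*
                 * Note: the time quantum grows with the run queue index,
                 * i.e. lower-priority threads receive longer slices:
                 * (i + 1) * 10000 us, that is 10 ms for rq[0], 20 ms for
                 * rq[1], and so on.
                 */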
                t->ticks = us2ticks((i + 1) * 10000);
                t->pri = i;     /* eventually correct rq index */

                /*
                 * Clear the X_STOLEN flag so that t can be migrated when
                 * the need for load balancing arises again.
                 */
                t->flags &= ~X_STOLEN;
                spinlock_unlock(&t->lock);

                return t;
        }
        goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * 'pri' greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
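/*
 * For illustration: a call with start == 0 moves the threads of rq[1]
 * to rq[0], then those of rq[2] to rq[1], and so on up to the last run
 * queue, boosting every affected thread by one priority level.
 */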
void relink_rq(int start)
{
        link_t head;
        runq_t *r;
        int i, n;

        list_initialize(&head);
        spinlock_lock(&CPU->lock);
        if (CPU->needs_relink > NEEDS_RELINK_MAX) {
                for (i = start; i < RQ_COUNT - 1; i++) {
                        /* remember and empty rq[i + 1] */
                        r = &CPU->rq[i + 1];
                        spinlock_lock(&r->lock);
                        list_concat(&head, &r->rq_head);
                        n = r->n;
                        r->n = 0;
                        spinlock_unlock(&r->lock);

                        /* append rq[i + 1] to rq[i] */
                        r = &CPU->rq[i];
                        spinlock_lock(&r->lock);
                        list_concat(&r->rq_head, &head);
                        r->n += n;
                        spinlock_unlock(&r->lock);
                }
                CPU->needs_relink = 0;
        }
        spinlock_unlock(&CPU->lock);

}


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
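/*
 * In short: save the context of the outgoing thread (if any), switch to
 * the CPU's private stack and continue in scheduler_separated_stack(),
 * where the next thread is picked and dispatched.
 */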
void scheduler(void)
{
        volatile pri_t pri;

        ASSERT(CPU != NULL);

        pri = cpu_priority_high();

        if (haltstate)
                halt();

        if (THREAD) {
                spinlock_lock(&THREAD->lock);
#ifndef FPU_LAZY
                fpu_context_save(&(THREAD->saved_fpu_context));
#endif
                if (!context_save(&THREAD->saved_context)) {
                        /*
                         * This is the place where threads leave scheduler();
                         */
                        before_thread_runs();
                        spinlock_unlock(&THREAD->lock);
                        cpu_priority_restore(THREAD->saved_context.pri);
                        return;
                }

                /*
                 * The CPU priority of the preempted thread is recorded here
                 * to facilitate scheduler() invocations from
                 * cpu_priority_high()'ed code (e.g. waitq_sleep_timeout()).
                 */
                THREAD->saved_context.pri = pri;
        }

        /*
         * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
         * and the preemption counter. At this point THE could be coming either
         * from THREAD's or CPU's stack.
         */
        the_copy(THE, (the_t *) CPU->stack);

        /*
         * We may not keep the old stack.
         * Reason: If we kept the old stack and got blocked, for instance, in
         * find_best_thread(), the old thread could get rescheduled by another
         * CPU and overwrite the part of its own stack that was also used by
         * the scheduler on this CPU.
         *
         * Moreover, we have to bypass the compiler-generated POP sequence
         * which is fooled by SP being set to the very top of the stack.
         * Therefore the scheduler() function continues in
         * scheduler_separated_stack().
         */
        context_save(&CPU->saved_context);
        context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
        context_restore(&CPU->saved_context);
        /* not reached */
}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the
 * actual context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
        int priority;

        ASSERT(CPU != NULL);

        if (THREAD) {
                switch (THREAD->state) {
                case Running:
                        THREAD->state = Ready;
                        spinlock_unlock(&THREAD->lock);
                        thread_ready(THREAD);
                        break;

                case Exiting:
                        frame_free((__address) THREAD->kstack);
                        if (THREAD->ustack) {
                                frame_free((__address) THREAD->ustack);
                        }

                        /*
                         * Detach from the containing task.
                         */
                        spinlock_lock(&TASK->lock);
                        list_remove(&THREAD->th_link);
                        spinlock_unlock(&TASK->lock);

                        spinlock_unlock(&THREAD->lock);

                        spinlock_lock(&threads_lock);
                        list_remove(&THREAD->threads_link);
                        spinlock_unlock(&threads_lock);

                        spinlock_lock(&CPU->lock);
                        if (CPU->fpu_owner == THREAD)
                                CPU->fpu_owner = NULL;
                        spinlock_unlock(&CPU->lock);

                        free(THREAD);

                        break;

                case Sleeping:
                        /*
                         * Prefer the thread after it's woken up.
                         */
                        THREAD->pri = -1;

                        /*
                         * We need to release wq->lock which we locked in waitq_sleep().
                         * Address of wq->lock is kept in THREAD->sleep_queue.
                         */
                        spinlock_unlock(&THREAD->sleep_queue->lock);

                        /*
                         * Check for possible requests for out-of-context invocation.
                         */
                        if (THREAD->call_me) {
                                THREAD->call_me(THREAD->call_me_with);
                                THREAD->call_me = NULL;
                                THREAD->call_me_with = NULL;
                        }

                        spinlock_unlock(&THREAD->lock);

                        break;

                default:
                        /*
                         * Entering the scheduler in this state is unexpected.
                         */
                        panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                        break;
                }
                THREAD = NULL;
        }


        THREAD = find_best_thread();

        spinlock_lock(&THREAD->lock);
        priority = THREAD->pri;
        spinlock_unlock(&THREAD->lock);

        relink_rq(priority);

        spinlock_lock(&THREAD->lock);

        /*
         * If both the old and the new task are the same, lots of work is avoided.
         */
        if (TASK != THREAD->task) {
                vm_t *m1 = NULL;
                vm_t *m2;

                if (TASK) {
                        spinlock_lock(&TASK->lock);
                        m1 = TASK->vm;
                        spinlock_unlock(&TASK->lock);
                }

                spinlock_lock(&THREAD->task->lock);
                m2 = THREAD->task->vm;
                spinlock_unlock(&THREAD->task->lock);

                /*
                 * Note that it is possible for two tasks to share one vm mapping.
                 */
                if (m1 != m2) {
                        /*
                         * Both tasks and vm mappings are different.
                         * Replace the old one with the new one.
                         */
                        vm_install(m2);
                }
                TASK = THREAD->task;
        }

        THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
        printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
#endif

        /*
         * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
         * to the thread's stack.
         */
        the_copy(THE, (the_t *) THREAD->kstack);

        context_restore(&THREAD->saved_context);
        /* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
        thread_t *t;
        int count, i, j, k = 0;
        pri_t pri;

loop:
        /*
         * Sleep until there's some work to do.
         */
        waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
        /*
         * Calculate the number of threads that will be migrated/stolen from
         * other CPU's. Note that the situation can have changed between two
         * passes. Each time get the most up-to-date counts.
         */
        pri = cpu_priority_high();
        spinlock_lock(&CPU->lock);
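        /*
         * count is the number of threads this CPU is short of the
         * system-wide average of ready threads per active CPU.
         */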
        count = nrdy / config.cpu_active;
        count -= CPU->nrdy;
        spinlock_unlock(&CPU->lock);
        cpu_priority_restore(pri);

        if (count <= 0)
                goto satisfied;

        /*
         * Search the lowest-priority queues on all CPU's first and the
         * highest-priority queues on all CPU's last.
         */
        for (j = RQ_COUNT - 1; j >= 0; j--) {
                for (i = 0; i < config.cpu_active; i++) {
                        link_t *l;
                        runq_t *r;
                        cpu_t *cpu;

                        cpu = &cpus[(i + k) % config.cpu_active];

                        /*
                         * Not interested in ourselves.
                         * Doesn't require interrupt disabling because kcpulb is X_WIRED.
                         */
                        if (CPU == cpu)
                                continue;

restart:                pri = cpu_priority_high();
                        r = &cpu->rq[j];
                        spinlock_lock(&r->lock);
                        if (r->n == 0) {
                                spinlock_unlock(&r->lock);
                                cpu_priority_restore(pri);
                                continue;
                        }

                        t = NULL;
                        l = r->rq_head.prev;    /* search rq from the back */
                        while (l != &r->rq_head) {
                                t = list_get_instance(l, thread_t, rq_link);
                                /*
                                 * We don't want to steal CPU-wired threads nor threads
                                 * already stolen. The latter prevents threads from
                                 * migrating between CPU's without ever being run.
                                 * We also don't want to steal threads whose FPU context
                                 * is still in the CPU.
                                 */
                                spinlock_lock(&t->lock);
                                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {

                                        /*
                                         * Remove t from r.
                                         */

                                        spinlock_unlock(&t->lock);

                                        /*
                                         * Here we have to avoid deadlock with relink_rq(),
                                         * because it locks cpu and r in a different order than we do.
                                         */
                                        if (!spinlock_trylock(&cpu->lock)) {
                                                /* Release all locks and try again. */
                                                spinlock_unlock(&r->lock);
                                                cpu_priority_restore(pri);
                                                goto restart;
                                        }
                                        cpu->nrdy--;
                                        spinlock_unlock(&cpu->lock);

                                        atomic_dec((int *) &nrdy);

                                        r->n--;
                                        list_remove(&t->rq_link);

                                        break;
                                }
                                spinlock_unlock(&t->lock);
                                l = l->prev;
                                t = NULL;
                        }
                        spinlock_unlock(&r->lock);

                        if (t) {
                                /*
                                 * Ready t on local CPU
                                 */
                                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
#endif
                                t->flags |= X_STOLEN;
                                spinlock_unlock(&t->lock);

                                thread_ready(t);

                                cpu_priority_restore(pri);

                                if (--count == 0)
                                        goto satisfied;

                                /*
                                 * We are not satisfied yet, focus on another CPU next time.
                                 */
                                k++;

                                continue;
                        }
                        cpu_priority_restore(pri);
                }
        }

        if (CPU->nrdy) {
                /*
                 * Be a little bit light-weight and let migrated threads run.
                 */
                scheduler();
        }
        else {
                /*
                 * We failed to migrate a single thread.
                 * Something more sophisticated should be done.
                 */
                scheduler();
        }

        goto not_satisfied;

satisfied:
        /*
         * Tell find_best_thread() to wake us up later again.
         */
        CPU->kcpulbstarted = 0;
        goto loop;
}

#endif /* __SMP__ */