/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

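/** Number of ready threads in the system.
 *
 * Updated alongside the per-CPU counters (CPU->nrdy); kcpulb() uses it to
 * compute the average number of ready threads per active CPU.
 */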
atomic_t nrdy;

/** Take actions before a new thread runs
 *
 * Perform actions that need to be taken before
 * the newly selected thread is passed control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

#ifdef CONFIG_FPU_LAZY
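/** Claim the FPU for the current thread (lazy FPU context switching)
 *
 * Save the FPU context of the previous owner, if any, then restore or
 * initialize the context of THREAD and make THREAD this CPU's FPU owner.
 *
 */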
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    if (CPU->fpu_owner != NULL) {
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
    }
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will continue to go to sleep
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    i = 0;
    for (; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

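        /*
         * The time quantum grows with the run queue index, so threads
         * taken from lower-priority queues receive longer quanta
         * (10 ms for rq[0], 20 ms for rq[1], and so on).
         */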
        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* eventually correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated when
         * the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * using the new stack. It handles the actual
 * context switch to a new thread.
 *
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if (CPU->fpu_owner == THREAD)
                CPU->fpu_owner = NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_install(as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);
#endif

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}


/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * The interrupt priority level of the preempted thread is recorded
         * here to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread; it supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes. Each time get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * This doesn't require interrupt disabling, because
             * kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPUs without ever being
                 * run. We also don't want to steal threads whose FPU
                 * context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    printf("*********** Scheduler dump ***********\n");
    for (cpu = 0; cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;
        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d needs_relink: %d\n",
            cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\tRq %d: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid,
                    thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}