Rev 779 vs. Rev 783 (lines marked '-' appear only in Rev 779, lines marked '+' only in Rev 783)

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if(THREAD==CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init();
        THREAD->fpu_context_exists=1;
    }
#endif
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    if (CPU->fpu_owner != NULL) {
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged=0;
    }
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists=1;
    }
    CPU->fpu_owner=THREAD;
    THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
-   int i, n;
+   int i;

    ASSERT(CPU != NULL);

loop:
-   interrupts_disable();
-
-   spinlock_lock(&CPU->lock);
-   n = CPU->nrdy;
-   spinlock_unlock(&CPU->lock);
-
    interrupts_enable();

-   if (n == 0) {
+   if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * For there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         */
        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    i = 0;
-retry:
    for (; i<RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

-       /* avoid deadlock with relink_rq() */
-       if (!spinlock_trylock(&CPU->lock)) {
-           /*
-            * Unlock r and try again.
-            */
-           spinlock_unlock(&r->lock);
-           goto retry;
-       }
-       CPU->nrdy--;
-       spinlock_unlock(&CPU->lock);
-
+       atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i+1)*10000);
        t->priority = i;    /* eventually correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that in result threads with 'pri'
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i<RQ_COUNT-1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. Handling the actual context
 * switch to a new thread.
 *
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if(CPU->fpu_owner==THREAD)
                CPU->fpu_owner=NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }


    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_install(as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);
#endif

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}


/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * Interrupt priority level of preempted thread is recorded here
         * to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising thread supplies
 * for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
-   int count, i, j, k = 0;
+   int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPU's. Note that situation can have changed between two
     * passes. Each time get the most up to date counts.
     */
-   ipl = interrupts_disable();
-   spinlock_lock(&CPU->lock);
-   count = atomic_get(&nrdy) / config.cpu_active;
-   count -= CPU->nrdy;
-   spinlock_unlock(&CPU->lock);
-   interrupts_restore(ipl);

-   if (count <= 0)
+   average = atomic_get(&nrdy) / config.cpu_active;
+   count = average - atomic_get(&CPU->nrdy);
+
+   if (count < 0)
        goto satisfied;

+   if (!count) { /* Try to steal threads from CPU's that have more than average count */
+       count = 1;
+       average += 1;
+   }
+
    /*
     * Searching least priority queues on all CPU's first and most priority queues on all CPU's last.
     */
    for (j=RQ_COUNT-1; j >= 0; j--) {
        for (i=0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * Doesn't require interrupt disabling for kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
+           if (atomic_get(&cpu->nrdy) <= average)
+               continue;

restart:    ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads nor threads already stolen.
                 * The latter prevents threads from migrating between CPU's without ever being run.
                 * We don't want to steal threads whose FPU context is still in CPU.
                 */
                spinlock_lock(&t->lock);
                if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {

                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        interrupts_restore(ipl);
                        goto restart;
                    }
-                   cpu->nrdy--;
+                   atomic_dec(&cpu->nrdy);
                    spinlock_unlock(&cpu->lock);

                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
-               printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, atomic_get(&nrdy) / config.cpu_active);
+               printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

-   if (CPU->nrdy) {
+   if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu,i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    printf("*********** Scheduler dump ***********\n");
    for (cpu=0;cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;
        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d needs_relink: %d\n",
-           cpus[cpu].id, cpus[cpu].nrdy, cpus[cpu].needs_relink);
+           cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i=0; i<RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\tRq %d: ", i);
            for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid,
                    thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}

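The change this diff carries is the conversion of the per-CPU ready-thread count from a spinlock-protected field (CPU->nrdy guarded by CPU->lock) into an atomic counter read with atomic_get() and updated with atomic_dec(). Below is a minimal, self-contained sketch of that pattern; it uses C11 <stdatomic.h> rather than the kernel's own atomic_t API, and the names (demo_cpu_t, demo_nrdy_*) are illustrative only, not taken from this source.

/*
 * Sketch only, not kernel code: a ready-count that used to be an int behind a
 * per-CPU spinlock, re-expressed as a lock-free atomic counter.
 */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    /*
     * Before: "int nrdy;" plus a spinlock, so every reader had to disable
     * interrupts and take the lock just to look at the value.
     * After: a counter that any CPU can read or adjust without locking.
     */
    atomic_int nrdy;
} demo_cpu_t;

/* Accounting on enqueue/dequeue no longer touches the per-CPU lock. */
static inline void demo_nrdy_inc(demo_cpu_t *cpu)
{
    atomic_fetch_add(&cpu->nrdy, 1);
}

static inline void demo_nrdy_dec(demo_cpu_t *cpu)
{
    atomic_fetch_sub(&cpu->nrdy, 1);
}

/* The idle test in find_best_thread() becomes a plain atomic read. */
static inline bool demo_cpu_has_ready(demo_cpu_t *cpu)
{
    return atomic_load(&cpu->nrdy) != 0;
}

With the counter atomic, find_best_thread() can drop the spinlock_trylock()/retry dance it previously needed to avoid deadlocking against relink_rq(), and kcpulb() can compare other CPUs' ready counts against the system average without taking their locks.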