/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

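/*
 * Scheduler and load balancing.
 */
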
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

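/** Number of ready threads in the system. */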
atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
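	/*
	 * With lazy FPU context switching, only enable or disable
	 * the FPU here; the context itself is saved and restored
	 * on demand by scheduler_fpu_lazy_request().
	 */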
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&(THREAD->saved_fpu_context));
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

#ifdef CONFIG_FPU_LAZY
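/** Take over the FPU for the current thread
 *
 * Save the FPU context of the previous owner, if any,
 * then restore (or initialize) the FPU context of THREAD
 * and make THREAD the new owner of this CPU's FPU.
 *
 */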
void scheduler_fpu_lazy_request(void)
{
	fpu_enable();
	if (CPU->fpu_owner != NULL) {
		fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
	}
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This saves energy and improves the utilization
		 * of hyperthreaded CPUs.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

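		/*
		 * Grant a time quantum proportional to the queue index:
		 * (i + 1) * 10 ms, so lower-priority queues get longer slices.
		 */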
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises again.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to 'start' are moved
 * to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
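		/*
		 * In effect, the contents of rq[i + 1] are appended
		 * to rq[i] for every i from 'start' to RQ_COUNT - 2.
		 */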
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the
 * actual context switch to a new thread.
 *
 * Assumes THREAD->lock is held on entry.
 */
static void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		switch (THREAD->state) {
		case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			thread_destroy(THREAD);
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * The thread is in an unexpected state.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old address space with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
	 * to the thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
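		/* With eager FPU switching, save the FPU context on every reschedule. */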
		fpu_context_save(&(THREAD->saved_fpu_context));
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler().
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);
			return;
		}

		/*
		 * The interrupt priority level of the preempted thread is recorded
		 * here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and the preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We must not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread that supervises
 * the supply of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can change between two
	 * passes. Each time get the most up-to-date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first
	 * and the highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * No need to disable interrupts, because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads
				 * that have already been stolen. The latter prevents
				 * threads from migrating between CPU's without ever
				 * being run. We also don't want to steal threads whose
				 * FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on the local CPU.
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let the migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/* We are going to mess with scheduler structures,
	 * let's not be interrupted */
	ipl = interrupts_disable();
	printf("*********** Scheduler dump ***********\n");
	for (cpu = 0; cpu < config.cpu_count; cpu++) {
		if (!cpus[cpu].active)
			continue;
		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: nrdy: %d needs_relink: %d\n",
			cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\tRq %d: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid, thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}