/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
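    /*
     * With CONFIG_FPU_LAZY, the FPU stays enabled only for the thread that
     * currently owns this CPU's FPU; any other thread will trap on its
     * first FPU instruction and have its context switched in lazily by
     * scheduler_fpu_lazy_request(). Otherwise, the FPU context is restored
     * (or freshly initialized) eagerly on every switch.
     */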
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

#ifdef CONFIG_FPU_LAZY
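/** Service a lazy FPU context switch request
 *
 * Save the FPU context of the current owner of this CPU's FPU (if any),
 * restore or initialize the context of THREAD and make THREAD the new
 * owner of the CPU's FPU.
 *
 */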
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    if (CPU->fpu_owner != NULL) {
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
    }
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

    ASSERT(CPU != NULL);

loop:
    interrupts_disable();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    interrupts_enable();

    if (n == 0) {
#ifdef CONFIG_SMP
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag that the kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
#endif /* CONFIG_SMP */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This saves energy and is friendly to hyperthreading.
         * On the other hand, several hardware interrupts can be
         * missed in the process.
         */
        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    i = 0;
retry:
    for (; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        /* avoid deadlock with relink_rq() */
        if (!spinlock_trylock(&CPU->lock)) {
            /*
             * Unlock r and try again.
             */
            spinlock_unlock(&r->lock);
            goto retry;
        }
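        /*
         * Account for the thread we are about to dequeue: both the
         * per-CPU and the global counts of ready threads decrease.
         */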
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;
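        /*
         * Give t a time quantum proportional to the index of its run
         * queue: threads taken from lower-priority queues run less often
         * but receive longer quanta ((i + 1) * 10 ms here).
         */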
        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* eventually correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated when load
         * balancing needs emerge.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;
}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
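    /*
     * CPU->needs_relink is presumably bumped elsewhere (e.g. from the
     * clock interrupt handler), so the boost below happens only once in
     * a while, after more than NEEDS_RELINK_MAX increments accumulate.
     */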
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);
}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. It handles the actual
 * context switch to a new thread.
 *
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
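            /*
             * THREAD's stacks can be freed here because this code already
             * runs on the CPU's private scheduler stack, not on the
             * exiting thread's kernel stack.
             */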
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if (CPU->fpu_owner == THREAD)
                CPU->fpu_owner = NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * The state the thread is entering is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_install(as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);
#endif

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter
     * to the thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}


/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
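    /*
     * ipl is declared volatile, presumably so that its value is not
     * cached in a register across the second, context_restore()-mediated
     * return from context_save() below.
     */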
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
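        /*
         * context_save() returns twice, setjmp() style: once directly
         * with a non-zero value, and once more with zero when control
         * comes back through context_restore(). The branch below is
         * therefore taken by the thread when it is next scheduled to run.
         */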
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * Interrupt priority level of preempted thread is recorded here
         * to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We must not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread; it supervises the supply
 * of ready threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes. Each time get the most up-to-date counts.
     */
    ipl = interrupts_disable();
    spinlock_lock(&CPU->lock);
    count = atomic_get(&nrdy) / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    interrupts_restore(ipl);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];
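            /*
             * The running offset k makes consecutive passes start at
             * different CPUs, spreading the stealing pressure across
             * the whole system.
             */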

            /*
             * Not interested in ourselves.
             * There is no need to disable interrupts here, since
             * kcpulb is X_WIRED and cannot migrate.
             */
            if (CPU == cpu)
                continue;

restart:
            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads nor threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPUs without ever
                 * being run. Nor do we want to steal threads whose FPU
                 * context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if (!(t->flags & (X_WIRED | X_STOLEN)) && !(t->fpu_context_engaged)) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order
                     * than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        interrupts_restore(ipl);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    atomic_set(&CPU->kcpulbstarted, 0);
    goto loop;
}

#endif /* CONFIG_SMP */