/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like
 * must be protected by a spinlock.
 */

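/* Total number of ready threads in the system, summed over all CPU's. */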
volatile int nrdy;


/** Prepare thread to run
 *
 * Take actions that need to be taken
 * before a thread runs, such as restoring
 * its FPU context.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
	fpu_context_restore(&(THREAD->saved_fpu_context));
}


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

loop:
	cpu_priority_high();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_low();

	if (n == 0) {
#ifdef __SMP__
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set the CPU-private flag that kcpulb has been started.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
#endif /* __SMP__ */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This improves energy saving and hyperthreading.
		 * On the other hand, several hardware interrupts can be ignored.
		 */
		cpu_sleep();
		goto loop;
	}

	cpu_priority_high();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

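		/*
		 * A thread is about to be dequeued from rq[i]:
		 * update both the global and the per-CPU ready-thread counts.
		 */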
		atomic_dec(&nrdy);

		spinlock_lock(&CPU->lock);
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

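		/*
		 * Give t a time quantum proportional to its queue index:
		 * (i + 1) * 10000 us, i.e. (i + 1) * 10 ms, so threads from
		 * lower-priority queues receive longer quanta.
		 */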
		t->ticks = us2ticks((i + 1) * 10000);
		t->pri = i;	/* correct rq index, if needed */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * 'pri' greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
	volatile pri_t pri;

	pri = cpu_priority_high();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
		fpu_context_save(&(THREAD->saved_fpu_context));
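		/*
		 * context_save() is setjmp-like: it returns nonzero on the
		 * direct call and zero when the saved context is later
		 * resumed via context_restore(), so the branch below is
		 * taken only when the thread is being switched back in.
		 */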
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}
		THREAD->saved_context.pri = pri;
	}

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. Handles the actual
 * context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
	int priority;

	if (THREAD) {
		switch (THREAD->state) {
		    case Running:
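			/*
			 * The running thread was preempted:
			 * put it back on its run queue.
			 */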
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

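			/*
			 * If this CPU still holds the exiting thread's FPU
			 * context, forget it, as the thread is being destroyed.
			 */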
			spinlock_lock(&CPU->lock);
			if (CPU->fpu_owner == THREAD)
				CPU->fpu_owner = NULL;
			spinlock_unlock(&CPU->lock);

			free(THREAD);

			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->pri = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);

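	/*
	 * Give relink_rq() a chance to move threads from lower-priority
	 * run queues up, so that they do not starve.
	 */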
	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both tasks and vm mappings are different.
			 * Replace the old one with the new one.
			 */
			if (m1) {
				vm_uninstall(m1);
			}
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
#endif

	context_restore(&THREAD->saved_context);
	/* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread; it supervises
 * the supply of threads for the CPU it is
 * wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can have changed between two
	 * passes. Each time get the most up-to-date counts.
	 */
	pri = cpu_priority_high();
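	/*
	 * count = the system-wide average number of ready threads per CPU
	 * minus the number of ready threads on this CPU.
	 */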
	spinlock_lock(&CPU->lock);
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_restore(pri);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first
	 * and the highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];
			r = &cpu->rq[j];

			/*
			 * Not interested in ourselves.
			 * Doesn't require interrupt disabling, because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:		pri = cpu_priority_high();
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads
				 * that have already been stolen. The latter prevents
				 * threads from migrating between CPU's without ever
				 * being run. We also don't want to steal threads whose
				 * FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */

					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on the local CPU.
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let the migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}

#endif /* __SMP__ */