/*
 * Copyright (c) 2001-2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
28 | |||
1757 | jermar | 29 | /** @addtogroup genericproc |
1702 | cejka | 30 | * @{ |
31 | */ |
||
32 | |||
1248 | jermar | 33 | /** |
1702 | cejka | 34 | * @file |
1248 | jermar | 35 | * @brief Scheduler and load balancing. |
36 | * |
||
1264 | jermar | 37 | * This file contains the scheduler and kcpulb kernel thread which |
1248 | jermar | 38 | * performs load-balancing of per-CPU run queues. |
39 | */ |
||
40 | |||

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>
#include <synch/rcu.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;    /**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
    before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}
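
/*
 * A note on the two FPU strategies above: with CONFIG_FPU_LAZY, the FPU
 * stays disabled unless the incoming thread already owns it, so the first
 * FPU instruction traps and the trap handler is expected to call
 * scheduler_fpu_lazy_request() below. Without CONFIG_FPU_LAZY, the FPU
 * context is saved and restored eagerly on every context switch.
 */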
106 | |||
898 | jermar | 107 | /** Take actions after THREAD had run. |
897 | jermar | 108 | * |
109 | * Perform actions that need to be |
||
110 | * taken after the running thread |
||
898 | jermar | 111 | * had been preempted by the scheduler. |
897 | jermar | 112 | * |
113 | * THREAD->lock is locked on entry |
||
114 | * |
||
115 | */ |
||
116 | void after_thread_ran(void) |
||
117 | { |
||
118 | after_thread_ran_arch(); |
||
2307 | hudecek | 119 | rcu_run_callbacks(); |
897 | jermar | 120 | } |
121 | |||

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
restart:
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
        CPU->fpu_owner = NULL;
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists) {
        fpu_context_restore(THREAD->saved_fpu_context);
    } else {
        /* Allocate FPU context */
        if (!THREAD->saved_fpu_context) {
            /* Might sleep */
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&CPU->lock);
            THREAD->saved_fpu_context =
                (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
            /* We may have switched CPUs during slab_alloc */
            goto restart;
        }
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
    spinlock_unlock(&THREAD->lock);

    spinlock_unlock(&CPU->lock);
}
#endif
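
/*
 * The goto restart above deserves a word: slab_alloc() may sleep, and a
 * sleeping thread can wake up on a different CPU. Every CPU-local
 * assumption (CPU->lock, CPU->fpu_owner) therefore has to be
 * re-established from scratch after the allocation.
 */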

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there is nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such case, the CPU will continue to go to sleep
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
         * when load balancing needs arise.
         */
        t->flags &= ~THREAD_FLAG_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;
}
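
/*
 * The quantum assignment above is linear in the queue index: a thread
 * taken from rq[0] runs for us2ticks(10000), i.e. 10 ms, one from rq[1]
 * for 20 ms, and so on, so lower-priority threads get longer (if less
 * frequent) time slices.
 */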

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that in result threads with 'pri'
 * greater than or equal to start are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);
}
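
/*
 * A worked example of the relinking above: with start == 1 and the relink
 * threshold exceeded, the loop drains rq[2] into rq[1], then rq[3] into
 * rq[2], and so forth, so every thread sitting at queue index start + 1
 * or below moves up exactly one queue.
 */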

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);

        /* Update thread accounting */
        THREAD->cycles += get_cycle() - THREAD->last_cycle;

#ifndef CONFIG_FPU_LAZY
        fpu_context_save(THREAD->saved_fpu_context);
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */

            /* Save current CPU cycle */
            THREAD->last_cycle = get_cycle();

            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);

            return;
        }

        /*
         * Interrupt priority level of preempted thread is recorded
         * here to facilitate scheduler() invocations from
         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}
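
/*
 * Typical call sites (a sketch, based on the state handling below): a
 * blocking primitive such as waitq_sleep_timeout() disables interrupts,
 * sets THREAD->state = Sleeping, records the wait queue in
 * THREAD->sleep_queue and calls scheduler(); the Sleeping case in
 * scheduler_separated_stack() then releases the wait queue lock on the
 * caller's behalf.
 */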

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. Handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
    int priority;
    DEADLOCK_PROBE_INIT(p_joinwq);

    ASSERT(CPU != NULL);

    if (THREAD) {
        /* must be run after the switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
        case Running:
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
repeat:
            if (THREAD->detached) {
                thread_destroy(THREAD);
            } else {
                /*
                 * The thread structure is kept allocated until
                 * somebody calls thread_detach() on it.
                 */
                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
                    /*
                     * Avoid deadlock.
                     */
                    spinlock_unlock(&THREAD->lock);
                    delay(10);
                    spinlock_lock(&THREAD->lock);
                    DEADLOCK_PROBE(p_joinwq,
                        DEADLOCK_THRESHOLD);
                    goto repeat;
                }
                _waitq_wakeup_unsafe(&THREAD->join_wq, false);
                spinlock_unlock(&THREAD->join_wq.lock);

                THREAD->state = Undead;
                spinlock_unlock(&THREAD->lock);
            }
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in
             * waitq_sleep(). Address of wq->lock is kept in
             * THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context
             * invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);
            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%llu: unexpected state %s\n", THREAD->tid,
                thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    /*
     * If both the old and the new task are the same, lots of work is
     * avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address
         * space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
        before_task_runs();
    }

    spinlock_lock(&THREAD->lock);
    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %llu (priority=%d, ticks=%llu, nrdy=%ld)\n",
        CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
        atomic_get(&CPU->nrdy));
#endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
     * thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply of
 * threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, j, k = 0;
    unsigned int i;
    ipl_t ipl;

    /*
     * Detach kcpulb as nobody will call thread_join_timeout() on it.
     */
    thread_detach(THREAD);

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * Doesn't require interrupt disabling because kcpulb
             * has THREAD_FLAG_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor
                 * threads that have already been stolen. The
                 * latter prevents threads from migrating between
                 * CPUs without ever being run. We also don't
                 * want to steal threads whose FPU context is
                 * still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (THREAD_FLAG_WIRED |
                    THREAD_FLAG_STOLEN))) &&
                    (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %llu -> cpu%d, nrdy=%ld, "
                    "avg=%ld\n", CPU->id, t->tid, CPU->id,
                    atomic_get(&CPU->nrdy),
                    atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= THREAD_FLAG_STOLEN;
                t->state = Entering;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another
                 * CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}
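
/*
 * The balancing target above in concrete numbers: with 10 ready threads
 * system-wide and 4 active CPUs, average = 10 / 4 + 1 = 3; a CPU holding
 * only one ready thread will then try to steal count = 3 - 1 = 2 threads
 * from CPUs whose nrdy exceeds the average.
 */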

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    unsigned int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /*
     * We are going to mess with scheduler structures;
     * let's not be interrupted.
     */
    ipl = interrupts_disable();
    for (cpu = 0; cpu < config.cpu_count; cpu++) {

        if (!cpus[cpu].active)
            continue;

        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
            cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
            cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\trq[%d]: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head;
                cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%llu(%s) ", t->tid,
                    thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}

/** @}
 */