/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @defgroup proc Proc
 * @ingroup kernel
 * @{
 * @}
 */

/** @addtogroup genericproc generic
 * @ingroup proc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;  /**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
    before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be taken
 * before the newly selected thread is
 * passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

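/*
 * Note on the two FPU strategies above (a summary, not new behaviour):
 * with CONFIG_FPU_LAZY, before_thread_runs() only enables the FPU when the
 * incoming thread already owns the CPU's FPU state and disables it
 * otherwise; the first FPU instruction of a non-owner is then expected to
 * trap into architecture code that calls scheduler_fpu_lazy_request()
 * below. Without CONFIG_FPU_LAZY, the FPU context is restored eagerly here
 * and saved unconditionally in scheduler().
 */
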
/** Take actions after THREAD has run.
 *
 * Perform actions that need to be taken
 * after the running thread has been
 * preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
    after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
restart:
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
        CPU->fpu_owner = NULL;
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists) {
        fpu_context_restore(THREAD->saved_fpu_context);
    } else {
        /* Allocate FPU context */
        if (!THREAD->saved_fpu_context) {
            /* Might sleep */
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&CPU->lock);
            THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
            /* We may have switched CPUs during slab_alloc */
            goto restart;
        }
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
    spinlock_unlock(&THREAD->lock);

    spinlock_unlock(&CPU->lock);
}
#endif

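/*
 * Why the goto restart above is needed: slab_alloc() may sleep, and a
 * sleeping thread can be migrated, so after the allocation THREAD may be
 * running on a different CPU with a different fpu_owner. Both locks are
 * therefore dropped before the allocation and the whole save/restore
 * sequence is redone from scratch on the (possibly new) CPU.
 */
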
/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there is nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This improves energy saving and hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will still go to sleep
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated again
         * when the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

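/*
 * Note on the quantum assignment in find_best_thread(): the time slice is
 * derived from the run queue index, t->ticks = us2ticks((i + 1) * 10000),
 * so a thread taken from rq[0] runs for 10 ms, one from rq[1] for 20 ms,
 * and so on. Lower-indexed queues are searched first, i.e. higher priority
 * comes with a shorter slice.
 */
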
/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to @start are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}

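/*
 * Illustrative relink pass (assuming start == 1): the loop first moves the
 * contents of rq[2] into rq[1], then rq[3] into rq[2], and so on up to
 * rq[RQ_COUNT - 1] into rq[RQ_COUNT - 2]. Every thread waiting at priority
 * >= start + 1 is thus promoted by exactly one queue, which is what keeps
 * low-priority threads from starving.
 */
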
/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(THREAD->saved_fpu_context);
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler().
             */
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);

            return;
        }

        /*
         * Interrupt priority level of preempted thread is recorded here
         * to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
        (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}

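/*
 * Note on the control flow above: context_save(&THREAD->saved_context)
 * returns non-zero when the context is being saved, so execution falls
 * through to the stack switch; when scheduler_separated_stack() later does
 * context_restore(&THREAD->saved_context), the thread resumes inside the
 * "if (!context_save(...))" branch and leaves scheduler() from there.
 * The ipl variable is declared volatile, presumably because this frame is
 * re-entered through the saved context.
 */
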
/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. Handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        /* must be run after the switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
        case Running:
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
repeat:
            if (THREAD->detached) {
                thread_destroy(THREAD);
            } else {
                /*
                 * The thread structure is kept allocated until
                 * somebody calls thread_detach() on it.
                 */
                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
                    /*
                     * Avoid deadlock.
                     */
                    spinlock_unlock(&THREAD->lock);
                    delay(10);
                    spinlock_lock(&THREAD->lock);
                    goto repeat;
                }
                _waitq_wakeup_unsafe(&THREAD->join_wq, false);
                spinlock_unlock(&THREAD->join_wq.lock);

                THREAD->state = Undead;
                spinlock_unlock(&THREAD->lock);
            }
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid,
                thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    /*
     * If both the old and the new task are the same, lots of work is
     * avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address
         * space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
        before_task_runs();
    }

    spinlock_lock(&THREAD->lock);
    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
        CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
        atomic_get(&CPU->nrdy));
#endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
     * the thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}

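/*
 * Note on the Exiting case in scheduler_separated_stack(): a thread that
 * has not been detached is only moved to the Undead state after any
 * joiners waiting on its join_wq have been woken up; its thread_t stays
 * allocated until thread_detach() is called on it (a detached thread is
 * destroyed right away). The spinlock_trylock()/delay(10)/goto repeat
 * sequence avoids taking THREAD->lock and THREAD->join_wq.lock in an order
 * that could deadlock, presumably against a concurrent joiner holding the
 * locks the other way round.
 */
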
#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

    /*
     * Detach kcpulb as nobody will call thread_join_timeout() on it.
     */
    thread_detach(THREAD);

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation can have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * No interrupt disabling is required here because kcpulb
             * is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPUs without ever being
                 * run. We also don't want to steal threads whose FPU
                 * context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) &&
                    (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
                    CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
                    atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                t->state = Entering;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

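/*
 * Worked example of the balancing target in kcpulb() (illustrative numbers
 * only): with 4 active CPUs and nrdy == 13, average = 13 / 4 + 1 = 4. A
 * CPU that currently has 2 ready threads computes count = 4 - 2 = 2 and
 * tries to steal up to two threads, taking them only from CPUs whose nrdy
 * exceeds the average and skipping wired, already-stolen and FPU-engaged
 * threads.
 */
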
#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /*
     * We are going to mess with scheduler structures,
     * let's not be interrupted.
     */
    ipl = interrupts_disable();
    for (cpu = 0; cpu < config.cpu_count; cpu++) {

        if (!cpus[cpu].active)
            continue;

        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
            cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
            cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\trq[%d]: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid, thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}

/** @}
 */