/*
 * Copyright (c) 2001-2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>
#include <arch/smp/sun4v/smp.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;    /**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
    before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
    after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
restart:
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
        CPU->fpu_owner = NULL;
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists) {
        fpu_context_restore(THREAD->saved_fpu_context);
    } else {
        /* Allocate FPU context */
        if (!THREAD->saved_fpu_context) {
            /* Might sleep */
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&CPU->lock);
            THREAD->saved_fpu_context =
                (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
            /* We may have switched CPUs during slab_alloc */
            goto restart;
        }
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
    spinlock_unlock(&THREAD->lock);

    spinlock_unlock(&CPU->lock);
}
#endif
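
/*
 * Note on the lazy FPU protocol above (a summary; the trap-entry detail is
 * an assumption about the architecture glue rather than code in this file):
 * with CONFIG_FPU_LAZY, before_thread_runs() leaves the FPU disabled unless
 * THREAD already owns it. The first FPU instruction a non-owner executes is
 * then expected to trap, and the FPU-disabled exception handler calls
 * scheduler_fpu_lazy_request(), which saves the previous owner's context,
 * restores or lazily allocates this thread's context, and sets
 * fpu_context_engaged so that kcpulb will not migrate the thread while its
 * live state exists only in the FPU registers.
 */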

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will continue to go to sleep
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        if (CPU->arch.exec_unit)
            atomic_dec(&(CPU->arch.exec_unit->nrdy));
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
         * when the need for load balancing arises.
         */
        t->flags &= ~THREAD_FLAG_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;
}

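/*
 * Worked example (illustrative, not part of the original source): the
 * quantum assigned in find_best_thread() grows linearly with the run
 * queue index, so threads taken from lower-priority queues receive
 * longer slices:
 *
 *     i == 0:  us2ticks(1 * 10000)  ->  10 ms quantum
 *     i == 3:  us2ticks(4 * 10000)  ->  40 ms quantum
 *     i == RQ_COUNT - 1:  us2ticks(RQ_COUNT * 10000)
 */
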
/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with 'pri'
 * greater than or equal to start are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);
}

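/*
 * Worked example (illustrative only): once CPU->needs_relink exceeds
 * NEEDS_RELINK_MAX and relink_rq(0) is called, the loop above moves the
 * contents of rq[1] into rq[0] (via the head staging list), then rq[2]
 * into rq[1], and so on, so every waiting thread is promoted by one
 * priority level. With relink_rq(2), rq[0] and rq[1] are left untouched
 * and only rq[3] and the lower-priority queues below it are promoted.
 */
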
/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);

        /* Update thread accounting */
        THREAD->cycles += get_cycle() - THREAD->last_cycle;

#ifndef CONFIG_FPU_LAZY
        fpu_context_save(THREAD->saved_fpu_context);
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */

            /* Save current CPU cycle */
            THREAD->last_cycle = get_cycle();

            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);

            return;
        }

        /*
         * The interrupt priority level of the preempted thread is
         * recorded here to facilitate scheduler() invocations from
         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}

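/*
 * Usage sketch (an illustration based on the comments above, not code from
 * this file): a blocking primitive such as waitq_sleep_timeout() typically
 * disables interrupts, takes the relevant locks, marks the thread Sleeping
 * and calls scheduler():
 *
 *     ipl = interrupts_disable();
 *     spinlock_lock(&wq->lock);       // released later by the scheduler
 *     spinlock_lock(&THREAD->lock);
 *     THREAD->state = Sleeping;
 *     THREAD->sleep_queue = wq;
 *     scheduler();    // "returns" only when the thread runs again
 *     interrupts_restore(ipl);
 *
 * The return happens through the !context_save(&THREAD->saved_context)
 * branch above: when the thread is selected again, context_restore()
 * resumes it there with its saved IPL reinstated.
 */
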
/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * using the new stack. Handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
    int priority;
    DEADLOCK_PROBE_INIT(p_joinwq);

    ASSERT(CPU != NULL);

    if (THREAD) {
        /* must be run after the switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
        case Running:
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
repeat:
            if (THREAD->detached) {
                thread_destroy(THREAD);
            } else {
                /*
                 * The thread structure is kept allocated until
                 * somebody calls thread_detach() on it.
                 */
                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
                    /*
                     * Avoid deadlock.
                     */
                    spinlock_unlock(&THREAD->lock);
                    delay(HZ);
                    spinlock_lock(&THREAD->lock);
                    DEADLOCK_PROBE(p_joinwq,
                        DEADLOCK_THRESHOLD);
                    goto repeat;
                }
                _waitq_wakeup_unsafe(&THREAD->join_wq,
                    WAKEUP_FIRST);
                spinlock_unlock(&THREAD->join_wq.lock);

                THREAD->state = Lingering;
                spinlock_unlock(&THREAD->lock);
            }
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in
             * waitq_sleep(). Address of wq->lock is kept in
             * THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context
             * invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%" PRIu64 ": unexpected state %s\n",
                THREAD->tid, thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    /*
     * If the old and the new task are the same, lots of work is
     * avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address
         * space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
        before_task_runs();
    }

    spinlock_lock(&THREAD->lock);
    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
        ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
        THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
     * thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply of
 * threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, j, k = 0;
    unsigned int i;
    ipl_t ipl;

    /*
     * Detach kcpulb as nobody will call thread_join_timeout() on it.
     */
    thread_detach(THREAD);

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

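    /*
     * Worked example (illustrative numbers): with config.cpu_active == 4
     * and atomic_get(&nrdy) == 9, average == 9 / 4 + 1 == 3. A CPU with
     * only one ready thread computes count == 3 - 1 == 2 and will try to
     * steal two threads; a CPU with three or more ready threads computes
     * count <= 0 and, barring the exec-unit adjustment below, is already
     * satisfied.
     */
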
    /* calculate number of threads to be stolen from other exec. units */
    spinlock_lock(&(CPU->arch.exec_unit->proposed_nrdy_lock));
    bool eu_busy = calculate_optimal_nrdy(CPU->arch.exec_unit);
    unsigned int count_other_eus = CPU->arch.proposed_nrdy
        - atomic_get(&(CPU->nrdy));
    spinlock_unlock(&(CPU->arch.exec_unit->proposed_nrdy_lock));

    /*
     * If the CPU's parent core is overloaded, do not do the load
     * balancing, otherwise we would migrate threads which should be
     * migrated to other cores and since a thread cannot be migrated
     * multiple times, it would not be migrated to the other core
     * in the future.
     */
    if (eu_busy)
        return;

    /*
     * Take the maximum - steal enough threads to satisfy both the need
     * to have all virtual CPUs equally busy and the need to have all
     * the cores equally busy.
     */
    if (((int) count_other_eus) > count)
        count = count_other_eus;

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * Doesn't require interrupt disabling because kcpulb has
             * THREAD_FLAG_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads
                 * nor threads already stolen. The latter
                 * prevents threads from migrating between CPUs
                 * without ever being run. We also don't want to
                 * steal threads whose FPU context is still in
                 * the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (THREAD_FLAG_WIRED |
                    THREAD_FLAG_STOLEN))) &&
                    (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);
                    if (cpu->arch.exec_unit)
                        atomic_dec(&(cpu->arch.exec_unit->nrdy));

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
                    "nrdy=%ld, avg=%ld\n", CPU->id, t->tid,
                    CPU->id, atomic_get(&CPU->nrdy),
                    atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= THREAD_FLAG_STOLEN;
                t->state = Entering;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another
                 * CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }
| 700 | |||
| 783 | palkovsky | 701 | if (atomic_get(&CPU->nrdy)) { |
| 1 | jermar | 702 | /* |
| 703 | * Be a little bit light-weight and let migrated threads run. |
||
| 704 | */ |
||
| 705 | scheduler(); |
||
| 779 | jermar | 706 | } else { |
| 1 | jermar | 707 | /* |
| 708 | * We failed to migrate a single thread. |
||
| 779 | jermar | 709 | * Give up this turn. |
| 1 | jermar | 710 | */ |
| 779 | jermar | 711 | goto loop; |
| 1 | jermar | 712 | } |
| 713 | |||
| 714 | goto not_satisfied; |
||
| 125 | jermar | 715 | |
| 1 | jermar | 716 | satisfied: |
| 717 | goto loop; |
||
| 718 | } |
||
| 719 | |||
#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    unsigned int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /*
     * We are going to mess with scheduler structures,
     * let's not be interrupted.
     */
    ipl = interrupts_disable();
    for (cpu = 0; cpu < config.cpu_count; cpu++) {

        if (!cpus[cpu].active)
            continue;

        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIc "\n",
            cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
            cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\trq[%u]: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head;
                cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%" PRIu64 "(%s) ", t->tid,
                    thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}
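
/*
 * Sample output (hypothetical values; the format follows the printf()
 * calls above):
 *
 *     cpu0: address=0x8046a000, nrdy=3, needs_relink=2
 *             rq[0]: 4(Ready) 7(Ready)
 *             rq[4]: 12(Ready)
 */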

/** @}
 */