/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init(&(THREAD->saved_fpu_context));
        THREAD->fpu_context_exists = 1;
    }
#endif
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init(&(THREAD->saved_fpu_context));
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;

    spinlock_unlock(&THREAD->lock);
    spinlock_unlock(&CPU->lock);
}
#endif

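/*
 * Locking note: CPU->lock is acquired before any thread lock here (first
 * the old owner's, then THREAD's), so the per-CPU lock consistently
 * precedes per-thread locks on this path.
 */
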
/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there is nothing to run, the CPU sleeps
         * until a hardware interrupt or an IPI arrives.
         * This saves energy and helps hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU would still go to sleep
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* eventually correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated again
         * when the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on a new stack. It handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
            thread_destroy(THREAD);
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * Interrupt priority level of preempted thread is recorded here
         * to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread that supervises the supply
 * of ready threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * No interrupt disabling is required because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads or threads that
                 * have already been stolen. The latter prevents threads from
                 * migrating between CPUs without ever being run. We also
                 * don't want to steal threads whose FPU context is still in
                 * the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on the local CPU.
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    printf("*********** Scheduler dump ***********\n");
    for (cpu = 0; cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;
        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d needs_relink: %d\n",
               cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\tRq %d: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid, thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}
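
/*
 * Example output, reconstructed from the printf() format strings above
 * (the values are hypothetical):
 *
 *    *********** Scheduler dump ***********
 *    cpu0: nrdy: 2 needs_relink: 3
 *        Rq 0: 4(Ready) 7(Ready)
 */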