/*
 * Copyright (c) 2001-2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */
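
/*
 * Informal overview of this file: scheduler() saves the outgoing thread's
 * context and jumps, via a private per-CPU stack, to
 * scheduler_separated_stack(), which requeues or destroys the old thread,
 * picks a new one with find_best_thread(), fights starvation with
 * relink_rq() and finally context_restore()s the new thread. On SMP
 * configurations, the kcpulb() kernel thread steals threads from busier
 * CPUs.
 */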
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>
#include <proc/tasklet.h>
 
static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;  /**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
    before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}
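
/*
 * Summary of the two FPU strategies above: with CONFIG_FPU_LAZY, this
 * function merely enables the FPU for its current owner and disables it
 * for everybody else; the expensive save/restore is deferred to
 * scheduler_fpu_lazy_request(), which is reached (presumably via the
 * architecture's FPU-disabled trap) only when some other thread actually
 * touches the FPU. Without CONFIG_FPU_LAZY, scheduler() eagerly saves the
 * outgoing thread's FPU state and this function restores the incoming
 * thread's state on every context switch.
 */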

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
    after_thread_ran_arch();
}
 

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
restart:
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
        CPU->fpu_owner = NULL;
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists) {
        fpu_context_restore(THREAD->saved_fpu_context);
    } else {
        /* Allocate FPU context */
        if (!THREAD->saved_fpu_context) {
            /* Might sleep */
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&CPU->lock);
            THREAD->saved_fpu_context =
                (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
            /* We may have switched CPUs during slab_alloc */
            goto restart;
        }
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
    spinlock_unlock(&THREAD->lock);

    spinlock_unlock(&CPU->lock);
}
#endif
 
/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}
 
/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will continue to go to sleep
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        if (verbose)
            printf("cpu%d removing, rq_head %x, t: %x, next: %x, "
                "link: %x \n", CPU->id, r->rq_head, t,
                r->rq_head.next, t->rq_link);
        list_remove(&t->rq_link);
        if (verbose)
            printf("cpu%d  removed, rq_head %x, t: %x, next: %x, "
                "link: %x \n", CPU->id, r->rq_head, t,
                r->rq_head.next, t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i + 1) * 10000);
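        /*
         * Illustration of the quantum assigned above: it grows with
         * the rq index, i.e. with decreasing priority. A thread taken
         * from rq[0] gets 10 ms ((0 + 1) * 10000 us) before it is
         * preempted, a thread taken from rq[3] gets 40 ms, and so on.
         */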
        t->priority = i;    /* correct rq index */

        /*
         * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
         * when the need for load balancing arises.
         */
        t->flags &= ~THREAD_FLAG_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with priority
 * greater than or equal to start are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}
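
/*
 * Example (informal): with start == 2, each pass of the loop above moves
 * the entire contents of rq[3] into rq[2], then rq[4] into rq[3], and so
 * on, so every thread waiting in rq[start + 1 .. RQ_COUNT - 1] is promoted
 * by exactly one priority level once needs_relink exceeds NEEDS_RELINK_MAX.
 */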

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);

        /* Update thread accounting */
        THREAD->cycles += get_cycle() - THREAD->last_cycle;

#ifndef CONFIG_FPU_LAZY
        fpu_context_save(THREAD->saved_fpu_context);
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */

            /* Save current CPU cycle */
            THREAD->last_cycle = get_cycle();

            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);

            return;
        }

        /*
         * Interrupt priority level of the preempted thread is recorded
         * here to facilitate scheduler() invocations from
         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. Handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        /* must be run after the switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
        case Running:
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
repeat:
            if (THREAD->detached) {
                thread_destroy(THREAD);
            } else {
                /*
                 * The thread structure is kept allocated until
                 * somebody calls thread_detach() on it.
                 */
                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
                    /*
                     * Avoid deadlock.
                     */
                    spinlock_unlock(&THREAD->lock);
                    delay(10);
                    spinlock_lock(&THREAD->lock);
                    goto repeat;
                }
                _waitq_wakeup_unsafe(&THREAD->join_wq, false);
                spinlock_unlock(&THREAD->join_wq.lock);

                THREAD->state = Undead;
                spinlock_unlock(&THREAD->lock);
            }
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in
             * waitq_sleep(). Address of wq->lock is kept in
             * THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context
             * invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }
            if (verbose)
                printf("cpu%d, Sleeping unlocking \n", CPU->id);
            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid,
                thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }
    if (verbose)
        printf("cpu%d looking for next thread\n", CPU->id);
    THREAD = find_best_thread();

    if (verbose)
        printf("cpu%d t locking  THREAD:%x \n", CPU->id, THREAD);
    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);
    if (verbose)
        printf("cpu%d t unlocked after priority THREAD:%x \n",
            CPU->id, THREAD);

    relink_rq(priority);

    /*
     * If both the old and the new task are the same, lots of work is
     * avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address
         * space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
        before_task_runs();
    }

    spinlock_lock(&THREAD->lock);
    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
        CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
        atomic_get(&CPU->nrdy));
#endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
     * thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load-balancing thread that supervises the supply
 * of ready threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, j, k = 0;
    unsigned int i;
    ipl_t ipl;

    /*
     * Detach kcpulb as nobody will call thread_join_timeout() on it.
     */
    thread_detach(THREAD);

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);
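
    /*
     * Worked example: with 9 ready threads system-wide and 4 active
     * CPUs, average = 9 / 4 + 1 = 3. A CPU that currently has only
     * one ready thread will then try to steal count = 3 - 1 = 2
     * threads from busier CPUs.
     */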

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * This doesn't require disabling interrupts
             * because kcpulb has THREAD_FLAG_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads
                 * nor threads that have already been stolen.
                 * The latter rule prevents threads from
                 * migrating between CPUs without ever being
                 * run. We also don't want to steal threads
                 * whose FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (THREAD_FLAG_WIRED |
                    THREAD_FLAG_STOLEN))) &&
                    (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
                    "avg=%ld\n", CPU->id, t->tid, CPU->id,
                    atomic_get(&CPU->nrdy),
                    atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= THREAD_FLAG_STOLEN;
                t->state = Entering;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another
                 * CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    unsigned int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /*
     * We are going to mess with scheduler structures;
     * let's not be interrupted.
     */
    ipl = interrupts_disable();
    for (cpu = 0; cpu < config.cpu_count; cpu++) {

        if (!cpus[cpu].active)
            continue;

        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
            cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
            cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\trq[%d]: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head;
                cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid,
                    thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}

/** @}
 */