/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
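
/*
 * Scheduler and load balancing.
 *
 * This file implements the per-CPU run-queue scheduler and, on SMP
 * configurations, the kcpulb load-balancing kernel thread.
 */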
 
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void scheduler_separated_stack(void);

atomic_t nrdy;  /**< Number of ready threads in the system. */

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
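    /*
     * With lazy FPU switching, the FPU context is saved and restored only
     * on demand: the FPU stays enabled only if this thread already owns it;
     * otherwise it is disabled so that the thread's first FPU instruction
     * traps and scheduler_fpu_lazy_request() can claim the FPU for it.
     */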
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

/** Take actions after THREAD had run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
    after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
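/** Claim the FPU for the current thread.
 *
 * Save the FPU context of the previous owner, if any, then restore the
 * context of THREAD, lazily allocating and initializing it first if it
 * does not exist yet.
 */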
void scheduler_fpu_lazy_request(void)
{
restart:
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
        CPU->fpu_owner = NULL;
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists) {
        fpu_context_restore(THREAD->saved_fpu_context);
    } else {
        /* Allocate FPU context */
        if (!THREAD->saved_fpu_context) {
            /* Might sleep */
            spinlock_unlock(&THREAD->lock);
            spinlock_unlock(&CPU->lock);
            THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
            /* We may have switched CPUs during slab_alloc */
            goto restart;
        }
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
    spinlock_unlock(&THREAD->lock);

    spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there is nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This saves energy and benefits hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such case, the CPU will continue to go to sleep
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

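        /*
         * Give the thread a time quantum proportional to its priority:
         * (i + 1) * 10 ms, i.e. lower-priority queues get longer slices.
         */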
        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * when the need for load balancing emerges.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with priority
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(THREAD->saved_fpu_context);
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * Interrupt priority level of the preempted thread is recorded here
         * to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
        (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. Handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        /* must be run after the switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
            thread_destroy(THREAD);
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }

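    /*
     * Choose the new thread to run. find_best_thread() never returns NULL;
     * if no thread is ready, it puts the CPU to sleep until one appears.
     */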
    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

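    /* Prevent low-priority threads from starving in the run queues. */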
    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation can have changed between two
     * passes. Each time get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

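            /*
             * The offset k rotates the order in which other CPUs are
             * inspected; it advances after every migrated thread so the
             * balancing effort is spread across CPUs.
             */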
            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * Disabling interrupts is not required here, because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that were already stolen. The latter prevents threads
                 * from migrating between CPUs without ever being run.
                 * We also don't want to steal threads whose FPU context
                 * is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if (!(t->flags & (X_WIRED | X_STOLEN)) && !(t->fpu_context_engaged)) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    printf("Scheduler dump:\n");
    for (cpu = 0; cpu < config.cpu_count; cpu++) {

        if (!cpus[cpu].active)
            continue;

        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d, needs_relink: %d\n",
               cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\trq[%d]: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid, thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}