/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
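	/*
	 * With lazy FPU context switching, the FPU stays disabled unless the
	 * incoming thread already owns this CPU's FPU; any other thread's
	 * first FPU instruction will trap and the FPU context is switched in
	 * scheduler_fpu_lazy_request(). Without lazy switching, the thread's
	 * FPU context is restored (or freshly initialized) eagerly right here.
	 */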
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();
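
	/*
	 * Scan the run queues from the highest priority (index 0) down to the
	 * lowest priority and dispatch the first thread found.
	 */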
	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;
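
		/*
		 * Give the thread a time slice proportional to its run queue:
		 * a thread taken from queue i gets an (i + 1) * 10 ms quantum,
		 * so threads from lower-priority queues receive longer quanta.
		 */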
		t->ticks = us2ticks((i+1)*10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that threads with priority greater
 * than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
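		/*
		 * Promote every thread in rq[i + 1] one level up into rq[i],
		 * for all queues from 'start' onwards. Threads lingering in
		 * low-priority queues thus eventually bubble up and get a
		 * chance to run.
		 */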
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
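		/*
		 * context_save() returns true when the context is being saved
		 * and false when the saved context is later resumed through
		 * context_restore(). The branch below is therefore the point
		 * where a previously preempted thread continues its execution.
		 */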
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * The interrupt priority level of the preempted thread is recorded
		 * here to facilitate scheduler() invocations from interrupts_disable()'d
		 * code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. Handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		    case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
			thread_destroy(THREAD);
			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to the thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load-balancing thread that supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation can change between two
	 * passes. Each time, get the most up-to-date counts.
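	 *
	 * 'average' is this CPU's fair share of ready threads (rounded up);
	 * 'count' is how many threads this CPU is short of that share and
	 * therefore how many it will try to steal.
	 */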
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;
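
			/*
			 * 'k' is advanced after each successful migration so
			 * that consecutive steal attempts start from a
			 * different CPU each time.
			 */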
515
 
516
			cpu = &cpus[(i + k) % config.cpu_active];
517
 
518
			/*
519
			 * Not interested in ourselves.
520
			 * Doesn't require interrupt disabling for kcpulb is X_WIRED.
521
			 */
15 jermar 522
			if (CPU == cpu)
783 palkovsky 523
				continue;
524
			if (atomic_get(&cpu->nrdy) <= average)
525
				continue;
1 jermar 526
 
784 palkovsky 527
			ipl = interrupts_disable();
115 jermar 528
			r = &cpu->rq[j];
1 jermar 529
			spinlock_lock(&r->lock);
530
			if (r->n == 0) {
531
				spinlock_unlock(&r->lock);
413 jermar 532
				interrupts_restore(ipl);
1 jermar 533
				continue;
534
			}
535
 
536
			t = NULL;
537
			l = r->rq_head.prev;	/* search rq from the back */
538
			while (l != &r->rq_head) {
539
				t = list_get_instance(l, thread_t, rq_link);
540
				/*
125 jermar 541
				 * We don't want to steal CPU-wired threads neither threads already stolen.
1 jermar 542
				 * The latter prevents threads from migrating between CPU's without ever being run.
125 jermar 543
				 * We don't want to steal threads whose FPU context is still in CPU.
73 vana 544
				 */
1 jermar 545
				spinlock_lock(&t->lock);
73 vana 546
				if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
1 jermar 547
					/*
548
					 * Remove t from r.
549
					 */
550
					spinlock_unlock(&t->lock);
551
 
783 palkovsky 552
					atomic_dec(&cpu->nrdy);
475 jermar 553
					atomic_dec(&nrdy);
1 jermar 554
 
125 jermar 555
					r->n--;
1 jermar 556
					list_remove(&t->rq_link);
557
 
558
					break;
559
				}
560
				spinlock_unlock(&t->lock);
561
				l = l->prev;
562
				t = NULL;
563
			}
564
			spinlock_unlock(&r->lock);
565
 
566
			if (t) {
567
				/*
568
				 * Ready t on local CPU
569
				 */
570
				spinlock_lock(&t->lock);
906 palkovsky 571
#ifdef KCPULB_VERBOSE
1196 cejka 572
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
906 palkovsky 573
#endif
1 jermar 574
				t->flags |= X_STOLEN;
1115 jermar 575
				t->state = Entering;
1 jermar 576
				spinlock_unlock(&t->lock);
577
 
578
				thread_ready(t);
579
 
413 jermar 580
				interrupts_restore(ipl);
1 jermar 581
 
582
				if (--count == 0)
583
					goto satisfied;
584
 
585
				/*
125 jermar 586
				 * We are not satisfied yet, focus on another CPU next time.
1 jermar 587
				 */
588
				k++;
589
 
590
				continue;
591
			}
413 jermar 592
			interrupts_restore(ipl);
1 jermar 593
		}
594
	}
595
 
783 palkovsky 596
	if (atomic_get(&CPU->nrdy)) {
1 jermar 597
		/*
598
		 * Be a little bit light-weight and let migrated threads run.
599
		 */
600
		scheduler();
779 jermar 601
	} else {
1 jermar 602
		/*
603
		 * We failed to migrate a single thread.
779 jermar 604
		 * Give up this turn.
1 jermar 605
		 */
779 jermar 606
		goto loop;
1 jermar 607
	}
608
 
609
	goto not_satisfied;
125 jermar 610
 
1 jermar 611
satisfied:
612
	goto loop;
613
}
614
 
458 decky 615
#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures,
	 * let's not be interrupted.
	 */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
		       cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				       thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}