/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file	scheduler.c
 * @brief	Scheduler and load balancing.
 *
 * This file contains the scheduler and the kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */
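
/*
 * A note on the counters (inferred from the code below): the global nrdy
 * and the per-CPU CPU->nrdy are kept in step. find_best_thread() and
 * kcpulb() decrement both whenever a thread leaves a run queue, and
 * thread_ready() is expected to increment both, so nrdy should equal the
 * sum of CPU->nrdy over all CPUs.
 */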
/** Carry out actions before new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}
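
/*
 * A summary of the two FPU strategies above (the trap wiring is
 * architecture-specific and only sketched here):
 *
 * - Eager (default): scheduler() saves the outgoing thread's FPU context
 *   on every switch and before_thread_runs() restores (or initializes)
 *   the incoming thread's context.
 *
 * - Lazy (CONFIG_FPU_LAZY): the FPU is merely disabled for every thread
 *   except CPU->fpu_owner. The first FPU instruction executed by another
 *   thread traps, and the architecture's FPU fault handler is expected
 *   to call scheduler_fpu_lazy_request() below to do the deferred
 *   save/restore. Threads that never touch the FPU thus never pay for
 *   FPU context switching.
 */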
/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif
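
/*
 * A note on the locking above: CPU->lock is taken first, then the
 * owner's lock and THREAD->lock. Both locks are dropped before
 * slab_alloc(), which may sleep; while asleep, the thread can be
 * migrated, invalidating everything derived from CPU, hence the
 * goto restart that redoes the whole sequence on the (possibly new) CPU.
 */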
/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises again.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;
}
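
/*
 * For illustration, the quantum assignment above: a thread taken from
 * rq[i] is given us2ticks((i + 1) * 10000) ticks, i.e. 10 ms worth of
 * ticks from rq[0], 20 ms from rq[1], and so on. Lower-priority queues
 * thus trade scheduling latency for longer time quanta.
 */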
/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with priority
 * greater than or equal to @start are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);
}
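
/*
 * Example of one relink pass, assuming start == 1 and five run queues
 * (the queue count is illustrative): rq[2] is appended to rq[1], then
 * rq[3] to rq[2], and rq[4] to rq[3]. Every thread queued below the
 * threshold thereby climbs exactly one priority queue.
 */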
/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler().
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * The interrupt priority level of the preempted thread is
		 * recorded here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
		    (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}
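
/*
 * A note on the context_save()/context_restore() pair above: much like
 * setjmp()/longjmp(), context_save() effectively returns twice. It
 * returns a non-zero value when called directly, and appears to return
 * zero when control comes back via context_restore(). This is why
 * preempted threads later resume in the !context_save() branch and
 * leave scheduler() from there.
 */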
/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. Handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			thread_destroy(THREAD);
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). The address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context
			 * invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n",
			      THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one
		 * address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
	       CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
	       atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter
	 * to the new thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load-balancing thread that supervises
 * the supply of threads for the CPU it is
 * wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation may have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * This doesn't require disabled interrupts,
			 * because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads,
				 * nor threads that have already been stolen.
				 * The latter prevents threads from migrating
				 * between CPUs without ever being run.
				 * We also don't want to steal threads whose
				 * FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN)) &&
				    !t->fpu_context_engaged) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on the local CPU.
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%ld\n",
				       CPU->id, t->tid, CPU->id,
				       atomic_get(&CPU->nrdy),
				       atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}
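
/*
 * A worked example of the balancing arithmetic above (thread counts are
 * illustrative): with 9 ready threads on 3 active CPUs,
 * average = 9 / 3 + 1 = 4. A CPU that has one ready thread computes
 * count = 4 - 1 = 3 and tries to steal up to three threads, but only
 * from CPUs whose nrdy exceeds the average.
 */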

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures,
	 * so let's not be interrupted.
	 */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
		       cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
		       cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid, thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}