/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file	scheduler.c
 * @brief	Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Carry out actions before a new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before a new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
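	/*
	 * FPU handling differs between the two configurations below: with
	 * CONFIG_FPU_LAZY, the FPU context is switched only on demand via
	 * scheduler_fpu_lazy_request(); otherwise it is restored eagerly
	 * for every thread that is about to run.
	 */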
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
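/** Claim the FPU for THREAD (lazy FPU context switching).
 *
 * Saves the FPU context of the previous owner (if any), then restores
 * THREAD's saved context or initializes a fresh one, allocating it
 * first if necessary, and finally records THREAD as the owner of this
 * CPU's FPU.
 */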
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context = slab_alloc(fpu_context_slab,
							       0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		 cpu_sleep();
		 goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;
 
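		/*
		 * Give the thread a time quantum that grows with the run
		 * queue index: 10 ms for rq[0], 20 ms for rq[1], and so on,
		 * i.e. lower-priority threads run less often but for longer
		 * stretches.
		 */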
		t->ticks = us2ticks((i+1)*10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when load balancing becomes necessary.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with priority
 * greater than or equal to @start are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
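		/*
		 * Move the contents of each rq[i + 1] up to rq[i], for all
		 * i >= start, so that threads waiting in lower-priority
		 * queues are promoted one level.
		 */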
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * The interrupt priority level of the preempted thread is
		 * recorded here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and the preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We must not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. Handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to the scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		    case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
			thread_destroy(THREAD);
			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	/*
	 * If both the old and the new task are the same, a lot of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	spinlock_lock(&THREAD->lock);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
	 * to the thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread that supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation may have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);
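	/*
	 * For example, with 9 ready threads system-wide and 4 active CPUs,
	 * average is 3; a CPU holding only 1 ready thread will then try to
	 * steal count == 2 threads from its peers.
	 */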

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Doesn't require interrupt disabling, because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads nor threads that have already been stolen.
				 * The latter prevents threads from migrating between CPU's without ever being run.
				 * We also don't want to steal threads whose FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%ld\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/* We are going to mess with scheduler structures,
	 * so let's not be interrupted. */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
		       cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				       thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}