/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
	#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
	#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&(THREAD->saved_fpu_context));
	else {
		fpu_init(&(THREAD->saved_fpu_context));
		THREAD->fpu_context_exists = 1;
	}
	#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
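/** Request a lazy FPU context switch.
 *
 * Added commentary, not part of the original: judging from the code below
 * and from before_thread_runs(), which disables the FPU for every thread
 * that is not CPU->fpu_owner, this function is presumably invoked from the
 * architecture's "FPU unavailable" trap handler the first time a thread
 * touches the FPU. It saves the previous owner's FPU context, installs or
 * initializes the context of THREAD, and makes THREAD the new owner.
 */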
void scheduler_fpu_lazy_request(void)
{
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(&THREAD->saved_fpu_context);
	} else {
		fpu_init(&(THREAD->saved_fpu_context));
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and benefits hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

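		/*
		 * Illustrative numbers (added commentary): the quantum grows
		 * with the rq index, (i+1)*10000 us; taking us2ticks() to
		 * convert microseconds to ticks, as its name suggests, a
		 * thread from rq[0] gets 10 ms, from rq[1] 20 ms, and so on.
		 */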
		t->ticks = us2ticks((i+1)*10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with 'pri'
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
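			/*
			 * Worked example (added commentary): with start == 2,
			 * the first iteration empties rq[3] into rq[2], the
			 * next empties rq[4] into rq[3], and so on; every
			 * waiting thread thus climbs one priority queue.
			 */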
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
		#ifndef CONFIG_FPU_LAZY
		fpu_context_save(&(THREAD->saved_fpu_context));
		#endif
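		/*
		 * Added commentary: judging from this use and the comment
		 * inside the branch, context_save() behaves setjmp()-like:
		 * it returns true on the direct call and false when the
		 * thread is resumed via context_restore(), so the branch
		 * below is the resume path.
		 */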
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler().
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);
			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded here
		 * to facilitate scheduler() invocations from interrupts_disable()'d
		 * code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. It handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		    case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
			thread_destroy(THREAD);
			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * The thread has entered an unexpected state.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

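	/*
	 * Added commentary: the chosen thread's priority serves as the
	 * relink threshold, so when CPU->needs_relink grows too large,
	 * queues below the priority about to run get a chance to move up
	 * (see relink_rq() above).
	 */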
	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
	#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising
 * the supply of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation may have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 */
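	/*
	 * Worked example (added commentary): with 7 ready threads
	 * system-wide and 2 active CPUs, average = 7/2 + 1 = 4; a CPU
	 * holding only 1 ready thread then tries to steal
	 * count = 4 - 1 = 3 threads.
	 */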
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first
	 * and the highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * This doesn't require interrupt disabling,
			 * because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads already stolen.
				 * The latter prevents threads from migrating between CPU's without ever being run.
				 * We also don't want to steal threads whose FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
				#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
				#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/* We are going to mess with scheduler structures,
	 * let's not be interrupted */
	ipl = interrupts_disable();
	printf("Scheduler dump:\n");
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: nrdy: %d, needs_relink: %d\n",
		       cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				       thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}
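
/*
 * Example output (added commentary, reconstructed from the printf formats
 * above; the exact numbers are made up):
 *
 * Scheduler dump:
 * cpu0: nrdy: 3, needs_relink: 1
 *	rq[0]: 5(Ready) 7(Ready)
 *	rq[4]: 2(Ready)
 */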