/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

atomic_t nrdy;

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&(THREAD->saved_fpu_context));
	else {
		fpu_init(&(THREAD->saved_fpu_context));
		THREAD->fpu_context_exists = 1;
	}
#endif
}
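
/*
 * Annotation added in this edit (not in the original source): with
 * CONFIG_FPU_LAZY, no FPU state is restored here at all. The FPU is merely
 * enabled if the incoming thread already owns this CPU's FPU state and
 * disabled otherwise, so that a later FPU instruction from a non-owner can
 * trap and be handled by scheduler_fpu_lazy_request() below. Without the
 * option, the FPU context is restored (or freshly initialized) eagerly on
 * every switch.
 */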

/** Take actions after old thread ran.
 *
 * Perform actions that need to be
 * taken after the running thread
 * was preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&THREAD->saved_fpu_context);
	else {
		fpu_init(&(THREAD->saved_fpu_context));
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;

	spinlock_unlock(&THREAD->lock);
	spinlock_unlock(&CPU->lock);
}
#endif
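
/*
 * Annotation added in this edit (not in the original source): judging from
 * its name and the fpu_enable() at its entry, scheduler_fpu_lazy_request()
 * is presumably invoked from the architecture's "FPU unavailable" trap
 * handler once a thread that does not own this CPU's FPU state executes an
 * FPU instruction. It saves the previous owner's context, restores (or
 * initializes) the current thread's context, and takes over ownership of the
 * CPU's FPU.
 */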
 
/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index if needed */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated when the need for load balancing arises.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}
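
/*
 * Annotation added in this edit (not in the original source): the time
 * quantum assigned above grows with the run queue index,
 * t->ticks = us2ticks((i + 1) * 10000), i.e.:
 *
 *	rq[0]: (0 + 1) * 10000 us = 10 ms
 *	rq[1]: (1 + 1) * 10000 us = 20 ms
 *	...
 *	rq[RQ_COUNT - 1]: RQ_COUNT * 10 ms
 *
 * Since the queues are scanned from index 0 upwards, lower-priority threads
 * are picked less often but receive a longer quantum once they do run.
 */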
 

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with 'pri'
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}
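
/*
 * Annotation added in this edit (not in the original source): once
 * CPU->needs_relink exceeds NEEDS_RELINK_MAX, every queue rq[i + 1] with
 * i >= start is emptied into rq[i], so each thread waiting in those queues
 * is promoted by exactly one priority level per relink. Threads in queues
 * with an index below 'start' are left where they are.
 */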
 

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. It handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
static void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		    case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
			thread_destroy(THREAD);
			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, a lot of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
	#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to the thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}
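
/*
 * Annotation added in this edit (not in the original source), summarizing
 * the flow above: after_thread_ran() is called for the outgoing thread,
 * which is then requeued (Running), destroyed (Exiting), or left on its wait
 * queue (Sleeping); find_best_thread() picks the next thread, relink_rq()
 * gets a chance to unstarve lower priorities, the address space is switched
 * only when the new thread's task uses a different one, and
 * before_thread_runs() followed by context_restore() finally passes control
 * to the new thread.
 */
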
/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(&(THREAD->saved_fpu_context));
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler().
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);
			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded here
		 * to facilitate scheduler() invocations from interrupts_disable()'d
		 * code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}
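
/*
 * Annotation added in this edit (not in the original source): judging from
 * the '!context_save(&THREAD->saved_context)' test above, context_save()
 * appears to return twice: a nonzero value on the initial save, in which
 * case execution falls through to the stack switch, and zero when a later
 * scheduler() invocation does context_restore(&THREAD->saved_context) on
 * behalf of this thread, which is the point where the preempted thread
 * resumes and leaves scheduler().
 */

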
#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation may have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * No need to disable interrupts, because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads that have
				 * already been stolen. The latter prevents threads from migrating
				 * between CPUs without ever being run.
				 * We also don't want to steal threads whose FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Make t ready on the local CPU.
				 */
				spinlock_lock(&t->lock);
				#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
				#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */
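
/*
 * Annotation added in this edit (not in the original source): the balancing
 * target above is 'average', the system-wide ready-thread count divided by
 * the number of active CPUs, plus one. For example, with 9 ready threads on
 * 4 active CPUs, average = 9 / 4 + 1 = 3 (integer division), so a CPU
 * holding only 1 ready thread tries to steal count = 3 - 1 = 2 threads.
 * kcpulb only steals from CPUs whose nrdy exceeds the average, takes victims
 * from the back of their lowest-priority queues first, and skips threads
 * that are wired, already stolen, or still have their FPU context loaded in
 * the remote CPU.
 */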


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures,
	 * let's not be interrupted.
	 */
	ipl = interrupts_disable();
	printf("*********** Scheduler dump ***********\n");
	for (cpu = 0; cpu < config.cpu_count; cpu++) {
		if (!cpus[cpu].active)
			continue;
		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: nrdy: %d needs_relink: %d\n",
		       cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\tRq %d: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				       thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}