/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief	Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */
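
/*
 * A note on the data structures used below: every CPU keeps RQ_COUNT run
 * queues (CPU->rq[]), where a lower index means a higher priority.  The
 * per-CPU counter CPU->nrdy and the global nrdy above track the number of
 * ready threads; both are decremented here whenever a thread is dequeued
 * and are presumably incremented by thread_ready() when one is enqueued.
 */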

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
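	/*
	 * FPU handling: with CONFIG_FPU_LAZY the FPU is enabled only if the
	 * incoming thread already owns this CPU's FPU state; otherwise it is
	 * disabled so that the thread's first FPU instruction traps, and the
	 * trap handler is then expected to call scheduler_fpu_lazy_request().
	 * Without lazy switching, the saved FPU context is restored (or newly
	 * initialized) right here on every switch.
	 */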
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}
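
/*
 * scheduler_fpu_lazy_request() is invoked when a thread uses the FPU while
 * lazy FPU context switching is enabled, presumably from the architecture's
 * "FPU disabled" trap handler.  It saves the context of the previous FPU
 * owner, restores or allocates the context of the current thread and makes
 * THREAD the new owner of this CPU's FPU state.
 */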
#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;
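
		/*
		 * The time quantum grows with the queue index: queue i yields
		 * a quantum of (i + 1) * 10 ms (us2ticks of (i + 1) * 10000),
		 * so lower-priority threads run longer once they finally get
		 * the CPU.
		 */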
		t->ticks = us2ticks((i+1)*10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated when the
		 * need for load balancing arises again.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with priority
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
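	/*
	 * CPU->needs_relink is presumably advanced elsewhere (e.g. by the
	 * clock interrupt); the queues are reshuffled only once it exceeds
	 * NEEDS_RELINK_MAX, so the cost of relinking is paid at most once
	 * per that many ticks.
	 */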
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
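		/*
		 * context_save() is used here like setjmp(): it presumably
		 * returns true when the context is being saved and false when
		 * control comes back through context_restore().  The false
		 * branch below is therefore where a previously preempted
		 * thread resumes and leaves scheduler().
		 */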
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded here
		 * to facilitate scheduler() invocations from interrupts_disable()'d
		 * code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We must not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
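	/*
	 * Redirect the just-saved context so that context_restore() below
	 * resumes in scheduler_separated_stack() on the dedicated per-CPU
	 * stack instead of returning here.
	 */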
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		    case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD);
			} else {
				/*
				 * The thread structure is kept allocated until somebody
				 * calls thread_detach() on it.
				 */
				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock.
					 */
					spinlock_unlock(&THREAD->lock);
					delay(10);
					spinlock_lock(&THREAD->lock);
					goto repeat;
				}
				_waitq_wakeup_unsafe(&THREAD->join_wq, false);
				spinlock_unlock(&THREAD->join_wq.lock);

				THREAD->state = Undead;
				spinlock_unlock(&THREAD->lock);
			}
			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	/*
	 * If the old and the new task are the same, a lot of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	spinlock_lock(&THREAD->lock);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread that supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation may have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);
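	/*
	 * Example: with 8 ready threads system-wide and 2 active CPUs,
	 * average = 8 / 2 + 1 = 5; a CPU that currently has only 2 ready
	 * threads will then try to steal count = 3 threads from its peers.
	 */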

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * This doesn't require interrupt disabling because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
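			/*
			 * Walking the queue from the tail presumably picks the
			 * threads that would wait the longest on their current
			 * CPU and are the least likely to have a warm cache
			 * there, which makes them the cheapest to migrate.
			 */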
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads nor threads that have
				 * already been stolen. The latter prevents threads from migrating
				 * between CPUs without ever being run. We also don't want to steal
				 * threads whose FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%ld\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/* We are going to mess with scheduler structures,
	 * let's not be interrupted */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
		       cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				       thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}

/** @}
 */