/*
 * Copyright (c) 2001-2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief	Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
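	/*
	 * With lazy FPU context switching, keep the FPU enabled only for its
	 * current owner; any other thread traps on its first FPU use and has
	 * its context loaded then (see scheduler_fpu_lazy_request()).
	 */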
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context =
				slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

107 decky 171
/** Get thread to be scheduled
172
 *
173
 * Get the optimal thread to be scheduled
109 jermar 174
 * according to thread accounting and scheduler
107 decky 175
 * policy.
176
 *
177
 * @return Thread to be scheduled.
178
 *
179
 */
483 jermar 180
static thread_t *find_best_thread(void)
1 jermar 181
{
182
	thread_t *t;
183
	runq_t *r;
783 palkovsky 184
	int i;
1 jermar 185
 
227 jermar 186
	ASSERT(CPU != NULL);
187
 
1 jermar 188
loop:
413 jermar 189
	interrupts_enable();
1 jermar 190
 
783 palkovsky 191
	if (atomic_get(&CPU->nrdy) == 0) {
1 jermar 192
		/*
193
		 * For there was nothing to run, the CPU goes to sleep
194
		 * until a hardware interrupt or an IPI comes.
195
		 * This improves energy saving and hyperthreading.
196
		 */
785 jermar 197
 
198
		/*
199
		 * An interrupt might occur right now and wake up a thread.
200
		 * In such case, the CPU will continue to go to sleep
201
		 * even though there is a runnable thread.
202
		 */
203
 
1 jermar 204
		 cpu_sleep();
205
		 goto loop;
206
	}
207
 
413 jermar 208
	interrupts_disable();
114 jermar 209
 
898 jermar 210
	for (i = 0; i<RQ_COUNT; i++) {
15 jermar 211
		r = &CPU->rq[i];
1 jermar 212
		spinlock_lock(&r->lock);
213
		if (r->n == 0) {
214
			/*
215
			 * If this queue is empty, try a lower-priority queue.
216
			 */
217
			spinlock_unlock(&r->lock);
218
			continue;
219
		}
213 jermar 220
 
783 palkovsky 221
		atomic_dec(&CPU->nrdy);
475 jermar 222
		atomic_dec(&nrdy);
1 jermar 223
		r->n--;
224
 
225
		/*
226
		 * Take the first thread from the queue.
227
		 */
228
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
229
		list_remove(&t->rq_link);
230
 
231
		spinlock_unlock(&r->lock);
232
 
233
		spinlock_lock(&t->lock);
15 jermar 234
		t->cpu = CPU;
1 jermar 235
 
2067 jermar 236
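		/*
		 * The time quantum follows the run queue index:
		 * (i + 1) * 10 ms, so lower-priority threads receive
		 * proportionally longer slices.
		 */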
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises.
		 */
		t->flags &= ~THREAD_FLAG_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with priority
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
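	/*
	 * CPU->needs_relink is advanced outside of this file; the queues are
	 * collapsed upward only once it exceeds NEEDS_RELINK_MAX, which
	 * amortizes the cost of relinking.
	 */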
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);

		/* Update thread accounting */
		THREAD->cycles += get_cycle() - THREAD->last_cycle;

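		/*
		 * Without lazy FPU switching, the FPU context is saved
		 * eagerly on every reschedule; with CONFIG_FPU_LAZY it is
		 * saved on demand in scheduler_fpu_lazy_request().
		 */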
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */

			/* Save current CPU cycle */
			THREAD->last_cycle = get_cycle();

			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded
		 * here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. Handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD);
			} else {
				/*
				 * The thread structure is kept allocated until
				 * somebody calls thread_detach() on it.
				 */
				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock: do not block on
					 * join_wq.lock while still holding
					 * THREAD->lock; back off and retry.
					 */
					spinlock_unlock(&THREAD->lock);
					delay(10);
					spinlock_lock(&THREAD->lock);
					goto repeat;
				}
				_waitq_wakeup_unsafe(&THREAD->join_wq, false);
				spinlock_unlock(&THREAD->join_wq.lock);

				THREAD->state = Undead;
				spinlock_unlock(&THREAD->lock);
			}
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). Address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context
			 * invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid,
			    thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	/*
	 * If both the old and the new task are the same, lots of work is
	 * avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address
		 * space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	spinlock_lock(&THREAD->lock);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
	    CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
	    atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
	 * thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread. It supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation may have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 */
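	/*
	 * average is the per-CPU average of ready threads, rounded up;
	 * count is how many threads this CPU is short of that average.
	 */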
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

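			/*
			 * Rotate the starting CPU by k so that successive
			 * attempts spread the stealing over different CPU's.
			 */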
			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Doesn't require interrupt disabling because kcpulb
			 * has THREAD_FLAG_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads
				 * nor threads that have already been stolen.
				 * The latter prevents threads from migrating
				 * between CPU's without ever being run. We
				 * also don't want to steal threads whose FPU
				 * context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (THREAD_FLAG_WIRED |
				    THREAD_FLAG_STOLEN))) &&
				    (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
				    "avg=%nd\n", CPU->id, t->tid, CPU->id,
				    atomic_get(&CPU->nrdy),
				    atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= THREAD_FLAG_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures,
	 * let's not be interrupted.
	 */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
		    cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head;
			    cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				    thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}

/** @}
 */