/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>
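
/** Number of ready threads in the system. */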
atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
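
	/*
	 * With CONFIG_FPU_LAZY, the FPU context is switched only on demand:
	 * the FPU stays enabled for its current owner and is kept disabled
	 * for everyone else, so that a non-owner's first FPU instruction
	 * can trap and have scheduler_fpu_lazy_request() perform the switch.
	 */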
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&(THREAD->saved_fpu_context));
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}
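
/** Process lazy FPU-switch request
 *
 * Save the FPU context of the previous owner, if any, and install
 * the context of THREAD, which becomes the new owner of this CPU's
 * FPU unit.
 *
 */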
#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
	fpu_enable();
	if (CPU->fpu_owner != NULL) {
		fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
	}
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

	ASSERT(CPU != NULL);

loop:
	interrupts_disable();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	interrupts_enable();

	if (n == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This saves energy and plays nicely with hyperthreading.
		 */
		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	i = 0;
retry:
	for (; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		/* avoid deadlock with relink_rq() */
		if (!spinlock_trylock(&CPU->lock)) {
			/*
			 * Unlock r and try again.
			 */
			spinlock_unlock(&r->lock);
			goto retry;
		}
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;
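
		/*
		 * Threads picked from rq[i] get a quantum of (i + 1) * 10 ms,
		 * so lower-priority queues receive proportionally longer
		 * quanta.
		 */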
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when load balancing needs arise.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in run queues.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to 'start' are moved up one
 * priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
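	/*
	 * needs_relink is assumed to be advanced elsewhere (e.g. by the
	 * clock interrupt handler), so the relinking below happens only
	 * once in a while rather than on every reschedule.
	 */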
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * executed on the separate scheduler stack.
 * It handles the actual context switch to
 * a new thread.
 *
 */
static void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		switch (THREAD->state) {
		    case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
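			/*
			 * Freeing the kernel stack of the exiting thread is
			 * safe here because we are already running on the
			 * separate per-CPU scheduler stack.
			 */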
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

			spinlock_lock(&CPU->lock);
			if (CPU->fpu_owner == THREAD)
				CPU->fpu_owner = NULL;
			spinlock_unlock(&CPU->lock);

			free(THREAD);

			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * The address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * Entering this state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);
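
	/*
	 * Relinking starts at the priority of the thread we have just
	 * picked: only the queues below it can starve, so only those
	 * are moved up by relink_rq().
	 */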
	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, much work can be avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old address space with the new one.
			 */
			as_install(as2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);
	#endif

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
	 * to the new thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(&(THREAD->saved_fpu_context));
#endif
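		/*
		 * context_save() behaves like setjmp(): it returns true
		 * immediately after saving the context, and false when the
		 * saved context is resumed via context_restore() later on.
		 */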
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);
			return;
		}

		/*
		 * The interrupt priority level of the preempted thread is
		 * recorded here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and the preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load-balancing thread, supervising
 * the supply of threads for the CPU it's
 * wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can change between two
	 * passes. Each time get the most up-to-date counts.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&CPU->lock);
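	/*
	 * count is the average number of ready threads per active CPU
	 * minus the number of ready threads on this CPU, i.e. how many
	 * threads this CPU is short of its fair share.
	 */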
	count = atomic_get(&nrdy) / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	interrupts_restore(ipl);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Doesn't require interrupt disabling, because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:		ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads or threads
				 * that have already been stolen. The latter prevents
				 * threads from migrating between CPU's without ever
				 * being run. We also don't want to steal threads whose
				 * FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						interrupts_restore(ipl);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
				#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, atomic_get(&nrdy) / config.cpu_active);
				#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures,
	 * let's not be interrupted.
	 */
	ipl = interrupts_disable();
	printf("*********** Scheduler dump ***********\n");
	for (cpu = 0; cpu < config.cpu_count; cpu++) {
		if (!cpus[cpu].active)
			continue;
		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: nrdy: %d needs_relink: %d\n",
		       cpus[cpu].id, cpus[cpu].nrdy, cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\tRq %d: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				       thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}