/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/vm.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

/** Number of ready threads in the system. */
volatile count_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&(THREAD->saved_fpu_context));
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}
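
/*
 * Explanatory note (not from the original author): under CONFIG_FPU_LAZY the
 * FPU context is switched lazily. The FPU stays disabled for any thread that
 * is not the current owner (CPU->fpu_owner), so such a thread's first FPU
 * instruction is expected to raise an "FPU unavailable" exception, whose
 * architecture-specific handler then calls scheduler_fpu_lazy_request()
 * below. Threads that never touch the FPU thus pay no FPU switching cost.
 */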

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
	fpu_enable();
	if (CPU->fpu_owner != NULL) {
		fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
	}
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
}
#endif
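
/*
 * Explanatory note (not from the original author): the function above evicts
 * the previous owner's FPU state, installs (or initializes) the context of
 * the current thread, and sets fpu_context_engaged. The engaged flag matters
 * for load balancing: kcpulb() below refuses to steal threads whose FPU
 * context is still held by their CPU, so a lazily-switched thread is not
 * migrated away from its register state.
 */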

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

	ASSERT(CPU != NULL);

loop:
	interrupts_disable();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	interrupts_enable();

	if (n == 0) {
		#ifdef CONFIG_SMP
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set the CPU-private flag that kcpulb has been started.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
		#endif /* CONFIG_SMP */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 * On the other hand, several hardware interrupts can be ignored.
		 */
		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	i = 0;
retry:
	for (; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		/* avoid deadlock with relink_rq() */
		if (!spinlock_trylock(&CPU->lock)) {
			/*
			 * Unlock r and try again.
			 */
			spinlock_unlock(&r->lock);
			goto retry;
		}
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		atomic_dec((int *) &nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

		t->ticks = us2ticks((i+1)*10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;
}
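
/*
 * Explanatory note (not from the original author): the quantum assigned in
 * find_best_thread() grows with the run queue index, so lower-priority
 * threads run less often but for longer. Assuming us2ticks() converts
 * microseconds to clock ticks, a thread taken from rq[0] gets a 10 ms
 * quantum, one from rq[1] gets 20 ms, and so on, up to RQ_COUNT * 10 ms.
 */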

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, in the end, threads with
 * priority greater than or equal to 'start' are moved
 * to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);
}
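
/*
 * Explanatory note (not from the original author): relink_rq() is a simple
 * aging mechanism. Once CPU->needs_relink exceeds NEEDS_RELINK_MAX, each
 * queue rq[i + 1] with i >= start is emptied into rq[i], promoting its
 * threads by one priority level. For example, with start == 2, threads
 * waiting in rq[3] move to rq[2], those in rq[4] to rq[3], and so on, which
 * bounds how long a low-priority thread can starve behind busier queues.
 */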

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. Handles the actual
 * context switch to a new thread.
 *
 */
static void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		switch (THREAD->state) {
		    case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

			spinlock_lock(&CPU->lock);
			if (CPU->fpu_owner == THREAD)
				CPU->fpu_owner = NULL;
			spinlock_unlock(&CPU->lock);

			free(THREAD);

			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * The address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * The entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, much work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both the tasks and the vm mappings are different.
			 * Replace the old one with the new one.
			 */
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);
	#endif

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
	 * to the thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}
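
/*
 * Explanatory note (not from the original author): context_restore() above
 * never returns here. It resumes the chosen THREAD at the point where that
 * thread last called context_save() in scheduler(), i.e. in the
 * "if (!context_save(...))" branch, which runs before_thread_runs() and then
 * returns to whatever code originally invoked scheduler().
 */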

/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(&(THREAD->saved_fpu_context));
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);
			return;
		}

		/*
		 * The interrupt priority level of the preempted thread is recorded
		 * here to facilitate scheduler() invocations from interrupts_disable()'d
		 * code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
	 * and the preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}
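
/*
 * Explanatory note (not from the original author): the last three calls
 * implement the switch to the per-CPU scheduler stack. context_save()
 * snapshots the current context, context_set() rewrites the saved program
 * counter and stack pointer so that the snapshot points at
 * scheduler_separated_stack() running on CPU->stack, and context_restore()
 * then jumps there. Execution continues on a stack that no thread owns.
 */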


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can have changed between two
	 * passes. Each time get the most up-to-date counts.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&CPU->lock);
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	interrupts_restore(ipl);

	if (count <= 0)
		goto satisfied;
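
	/*
	 * Explanatory note (not from the original author): 'count' is the gap
	 * between the system-wide average number of ready threads per active
	 * CPU (nrdy / config.cpu_active) and this CPU's own ready count. For
	 * example, with nrdy == 12, four active CPUs and CPU->nrdy == 1, the
	 * balancer will try to steal 12/4 - 1 = 2 threads from other CPUs.
	 */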

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * No need to disable interrupts, because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:		ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads that have already been stolen.
				 * The latter prevents threads from migrating between CPU's without ever being run.
				 * We also don't want to steal threads whose FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						interrupts_restore(ipl);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					atomic_dec((int *) &nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on the local CPU.
				 */
				spinlock_lock(&t->lock);
				#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
				#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}

#endif /* CONFIG_SMP */