/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <print.h>
#include <mm/frame.h>
#include <mm/heap.h>
#include <debug.h>

volatile count_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&(THREAD->saved_fpu_context));
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}
 
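/** Take over the FPU for the running thread (lazy FPU switching)
 *
 * Save the FPU context of the current owner, if any, and restore
 * (or initialize) the context of THREAD, which then becomes the
 * new owner. Note: since before_thread_runs() keeps the FPU
 * disabled for non-owners under FPU_LAZY, this is presumably
 * reached via the trap taken on a non-owner's first FPU
 * instruction. (Descriptive comment; the function itself carried
 * no documentation.)
 *
 */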
#ifdef FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
	fpu_enable();
	if (CPU->fpu_owner != NULL) {
		fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
	}
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

	ASSERT(CPU != NULL);

loop:
	cpu_priority_high();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_low();

	if (n == 0) {
		#ifdef __SMP__
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set the CPU-private flag that kcpulb has been started.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
		#endif /* __SMP__ */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This saves energy and is beneficial to hyperthreading.
		 * On the other hand, several hardware interrupts can be ignored.
		 */
		cpu_sleep();
		goto loop;
	}

	cpu_priority_high();

	i = 0;
retry:
	for (; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		/* avoid deadlock with relink_rq() */
		if (!spinlock_trylock(&CPU->lock)) {
			/*
			 * Unlock r and try again.
			 */
			spinlock_unlock(&r->lock);
			goto retry;
		}
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		atomic_dec((int *) &nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;
 
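		/*
		 * The time quantum scales with the queue index: a thread
		 * taken from rq[i] is allotted (i + 1) * 10000 us, so threads
		 * in lower-priority queues run less often but receive
		 * proportionally longer slices.
		 */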
		t->ticks = us2ticks((i + 1) * 10000);
		t->pri = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when load balancing becomes necessary again.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with 'pri'
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
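	/*
	 * needs_relink is presumably advanced elsewhere (e.g. by the
	 * clock interrupt handler); relinking is performed only once it
	 * exceeds NEEDS_RELINK_MAX, which keeps this path cheap on most
	 * invocations.
	 */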
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
	volatile pri_t pri;

	ASSERT(CPU != NULL);

	pri = cpu_priority_high();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
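		/*
		 * In the eager model, the outgoing thread's FPU context is
		 * saved on every context switch; with FPU_LAZY defined, the
		 * save is deferred until another thread actually touches
		 * the FPU (see scheduler_fpu_lazy_request()).
		 */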
#ifndef FPU_LAZY
		fpu_context_save(&(THREAD->saved_fpu_context));
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}

		/*
		 * The CPU priority of the preempted thread is recorded here
		 * to facilitate scheduler() invocations from
		 * cpu_priority_high()'ed code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.pri = pri;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
	 * and the preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
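	/*
	 * THE lives at the base of every stack, hence the cast below:
	 * copy the current values onto the CPU's private stack before
	 * switching to it.
	 */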
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * executed on the new stack. It handles the
 * actual context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		switch (THREAD->state) {
		    case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
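			/*
			 * Freeing THREAD's kernel stack is safe here because
			 * this code already runs on the CPU's private stack
			 * (the switch happened in scheduler()).
			 */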
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

			spinlock_lock(&CPU->lock);
			if (CPU->fpu_owner == THREAD)
				CPU->fpu_owner = NULL;
			spinlock_unlock(&CPU->lock);

			free(THREAD);

			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->pri = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * The address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * Entering scheduler in an unexpected state.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}


	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);
 
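	/*
	 * Relinking starts at the chosen thread's priority: queues with
	 * an index greater than or equal to 'priority' may be shifted one
	 * level up, preventing starvation of lower-priority threads
	 * (see relink_rq()).
	 */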
	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both tasks and vm mappings are different.
			 * Replace the old one with the new one.
			 */
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
	#endif

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and the preemption
	 * counter to the thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can change between two
	 * passes. Each time, get the most up-to-date counts.
	 */
	pri = cpu_priority_high();
	spinlock_lock(&CPU->lock);
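	/*
	 * 'count' is the shortfall with respect to the average load:
	 * nrdy / config.cpu_active ready threads per CPU, minus the
	 * number already ready on this CPU.
	 */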
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	cpu_priority_restore(pri);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * No interrupt disabling is required, because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:		pri = cpu_priority_high();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads
				 * that have already been stolen. The latter prevents
				 * threads from migrating between CPU's without ever being run.
				 * We also don't want to steal threads whose FPU context
				 * is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {

					/*
					 * Remove t from r.
					 */

					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					atomic_dec((int *) &nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
				#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
				#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	}
	else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}

#endif /* __SMP__ */