/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <print.h>
#include <mm/frame.h>
#include <mm/heap.h>

volatile int nrdy;
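
/*
 * nrdy above is the system-wide count of ready threads; each CPU also
 * keeps its own count in CPU->nrdy, manipulated under CPU->lock, while
 * nrdy itself is updated atomically (see atomic_dec() below).
 */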


/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
	fpu_context_restore(&(THREAD->saved_fpu_context));
}
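
/*
 * Note: the FPU context is saved by scheduler() and restored eagerly here;
 * the fpu_owner and fpu_context_engaged fields used later in this file hint
 * at additional lazy FPU handling elsewhere (an inference from this file
 * alone).
 */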


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}
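
/*
 * Nothing to initialize yet; the per-CPU run queues are presumably set up
 * elsewhere during CPU initialization (not shown in this file).
 */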


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

loop:
	cpu_priority_high();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_low();

	if (n == 0) {
		#ifdef __SMP__
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set the CPU-private flag that kcpulb has been started.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
		#endif /* __SMP__ */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and benefits hyperthreading.
		 * On the other hand, several hardware interrupts can be ignored.
		 */
		cpu_sleep();
		goto loop;
	}

	cpu_priority_high();

	i = 0;
retry:
	for (; i<RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		/* avoid deadlock with relink_rq() */
		if (!spinlock_trylock(&CPU->lock)) {
			/*
			 * Unlock r and try again.
			 */
			spinlock_unlock(&r->lock);
			goto retry;
		}
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

		t->ticks = us2ticks((i+1)*10000);
		t->pri = i;	/* correct the possibly stale rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated when the
		 * need for load balancing emerges.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}
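
/*
 * A worked example of the quantum formula above: a thread taken from rq[i]
 * receives us2ticks((i+1)*10000) ticks of CPU time, i.e. 10 ms for rq[0],
 * 20 ms for rq[1] and so on, so lower-priority threads are scheduled less
 * often but get proportionally longer quanta.
 */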


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * 'pri' greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i<RQ_COUNT-1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}
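
/*
 * Example of the relinking above: with start == 2, the contents of rq[3]
 * are appended to rq[2], rq[4] to rq[3], and so on up to rq[RQ_COUNT - 1],
 * so every thread waiting in those queues effectively gains one priority
 * level.
 */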


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
	volatile pri_t pri;

	pri = cpu_priority_high();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
		fpu_context_save(&(THREAD->saved_fpu_context));
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}

		/*
		 * CPU priority of preempted thread is recorded here
		 * to facilitate scheduler() invocations from
		 * cpu_priority_high()'ed code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.pri = pri;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}
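
/*
 * A note on the context_save() calls above: as the "threads leave
 * scheduler()" branch shows, context_save() returns a nonzero value when
 * the context is being saved, and zero when control returns through the
 * saved context via context_restore(); this is analogous to
 * setjmp()/longjmp(), only with inverted return values.
 */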


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the
 * actual context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
	int priority;

	if (THREAD) {
		switch (THREAD->state) {
		    case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

			spinlock_lock(&CPU->lock);
			if (CPU->fpu_owner == THREAD)
				CPU->fpu_owner = NULL;
			spinlock_unlock(&CPU->lock);

			free(THREAD);

			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->pri = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * The thread is in an unexpected state.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}
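
	/*
	 * Note that find_best_thread() never returns without a thread: it
	 * loops, putting the CPU to sleep if necessary, until a ready thread
	 * is found.
	 */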

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both tasks and vm mappings are different.
			 * Replace the old one with the new one.
			 */
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
	#endif

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can have changed between two
	 * passes. Each time get the most up-to-date counts.
	 */
	pri = cpu_priority_high();
	spinlock_lock(&CPU->lock);
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	cpu_priority_restore(pri);

	if (count <= 0)
		goto satisfied;
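
	/*
	 * Example: with 4 active CPUs, nrdy == 12 and CPU->nrdy == 1,
	 * count == 12 / 4 - 1 == 2, i.e. two threads need to be stolen to
	 * bring this CPU up to the average load.
	 */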

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j=RQ_COUNT-1; j >= 0; j--) {
		for (i=0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Doesn't require interrupt disabling because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:		pri = cpu_priority_high();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads nor threads that
				 * have already been stolen. The latter prevents threads from
				 * migrating between CPU's without ever being run.
				 * We also don't want to steal threads whose FPU context is
				 * still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {

					/*
					 * Remove t from r.
					 */

					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on the local CPU.
				 */
				spinlock_lock(&t->lock);
				#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
				#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	}
	else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}

#endif /* __SMP__ */