/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <print.h>
#include <mm/frame.h>
#include <mm/heap.h>
#include <debug.h>
 
/** Number of ready threads in the system. */
volatile count_t nrdy;

 
/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
	fpu_context_restore(&(THREAD->saved_fpu_context));
}

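/*
 * Note: the fpu_context_restore() above pairs with the
 * fpu_context_save() call in scheduler() below; a thread is always
 * switched away from with its FPU state saved and resumes with that
 * same state restored.
 */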
 
/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

 
/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

	ASSERT(CPU != NULL);

loop:
	cpu_priority_high();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_low();

	if (n == 0) {
		#ifdef __SMP__
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set the CPU-private flag that kcpulb has been started.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
		#endif /* __SMP__ */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This saves energy and, on hyper-threaded CPUs, frees
		 * execution resources for the sibling thread.
		 * On the other hand, a hardware interrupt arriving just
		 * before cpu_sleep() may go unnoticed until the next wakeup.
		 */
		cpu_sleep();
		goto loop;
	}

	cpu_priority_high();

	i = 0;
retry:
	for (; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		/* avoid deadlock with relink_rq() */
		if (!spinlock_trylock(&CPU->lock)) {
			/*
			 * Unlock r and try again.
			 */
			spinlock_unlock(&r->lock);
			goto retry;
		}
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		atomic_dec((int *) &nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

		t->ticks = us2ticks((i+1)*10000);
		t->pri = i;	/* update to the rq index the thread was taken from */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * the next time load balancing is needed.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}
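
/*
 * Quantum sketch, derived from the t->ticks assignment above: a thread
 * taken from rq[i] receives a time quantum of (i+1)*10 ms, so threads
 * in lower-priority queues run less often but for longer stretches:
 *
 *	rq[0]          -> us2ticks(10000) ... 10 ms
 *	rq[1]          -> us2ticks(20000) ... 20 ms
 *	rq[RQ_COUNT-1] -> RQ_COUNT*10 ms
 */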
 

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads whose
 * 'pri' is greater than 'start' are moved up one priority queue.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}
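
/*
 * Illustrative effect of relink_rq(), assuming RQ_COUNT == 4 purely
 * for the example; with start == 1, the loop above performs
 *
 *	rq[1] <- rq[1] + rq[2]
 *	rq[2] <- rq[3]
 *	rq[3] <- empty
 *
 * so every thread sitting in rq[2] or below is promoted one priority
 * level, which bounds how long it can starve there.
 */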
 

/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
	volatile pri_t pri;

	ASSERT(CPU != NULL);

	pri = cpu_priority_high();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
		fpu_context_save(&(THREAD->saved_fpu_context));
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler().
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}

		/*
		 * The CPU priority of the preempted thread is recorded here
		 * to facilitate scheduler() invocations from
		 * cpu_priority_high()'ed code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.pri = pri;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
	 * and the preemption counter. At this point THE could be coming
	 * either from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We must not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}
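
/*
 * As the test 'if (!context_save(...))' above implies, context_save()
 * works like setjmp()/longjmp() with an inverted return convention:
 * it returns nonzero on the direct call and zero when execution
 * resumes at the saved point via context_restore().
 */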
 

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * executed on a new stack. It handles the
 * actual context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		switch (THREAD->state) {
		    case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

			spinlock_lock(&CPU->lock);
			if (CPU->fpu_owner == THREAD)
				CPU->fpu_owner = NULL;
			spinlock_unlock(&CPU->lock);

			free(THREAD);

			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->pri = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * The address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * The state the thread entered scheduler() in is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}


	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both the tasks and the vm mappings are different.
			 * Replace the old mapping with the new one.
			 */
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
	#endif

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and the preemption
	 * counter to the thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}
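
/*
 * The address space switch above, schematically:
 *
 *	TASK == THREAD->task         -> nothing to do
 *	TASK->vm == THREAD->task->vm -> tasks differ but share one vm
 *	                                mapping; nothing to install
 *	otherwise                    -> vm_install() the new mapping
 */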
 

#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread that supervises the supply
 * of ready threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can change between two
	 * passes, so get the most up-to-date counts each time.
	 */
	pri = cpu_priority_high();
	spinlock_lock(&CPU->lock);
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	cpu_priority_restore(pri);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * This doesn't require interrupt disabling, because
			 * kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:
			pri = cpu_priority_high();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads
				 * that have already been stolen. The latter prevents
				 * threads from migrating between CPU's without ever
				 * being run. We also don't want to steal threads whose
				 * FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN)) && !t->fpu_context_engaged) {

					/*
					 * Remove t from r.
					 */

					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on the local CPU.
				 */
				spinlock_lock(&t->lock);
				#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
				#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	}
	else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}
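
/*
 * Balance target used above: with nrdy ready threads spread over
 * config.cpu_active CPU's, the fair share per CPU is
 * nrdy / config.cpu_active, so kcpulb tries to steal
 *
 *	count = nrdy / config.cpu_active - CPU->nrdy
 *
 * threads and keeps going until count drops to zero or no eligible
 * victim is found on any CPU.
 */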
 
#endif /* __SMP__ */