/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be protected by a spinlock.
 */

spinlock_t nrdylock;
volatile int nrdy;
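
/*
 * Initialize scheduler-global state, i.e. the spinlock that guards nrdy.
 */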
void scheduler_init(void)
{
	spinlock_initialize(&nrdylock);
}

/*
 * Find a runnable thread for this CPU.
 * Returns with the CPU priority raised, i.e. cpu_priority_high()'d.
 */
struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

loop:
	cpu_priority_high();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_low();

	if (n == 0) {
		#ifdef __SMP__
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set the CPU-private flag that kcpulb has been started.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
		#endif /* __SMP__ */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This saves energy and benefits hyperthreading.
		 * On the other hand, several hardware interrupts can be ignored.
		 */
		cpu_sleep();
		goto loop;
	}

	cpu_priority_high();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		spinlock_lock(&nrdylock);
		nrdy--;
		spinlock_unlock(&nrdylock);

		spinlock_lock(&CPU->lock);
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;
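
		/*
		 * Time quantum note: assuming us2ticks() converts microseconds
		 * to clock ticks, a thread taken from rq[i] gets a quantum of
		 * (i + 1) * 10 ms, e.g. 10 ms for rq[0] and 20 ms for rq[1].
		 */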
		t->ticks = us2ticks((i + 1) * 10000);
		t->pri = i;	/* correct rq index, in case the thread was relinked */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated again
		 * when the need for load balancing arises.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/*
 * This function prevents low-priority threads from starving in run queues.
 * When it decides to relink the run queues, it reconnects the respective
 * pointers so that, as a result, threads with 'pri' greater than or equal
 * to 'start' are moved to a higher-priority queue.
 */
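/*
 * Illustrative example (hypothetical sizes): with RQ_COUNT == 4, a call
 * relink_rq(1) first appends rq[2] to rq[1] and then rq[3] to rq[2], so
 * the threads from the two lowest-priority queues each move up one queue.
 * Relinking only happens once CPU->needs_relink exceeds NEEDS_RELINK_MAX,
 * after which the counter is reset.
 */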
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/*
 * The scheduler: saves the context of the current thread (if any)
 * and continues, on a separate stack, in scheduler_separated_stack().
 */
void scheduler(void)
{
	volatile pri_t pri;

	pri = cpu_priority_high();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
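		/*
		 * context_save() is setjmp()-like: it returns true on the
		 * direct call and false when the saved context is later
		 * resumed via context_restore().
		 */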
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler().
			 */
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}
		THREAD->saved_context.pri = pri;
	}

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE - 8];
	CPU->saved_context.pc = (__address) scheduler_separated_stack;
	context_restore(&CPU->saved_context);
	/* not reached */
}
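
/*
 * The second half of scheduler(), running on the CPU-private stack:
 * it finishes the bookkeeping for the outgoing thread, selects a new
 * thread via find_best_thread(), and switches to it.
 */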
void scheduler_separated_stack(void)
{
	int priority;

	if (THREAD) {
		switch (THREAD->state) {
		    case Running:
			    THREAD->state = Ready;
			    spinlock_unlock(&THREAD->lock);
			    thread_ready(THREAD);
			    break;

		    case Exiting:
			    frame_free((__address) THREAD->kstack);
			    if (THREAD->ustack) {
				    frame_free((__address) THREAD->ustack);
			    }

			    /*
			     * Detach from the containing task.
			     */
			    spinlock_lock(&TASK->lock);
			    list_remove(&THREAD->th_link);
			    spinlock_unlock(&TASK->lock);

			    spinlock_unlock(&THREAD->lock);

			    spinlock_lock(&threads_lock);
			    list_remove(&THREAD->threads_link);
			    spinlock_unlock(&threads_lock);

			    free(THREAD);

			    break;

		    case Sleeping:
			    /*
			     * Prefer the thread after it's woken up.
			     */
			    THREAD->pri = -1;

			    /*
			     * We need to release wq->lock which we locked in waitq_sleep().
			     * The address of wq->lock is kept in THREAD->sleep_queue.
			     */
			    spinlock_unlock(&THREAD->sleep_queue->lock);

			    /*
			     * Check for possible requests for out-of-context invocation.
			     */
			    if (THREAD->call_me) {
				    THREAD->call_me(THREAD->call_me_with);
				    THREAD->call_me = NULL;
				    THREAD->call_me_with = NULL;
			    }

			    spinlock_unlock(&THREAD->lock);

			    break;

		    default:
			    /*
			     * The thread has entered an unexpected state.
			     */
			    panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			    break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, a lot of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both the tasks and the vm mappings are different.
			 * Replace the old mapping with the new one.
			 */
			if (m1) {
				vm_uninstall(m1);
			}
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
	#endif
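
	/*
	 * Hand control over to the new thread; execution resumes wherever
	 * THREAD's context was last saved, typically inside scheduler().
	 */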
	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It supervises the thread supply for the CPU it's wired to.
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation can change between two passes,
	 * so each pass fetches the most up-to-date counts.
	 */
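	/*
	 * Arithmetic sketch (hypothetical numbers): with nrdy == 8,
	 * config.cpu_active == 4, and CPU->nrdy == 0, count comes out as 2,
	 * i.e. this CPU tries to steal two threads to reach the average load.
	 */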
	pri = cpu_priority_high();
	spinlock_lock(&CPU->lock);
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	cpu_priority_restore(pri);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];
			r = &cpu->rq[j];

			/*
			 * Not interested in ourselves.
			 * This requires no interrupt disabling because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:		pri = cpu_priority_high();
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads
				 * that have already been stolen. The latter prevents
				 * threads from migrating between CPUs without ever being run.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN))) {
					/*
					 * Remove t from r.
					 */

					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					spinlock_lock(&nrdylock);
					nrdy--;
					spinlock_unlock(&nrdylock);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on the local CPU.
				 */
				spinlock_lock(&t->lock);
				#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
				#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let the migrated threads run.
		 */
		scheduler();
	}
	else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}

#endif /* __SMP__ */