/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be protected by a spinlock.
 */

spinlock_t nrdylock;
volatile int nrdy;
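
/*
 * For illustration, updates of nrdy in this file follow the pattern
 * below (a sketch of the locking convention, not additional code to
 * execute):
 *
 *      spinlock_lock(&nrdylock);
 *      nrdy--;
 *      spinlock_unlock(&nrdylock);
 */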
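/*
 * Perform actions that need to be taken before a thread starts running
 * on this CPU: architecture-specific preparation and restoring the FPU
 * context.
 */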
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore();
}


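/*
 * Initialize the scheduler's global state.
 */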
void scheduler_init(void)
{
    spinlock_initialize(&nrdylock);
}

/*
 * Find and return the best candidate thread to run next; sleeps if
 * there is nothing to run. Returns with cpu_priority_high() in effect,
 * i.e. with interrupts disabled.
 */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag indicating that kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
        #endif /* __SMP__ */
96
 
97
        /*
98
         * For there was nothing to run, the CPU goes to sleep
99
         * until a hardware interrupt or an IPI comes.
100
         * This improves energy saving and hyperthreading.
101
         * On the other hand, several hardware interrupts can be ignored.
102
         */
103
         cpu_sleep();
104
         goto loop;
105
    }

    cpu_priority_high();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

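        /*
         * A runnable thread exists in this queue; update the global
         * and per-CPU counts of ready threads before dequeuing it.
         */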
        spinlock_lock(&nrdylock);
        nrdy--;
        spinlock_unlock(&nrdylock);

        spinlock_lock(&CPU->lock);
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

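        /*
         * Threads taken from lower-priority queues receive proportionally
         * longer quanta: rq[0] yields 10000 us, rq[1] 20000 us, and so on.
         */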
        t->ticks = us2ticks((i+1)*10000);
        t->pri = i; /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated again
         * when the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

/*
 * This function prevents low-priority threads from starving in rq's.
 * When it decides to relink rq's, it reconnects the respective pointers
 * so that, as a result, threads with 'pri' greater than or equal to
 * 'start' are moved to a higher-priority queue.
 */
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT-1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}

/*
 * The scheduler.
 *
 * Saves the context of the current thread (if any), then switches to the
 * CPU-private stack and continues in scheduler_separated_stack().
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler().
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        THREAD->saved_context.pri = pri;
    }

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
    CPU->saved_context.pc = (__address) scheduler_separated_stack;
    context_restore(&CPU->saved_context);
    /* not reached */
}
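/*
 * Second half of scheduler(), executed on the CPU-private stack.
 * Disposes of the old thread according to its state, then picks the
 * best ready thread and switches to its context.
 */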
void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
                THREAD->state = Ready;
                spinlock_unlock(&THREAD->lock);
                thread_ready(THREAD);
                break;

            case Exiting:
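                /*
                 * Release the thread's kernel stack and, if present,
                 * its user stack.
                 */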
                frame_free((__address) THREAD->kstack);
                if (THREAD->ustack) {
                    frame_free((__address) THREAD->ustack);
                }

                /*
                 * Detach from the containing task.
                 */
                spinlock_lock(&TASK->lock);
                list_remove(&THREAD->th_link);
                spinlock_unlock(&TASK->lock);

                spinlock_unlock(&THREAD->lock);

                spinlock_lock(&threads_lock);
                list_remove(&THREAD->threads_link);
                spinlock_unlock(&threads_lock);

                free(THREAD);

                break;

            case Sleeping:
                /*
                 * Prefer the thread after it's woken up.
                 */
                THREAD->pri = -1;

                /*
                 * We need to release wq->lock which we locked in waitq_sleep().
                 * The address of wq is kept in THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
                 * Check for possible requests for out-of-context invocation.
                 */
                if (THREAD->call_me) {
                    THREAD->call_me(THREAD->call_me_with);
                    THREAD->call_me = NULL;
                    THREAD->call_me_with = NULL;
                }

                spinlock_unlock(&THREAD->lock);

                break;

            default:
                /*
                 * Entering the scheduler in this state is unexpected.
                 */
                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both tasks and vm mappings are different.
             * Replace the old one with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
    #endif

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It keeps the CPU it is wired to supplied with ready threads.
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads to be migrated/stolen from other
     * CPU's: the difference between the average number of ready threads
     * per active CPU and this CPU's count. Note that the situation can
     * change between two passes; get the most up-to-date counts each time.
     */
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPU's first and the
     * highest-priority queues on all CPU's last.
     */
    for (j=RQ_COUNT-1; j >= 0; j--) {
        for (i=0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];
            r = &cpu->rq[j];

            /*
             * Not interested in ourselves.
             * No interrupt disabling is needed, because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;

restart:
            pri = cpu_priority_high();
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPU's without ever
                 * being run.
                 */
                spinlock_lock(&t->lock);
                if (!(t->flags & (X_WIRED | X_STOLEN))) {
                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    spinlock_lock(&nrdylock);
                    nrdy--;
                    spinlock_unlock(&nrdylock);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
463
            spinlock_unlock(&r->lock);
464
 
465
            if (t) {
466
                /*
467
                 * Ready t on local CPU
468
                 */
469
                spinlock_lock(&t->lock);
470
                #ifdef KCPULB_VERBOSE
15 jermar 471
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
1 jermar 472
                #endif
473
                t->flags |= X_STOLEN;
474
                spinlock_unlock(&t->lock);
475
 
476
                thread_ready(t);
477
 
478
                cpu_priority_restore(pri);
479
 
480
                if (--count == 0)
481
                    goto satisfied;
482
 
483
                /*
484
                             * We are not satisfied yet, focus on another CPU next time.
485
                 */
486
                k++;
487
 
488
                continue;
489
            }
490
            cpu_priority_restore(pri);
491
        }
492
    }
493
 
15 jermar 494
    if (CPU->nrdy) {
1 jermar 495
        /*
496
         * Be a little bit light-weight and let migrated threads run.
497
         */
498
        scheduler();
499
    }
500
    else {
501
        /*
502
         * We failed to migrate a single thread.
503
         * Something more sophisticated should be done.
504
         */
505
        scheduler();
506
    }
507
 
508
    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */