/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>
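
/** Number of ready threads in the system. */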
atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
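    /*
     * With lazy FPU switching it suffices to enable or disable the FPU
     * here; the FPU context itself is switched on demand in
     * scheduler_fpu_lazy_request(). Otherwise the FPU context is
     * switched eagerly right away.
     */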
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

#ifdef CONFIG_FPU_LAZY
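/** Request FPU for the running thread (lazy FPU switching)
 *
 * Save the FPU context of this CPU's previous FPU owner, if any,
 * then restore (or initialize) the FPU context of THREAD and make
 * THREAD the new owner of this CPU's FPU.
 *
 */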
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    if (CPU->fpu_owner != NULL) {
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
    }
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
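    /* Nothing to do so far. */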
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This saves energy and is friendly to hyperthreaded CPUs.
         *
         * - we might get an interrupt here that makes some thread runnable,
         *   in which case we must wait for the next quantum to come
         */
        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;
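
        /*
         * Give the thread a time quantum proportional to its rq index:
         * (i + 1) * 10 ms, i.e. threads taken from lower-priority
         * queues get longer quanta.
         */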
        t->ticks = us2ticks((i+1)*10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * when the need for load balancing arises again.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that threads in queues with index
 * greater than 'start' are moved up one queue, i.e. to the
 * next higher-priority queue. For example, with 'start' equal
 * to 0, the contents of rq[1] end up in rq[0], the old contents
 * of rq[2] in rq[1], and so on.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
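    /*
     * Relink only when the relink counter has exceeded its threshold;
     * most invocations thus return without touching the run queues.
     */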
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. Handles the actual
 * context switch to a new thread.
 *
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
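            /*
             * Reclaim the thread's kernel stack and, if it exists,
             * its user stack.
             */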
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if (CPU->fpu_owner == THREAD)
                CPU->fpu_owner = NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * The thread is entering an unexpected state.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_install(as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
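        /*
         * Note: context_save() returns nonzero on the direct call and
         * zero when execution resumes here via a later context_restore(),
         * i.e. the inverse of the setjmp() return convention.
         */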
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler().
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * Interrupt priority level of the preempted thread is recorded here
         * to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and the preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of ready threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPU's. Note that the situation may have changed between two
     * passes. Each time get the most up to date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPU's first and the
     * highest-priority queues on all CPU's last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * Doesn't require interrupt disabling, for kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that were already stolen. The latter prevents threads
                 * from migrating between CPU's without ever being run.
                 * We also don't want to steal threads whose FPU context
                 * is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    printf("*********** Scheduler dump ***********\n");
    for (cpu = 0; cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;
        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d needs_relink: %d\n",
               cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\tRq %d: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid, thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}