/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

/** Number of ready threads in the system. */
atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
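    /*
     * FPU handling below: with CONFIG_FPU_LAZY, the FPU context is not
     * restored here; the FPU is merely enabled or disabled so that the
     * first FPU instruction of a non-owner thread traps and the context
     * switch happens on demand. Otherwise the context is restored eagerly.
     */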
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
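    /*
     * Invoked when a thread that does not own this CPU's FPU touches it
     * (via the architecture's FPU-unavailable trap): save the previous
     * owner's context, if any, and make THREAD the new owner.
     */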
    fpu_enable();
    if (CPU->fpu_owner != NULL) {
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
    }
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
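    /*
     * Interrupts are enabled while waiting for work so that device
     * interrupts and IPIs can be serviced and make new threads ready;
     * they are disabled again before the run queues are examined.
     */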
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         */
         cpu_sleep();
         goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

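        /*
         * The time quantum scales with the queue index: a thread taken
         * from a lower-priority queue gets a longer slice,
         * (i + 1) * 10 ms, before it is preempted.
         */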
        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * when load balancing needs emerge.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with 'pri'
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
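    /*
     * Relinking is rate-limited: needs_relink is advanced elsewhere as
     * the CPU runs (on clock ticks) and the queues are merged upwards
     * only once it exceeds NEEDS_RELINK_MAX.
     */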
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. Handles the actual context
 * switch to a new thread.
 *
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

            case Exiting:
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if (CPU->fpu_owner == THREAD)
                CPU->fpu_owner = NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

            case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

            default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_install(as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
    #endif

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter
     * to thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}


/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
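        /*
         * context_save() behaves like setjmp(): it returns true on the
         * direct call and false when control returns here through
         * context_restore(), i.e. when this thread is scheduled again.
         */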
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * Interrupt priority level of preempted thread is recorded here
         * to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPU's. Note that the situation can have changed between two
     * passes. Each time get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active;
    count = average - atomic_get(&CPU->nrdy);

    if (count < 0)
        goto satisfied;

    if (!count) { /* Try to steal threads from CPU's that have more than the average count */
        count = 1;
        average += 1;
    }

    /*
     * Search the lowest-priority queues on all CPU's first and the
     * highest-priority queues on all CPU's last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;
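
            /*
             * Start the scan at a CPU index rotated by k, so that
             * successive stealing passes spread over different CPUs
             * rather than always hitting the same one first.
             */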
            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * Doesn't require interrupt disabling because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

restart:    ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads nor threads already stolen.
                 * The latter prevents threads from migrating between CPU's without ever being run.
                 * We also don't want to steal threads whose FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {

                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        interrupts_restore(ipl);
                        goto restart;
                    }
                    atomic_dec(&cpu->nrdy);
                    spinlock_unlock(&cpu->lock);

                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    printf("*********** Scheduler dump ***********\n");
    for (cpu = 0; cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;
        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d needs_relink: %d\n",
               cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\tRq %d: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid,
                       thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}