/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>

/** Number of ready threads in the system, across all CPUs. */
volatile int nrdy;


/** Take actions before a thread runs
 *
 * Perform the actions that need to be taken
 * before the newly selected thread is passed
 * control: architecture-specific tasks and
 * restoration of the thread's FPU context.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore(&(THREAD->saved_fpu_context));
}


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load balancing thread is not running,
         * set the CPU-private flag that kcpulb has been
         * started and wake it up.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
        #endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This saves energy and is beneficial to hyperthreading.
         * On the other hand, several hardware interrupts can be ignored.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&nrdy);

        spinlock_lock(&CPU->lock);
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i + 1) * 10000);
        t->pri = i; /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * when the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;
}
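
/*
 * Illustration of the quantum assignment in find_best_thread(), assuming
 * us2ticks() converts microseconds to clock ticks: the quantum grows
 * linearly with the run queue index, so threads picked from lower-priority
 * queues run longer before their ticks are exhausted.
 *
 *     rq index i      quantum
 *     0               us2ticks(10000)    (10 ms)
 *     1               us2ticks(20000)    (20 ms)
 *     ...
 *     RQ_COUNT - 1    us2ticks(RQ_COUNT * 10000)
 */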


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * 'pri' greater than or equal to 'start' are moved up one
 * run queue, towards higher priority.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);
}
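
/*
 * Worked example of relink_rq(), with start == 2 assumed for illustration:
 * the first iteration appends the contents of rq[3] to rq[2] and empties
 * rq[3]; the next iteration appends rq[4] to the now empty rq[3], and so
 * on up to rq[RQ_COUNT - 1]. Every thread waiting in a queue below the
 * threshold is thus promoted by exactly one priority level.
 */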


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        fpu_context_save(&(THREAD->saved_fpu_context));
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        THREAD->saved_context.pri = pri;
    }

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}
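
/*
 * A note on context_save(), inferred from its use in scheduler() above:
 * it returns a non-zero value when the context is saved for the first
 * time and zero when control comes back to the saved point through
 * context_restore(). The !context_save(...) branch is therefore the
 * resume path -- "the place where threads leave scheduler()".
 */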


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * executed on the new stack. It handles
 * the actual context switch to the new
 * thread.
 *
 */
void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
                THREAD->state = Ready;
                spinlock_unlock(&THREAD->lock);
                thread_ready(THREAD);
                break;

            case Exiting:
                frame_free((__address) THREAD->kstack);
                if (THREAD->ustack) {
                    frame_free((__address) THREAD->ustack);
                }

                /*
                 * Detach from the containing task.
                 */
                spinlock_lock(&TASK->lock);
                list_remove(&THREAD->th_link);
                spinlock_unlock(&TASK->lock);

                spinlock_unlock(&THREAD->lock);

                spinlock_lock(&threads_lock);
                list_remove(&THREAD->threads_link);
                spinlock_unlock(&threads_lock);

                spinlock_lock(&CPU->lock);
                if (CPU->fpu_owner == THREAD)
                    CPU->fpu_owner = NULL;
                spinlock_unlock(&CPU->lock);

                free(THREAD);

                break;

            case Sleeping:
                /*
                 * Prefer the thread after it's woken up.
                 */
                THREAD->pri = -1;

                /*
                 * We need to release wq->lock which we locked in
                 * waitq_sleep(). The address of wq->lock is kept in
                 * THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
                 * Check for possible requests for out-of-context invocation.
                 */
                if (THREAD->call_me) {
                    THREAD->call_me(THREAD->call_me_with);
                    THREAD->call_me = NULL;
                    THREAD->call_me_with = NULL;
                }

                spinlock_unlock(&THREAD->lock);

                break;

            default:
                /*
                 * The thread has entered an unexpected state.
                 */
                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, much work can be avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both tasks and vm mappings are different.
             * Replace the old one with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
    #endif

    context_restore(&THREAD->saved_context);
    /* not reached */
}
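
/*
 * Summary of the procedure above: the old thread is requeued, destroyed
 * or put aside according to its state, find_best_thread() picks the next
 * thread to run, relink_rq() gets a chance to prevent starvation, address
 * spaces are switched only when the new thread belongs to a different
 * task with a different vm mapping, and context_restore() finally passes
 * control to the new thread.
 */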


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread. It supervises
 * the supply of threads for the CPU it is
 * wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation can have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);
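
    /*
     * Example with assumed numbers: with 4 active CPUs and nrdy == 12
     * ready threads system-wide, the fair share per CPU is 12 / 4 == 3;
     * if this CPU has only 1 ready thread, count == 3 - 1 == 2, i.e. up
     * to two threads will be stolen from the other CPUs.
     */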

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];
            r = &cpu->rq[j];

            /*
             * Not interested in ourselves.
             * No interrupt disabling is required because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;

restart:
            pri = cpu_priority_high();
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter rule prevents
                 * threads from migrating between CPUs without ever being
                 * run. We also don't want to steal threads whose FPU
                 * context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on the local CPU.
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet; focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */