Subversion Repositories HelenOS-historic

Rev 827 → Rev 897
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>
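
/** Number of ready threads in the system (also tracked per CPU in CPU->nrdy). */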
atomic_t nrdy;

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init(&(THREAD->saved_fpu_context));
        THREAD->fpu_context_exists = 1;
    }
#endif
}

/** Take actions after old thread ran.
 *
 * Perform actions that need to be
 * taken after the running thread
 * was preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
    after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
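/** Satisfy a lazy FPU switch request.
 *
 * Typically called from the architecture's FPU trap handler when the
 * running thread touches the FPU without owning its context: the previous
 * owner's FPU context is saved, THREAD's context is restored (or
 * initialized on first use) and THREAD becomes this CPU's FPU owner.
 */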
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init(&(THREAD->saved_fpu_context));
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;

    spinlock_unlock(&THREAD->lock);
    spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will continue to go to sleep
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;
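
        /*
         * The time quantum is derived from the queue index: (i + 1) * 10 ms,
         * so threads taken from lower-priority queues get proportionally
         * longer slices.
         */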
        t->ticks = us2ticks((i+1)*10000);
        t->priority = i;    /* eventually correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated when the need
         * for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads whose priority
 * is greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on a new stack. Handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        /* must be run after switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
            case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

            case Exiting:
            thread_destroy(THREAD);
            break;

            case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

            default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }


    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);
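
    /*
     * Prevent lower-priority run queues from starving (see relink_rq()).
     */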
    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
    #endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}


/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * Interrupt priority level of preempted thread is recorded here
         * to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}




#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread; it supervises the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPU's. Note that the situation can have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
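    /*
     * 'average' is each CPU's fair share of the ready threads (rounded up);
     * 'count' is how many threads this CPU currently lacks to reach it.
     */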
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPU's first and the
     * highest-priority queues on all CPU's last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * Doesn't require interrupt disabling, because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads nor threads already stolen.
                 * The latter prevents threads from migrating between CPU's without ever being run.
                 * We don't want to steal threads whose FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    printf("*********** Scheduler dump ***********\n");
    for (cpu = 0; cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;
        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d needs_relink: %d\n",
               cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\tRq %d: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid,
                       thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}