Subversion Repositories HelenOS-historic


Rev 783 → Rev 784 (lines removed in Rev 784 are prefixed with '-', lines added with '+'; unchanged lines are shown once)
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>
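
/** Number of ready threads in the system, summed over all CPUs. */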
atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if(THREAD==CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init();
        THREAD->fpu_context_exists=1;
    }
#endif
}
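
/** Request the FPU for the current thread (CONFIG_FPU_LAZY only)
 *
 * A sketch of the lazy scheme as it appears from this file: with
 * CONFIG_FPU_LAZY, before_thread_runs() leaves the FPU disabled for
 * threads that do not own it, so the thread's first FPU instruction
 * traps; the trap handler is then presumably expected to call this
 * function, which saves the previous owner's FPU context, restores
 * (or initializes) the context of THREAD and makes THREAD the
 * fpu_owner of this CPU.
 */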
#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    if (CPU->fpu_owner != NULL) {
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged=0;
    }
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists=1;
    }
    CPU->fpu_owner=THREAD;
    THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
+        *
+        * - we might get an interrupt here that makes some thread runnable,
+        *   in such a case we must wait for the next quantum to come
         */
         cpu_sleep();
         goto loop;
    }

    interrupts_disable();

    i = 0;
    for (; i<RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        t->ticks = us2ticks((i+1)*10000);
        t->priority = i;    /* eventually correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated when load balancing needs arise.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}
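
/*
 * A note on the quantum assignment above: a thread taken from rq[i]
 * receives us2ticks((i+1)*10000) ticks, i.e. an (i+1)*10 ms time slice.
 * Threads sitting in lower-priority queues are thus selected less often,
 * but run longer once selected.
 */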

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with priority
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i<RQ_COUNT-1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}
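
/*
 * Worked example: relink_rq(2) empties rq[3] into rq[2], then the
 * original contents of rq[4] into rq[3], and so on up to the last
 * queue, so every thread queued in rq[3] through rq[RQ_COUNT-1]
 * advances exactly one queue towards higher priority.
 */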

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. Handles the actual
 * context switch to a new thread.
 *
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

            case Exiting:
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if(CPU->fpu_owner==THREAD)
                CPU->fpu_owner=NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

            case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

            default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_install(as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);
    #endif

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}
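
/*
 * A note on the_copy() above: THE is the per-stack structure through
 * which CPU, TASK, THREAD and the preemption counter are reached, and
 * the (the_t *) casts of THREAD->kstack and CPU->stack suggest it is
 * kept at the very base of each stack; copying it onto the new thread's
 * kernel stack is what hands this bookkeeping over to that thread.
 */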

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * Interrupt priority level of preempted thread is recorded here
         * to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}
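
/*
 * Putting the two halves together: a thread enters scheduler(), which
 * saves its context and jumps onto the CPU's private stack;
 * scheduler_separated_stack() then requeues or destroys the old thread,
 * picks the best ready thread and context_restore()s it. The chosen
 * thread wakes up inside its own earlier scheduler() call, on the
 * branch where context_save() evaluates to false, runs
 * before_thread_runs() and returns.
 */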


#ifdef CONFIG_SMP
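/*
 * A sketch of the balancing policy implemented below: once a second,
 * each CPU's kcpulb computes the average number of ready threads per
 * active CPU (rounded up by the +1 introduced in Rev 784) and, while
 * this CPU is below that average, scans the other CPUs' run queues from
 * the lowest priority upwards, stealing threads that are neither wired
 * nor freshly stolen and whose FPU context is not engaged elsewhere.
 */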
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of ready threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPU's. Note that the situation can have changed between two
     * passes. Each time get the most up-to-date counts.
     */
-   average = atomic_get(&nrdy) / config.cpu_active;
+   average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

-   if (count < 0)
+   if (count <= 0)
        goto satisfied;

-   if (!count) { /* Try to steal threads from CPU's that have more than the average count */
-       count = 1;
-       average += 1;
-   }
-
    /*
     * Search the lowest-priority queues on all CPU's first and the
     * highest-priority queues on all CPU's last.
     */
    for (j=RQ_COUNT-1; j >= 0; j--) {
        for (i=0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * Doesn't require interrupt disabling, because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

-restart:       ipl = interrupts_disable();
+           ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads nor threads already stolen.
                 * The latter prevents threads from migrating between CPU's without ever being run.
                 * We don't want to steal threads whose FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

-                   /*
-                    * Here we have to avoid deadlock with relink_rq(),
-                    * because it locks cpu and r in a different order than we do.
-                    */
-                   if (!spinlock_trylock(&cpu->lock)) {
-                       /* Release all locks and try again. */
-                       spinlock_unlock(&r->lock);
-                       interrupts_restore(ipl);
-                       goto restart;
-                   }
                    atomic_dec(&cpu->nrdy);
-                   spinlock_unlock(&cpu->lock);
-
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu,i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    printf("*********** Scheduler dump ***********\n");
    for (cpu=0;cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;
        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d needs_relink: %d\n",
               cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i=0; i<RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\tRq %d: ", i);
            for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid,
                       thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}
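
/*
 * Illustrative output (the numbers are made up):
 *
 *   *********** Scheduler dump ***********
 *   cpu0: nrdy: 3 needs_relink: 0
 *       Rq 0: 1(Ready) 5(Ready)
 *       Rq 4: 7(Ready)
 *   cpu1: nrdy: 1 needs_relink: 2
 *       Rq 2: 9(Ready)
 */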
625