/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be protected by a spinlock.
 */

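/*
 * nrdy is the system-wide count of ready threads and is guarded by
 * nrdylock; each cpu_t keeps its own nrdy under its own lock. As the
 * code below shows, the two counts are updated in tandem whenever a
 * thread is taken from or returned to a run queue.
 */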
spinlock_t nrdylock;
volatile int nrdy;

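/*
 * Called from scheduler() just before control returns to THREAD: it runs
 * the architecture-specific hook and restores the thread's FPU context.
 */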
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore(&(THREAD->saved_fpu_context));
}


void scheduler_init(void)
{
    spinlock_initialize(&nrdylock);
}

/* cpu_priority_high()'d */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag indicating that kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
        #endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This improves energy saving and helps hyperthreading.
         * On the other hand, several hardware interrupts can be ignored.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        spinlock_lock(&nrdylock);
        nrdy--;
        spinlock_unlock(&nrdylock);

        spinlock_lock(&CPU->lock);
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

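        /*
         * The quantum depends on the rq index the thread came from:
         * a thread taken from rq[i] gets (i+1)*10 ms, so lower-priority
         * threads run less frequently but with a longer quantum.
         */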
        t->ticks = us2ticks((i+1)*10000);
        t->pri = i; /* update to the rq index the thread was actually taken from */

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * when the need for load balancing arises again.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

/*
 * This function prevents low-priority threads from starving in rq's.
 * When it decides to relink rq's, it reconnects the respective pointers
 * so that, as a result, threads with 'pri' greater than or equal to 'start'
 * are moved up to a higher-priority queue.
 */
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
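        /*
         * Shift each queue up one level: rq[start + 1] empties into
         * rq[start], rq[start + 2] into rq[start + 1], and so on, so
         * every affected thread gains one priority level.
         */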
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}

/*
 * The scheduler.
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        fpu_context_save(&(THREAD->saved_fpu_context));
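        /*
         * In the manner of setjmp()/longjmp(): context_save() returns
         * true when it saves the context and false when the saved context
         * is resumed through context_restore(), i.e. when this thread is
         * picked to run again.
         */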
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler().
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        THREAD->saved_context.pri = pri;
    }

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
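    /*
     * Craft a one-shot context that continues in scheduler_separated_stack()
     * on the CPU's private stack. The SP is biased 8 bytes below the top of
     * CPU->stack, presumably to keep it within the stack area and aligned.
     */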
    context_save(&CPU->saved_context);
    CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
    CPU->saved_context.pc = (__address) scheduler_separated_stack;
    context_restore(&CPU->saved_context);
    /* not reached */
}

void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
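                /*
                 * The thread was preempted while running: mark it Ready
                 * and put it back on a run queue via thread_ready().
                 */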
                THREAD->state = Ready;
                spinlock_unlock(&THREAD->lock);
                thread_ready(THREAD);
                break;

            case Exiting:
                frame_free((__address) THREAD->kstack);
                if (THREAD->ustack) {
                    frame_free((__address) THREAD->ustack);
                }

                /*
                 * Detach from the containing task.
                 */
                spinlock_lock(&TASK->lock);
                list_remove(&THREAD->th_link);
                spinlock_unlock(&TASK->lock);

                spinlock_unlock(&THREAD->lock);

                spinlock_lock(&threads_lock);
                list_remove(&THREAD->threads_link);
                spinlock_unlock(&threads_lock);

                free(THREAD);

                break;

            case Sleeping:
                /*
                 * Prefer the thread after it's woken up.
                 */
                THREAD->pri = -1;

                /*
                 * We need to release wq->lock which we locked in waitq_sleep().
                 * The address of wq->lock is kept in THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
                 * Check for possible requests for out-of-context invocation.
                 */
                if (THREAD->call_me) {
                    THREAD->call_me(THREAD->call_me_with);
                    THREAD->call_me = NULL;
                    THREAD->call_me_with = NULL;
                }

                spinlock_unlock(&THREAD->lock);

                break;

            default:
                /*
                 * The thread is in an unexpected state.
                 */
                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;
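        /*
         * m1 will hold the outgoing task's vm mapping (if there is an
         * outgoing task), m2 the incoming task's mapping.
         */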

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both tasks and vm mappings are different.
             * Replace the old one with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
    #endif

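    /*
     * Pass control to the chosen thread: it continues wherever its
     * saved_context points, typically right after its own call to
     * context_save() in scheduler().
     */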
    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It supervises the supply of threads for the CPU it is wired to.
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation can have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
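    /*
     * Aim at the system-wide average: steal enough threads to raise this
     * CPU's ready count to nrdy / config.cpu_active.
     */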
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

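            /*
             * k grows by one for every stolen thread (see below), so
             * consecutive scans start from a different CPU each time.
             */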
            cpu = &cpus[(i + k) % config.cpu_active];
            r = &cpu->rq[j];

            /*
             * Not interested in ourselves.
             * No interrupt disabling is needed here, because kcpulb is
             * X_WIRED and thus cannot migrate away from this CPU.
             */
            if (CPU == cpu)
                continue;

restart:
            pri = cpu_priority_high();
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPUs without ever being run.
                 */
                spinlock_lock(&t->lock);
                if (!(t->flags & (X_WIRED | X_STOLEN))) {
                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    spinlock_lock(&nrdylock);
                    nrdy--;
                    spinlock_unlock(&nrdylock);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on the local CPU.
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet; focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let the migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */