Subversion Repositories HelenOS


Rev 1787 -> Rev 1854 (unified diff: - lines are Rev 1787, + lines are Rev 1854)
@@ Line 139 (Rev 1787) -> Line 139 (Rev 1854) @@
         /* Allocate FPU context */
         if (!THREAD->saved_fpu_context) {
             /* Might sleep */
             spinlock_unlock(&THREAD->lock);
             spinlock_unlock(&CPU->lock);
-            THREAD->saved_fpu_context = slab_alloc(fpu_context_slab,
-                                   0);
+            THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
             /* We may have switched CPUs during slab_alloc */
             goto restart;
         }
         fpu_init();
         THREAD->fpu_context_exists=1;
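
The hunk above is the lazy FPU-context allocation: slab_alloc() may sleep, so both spinlocks have to be dropped first, and because the thread may have migrated to another CPU while sleeping, the code jumps back to restart and re-takes the locks from scratch. Below is a minimal compilable sketch of that pattern; the lock and allocator types are stand-ins, not the real kernel primitives.

    #include <stdlib.h>

    typedef struct { int locked; } spinlock_t;           /* stand-in lock */
    static void spinlock_lock(spinlock_t *l)   { l->locked = 1; }
    static void spinlock_unlock(spinlock_t *l) { l->locked = 0; }

    typedef struct {
        spinlock_t lock;
        void *saved_fpu_context;
    } thread_t;

    void ensure_fpu_context(thread_t *thread, spinlock_t *cpu_lock)
    {
    restart:
        spinlock_lock(cpu_lock);
        spinlock_lock(&thread->lock);
        if (!thread->saved_fpu_context) {
            /* The allocation may sleep, so both locks must be dropped
             * first; sleeping while holding a spinlock could deadlock. */
            spinlock_unlock(&thread->lock);
            spinlock_unlock(cpu_lock);
            thread->saved_fpu_context = malloc(512);  /* slab_alloc() stand-in */
            /* We may be on a different CPU now; re-take the locks and
             * re-check everything from the top. */
            goto restart;
        }
        spinlock_unlock(&thread->lock);
        spinlock_unlock(cpu_lock);
    }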

@@ Line 233 (Rev 1787) -> Line 232 (Rev 1854) @@
 
         t->ticks = us2ticks((i+1)*10000);
         t->priority = i;    /* correct rq index */
 
         /*
-         * Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
+         * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
+         * when load balancing needs emerge.
          */
-        t->flags &= ~X_STOLEN;
+        t->flags &= ~THREAD_FLAG_STOLEN;
         spinlock_unlock(&t->lock);
 
         return t;
     }
     goto loop;
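
The substance of this revision is a mechanical rename: X_STOLEN becomes THREAD_FLAG_STOLEN and X_WIRED becomes THREAD_FLAG_WIRED. The flags are bit masks, so &= ~THREAD_FLAG_STOLEN clears just that bit and leaves the rest of t->flags intact. A self-contained demo; the numeric values are illustrative assumptions, only the names come from the diff:

    #include <stdio.h>

    #define THREAD_FLAG_WIRED   (1 << 0)  /* pinned to its CPU (assumed value) */
    #define THREAD_FLAG_STOLEN  (1 << 1)  /* migrated by the balancer (assumed value) */

    int main(void)
    {
        unsigned int flags = THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN;

        flags &= ~THREAD_FLAG_STOLEN;  /* the operation from the hunk above */

        printf("wired=%d stolen=%d\n",
            !!(flags & THREAD_FLAG_WIRED), !!(flags & THREAD_FLAG_STOLEN));
        return 0;  /* prints: wired=1 stolen=0 */
    }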

@@ Line 347 (Rev 1787) -> Line 347 (Rev 1854) @@
      * which is fooled by SP being set to the very top of the stack.
      * Therefore the scheduler() function continues in
      * scheduler_separated_stack().
      */
     context_save(&CPU->saved_context);
-    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE);
+    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
+        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     context_restore(&CPU->saved_context);
     /* not reached */
 }
 
 /** Scheduler stack switch wrapper
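
This hunk is pure re-wrapping, but it is the heart of the stack switch: context_save() snapshots the current context, context_set() points its PC at scheduler_separated_stack() and its SP at the per-CPU stack, and context_restore() jumps there, never to return. A rough userspace analogue of the same trampoline, sketched with the POSIX ucontext(3) API (obsolescent but still available on glibc) rather than the HelenOS primitives:

    #include <stdio.h>
    #include <stdlib.h>
    #include <ucontext.h>

    #define STACK_SIZE (64 * 1024)

    static ucontext_t main_ctx, sched_ctx;

    static void separated_stack(void)
    {
        /* Runs on the private stack, like scheduler_separated_stack(). */
        puts("running on the separate stack");
        setcontext(&main_ctx);  /* the kernel version never returns */
    }

    int main(void)
    {
        getcontext(&sched_ctx);                   /* ~ context_save()    */
        sched_ctx.uc_stack.ss_sp = malloc(STACK_SIZE);
        sched_ctx.uc_stack.ss_size = STACK_SIZE;  /* ~ context_set()     */
        sched_ctx.uc_link = &main_ctx;
        makecontext(&sched_ctx, separated_stack, 0);
        swapcontext(&main_ctx, &sched_ctx);       /* ~ context_restore() */
        puts("back on the original stack");
        free(sched_ctx.uc_stack.ss_sp);
        return 0;
    }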

@@ Line 481 (Rev 1787) -> Line 482 (Rev 1854) @@
 
     spinlock_lock(&THREAD->lock);
     THREAD->state = Running;
 
 #ifdef SCHEDULER_VERBOSE
-    printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
+    printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
+        CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
 #endif
 
     /*
      * Some architectures provide late kernel PA2KA(identity)
      * mapping in a page fault handler. However, the page fault
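
Another wrap-only hunk; the pattern worth noting is that the whole diagnostic compiles away unless SCHEDULER_VERBOSE is defined, so a release build of the scheduler pays nothing for it. A trivial illustration of the same compile-time switch:

    #include <stdio.h>

    int main(void)
    {
    #ifdef SCHEDULER_VERBOSE               /* enable with -DSCHEDULER_VERBOSE */
        printf("cpu%d: tid %d\n", 0, 42);  /* illustrative values */
    #endif
        return 0;
    }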

@@ Line 554 (Rev 1787) -> Line 556 (Rev 1854) @@
 
             cpu = &cpus[(i + k) % config.cpu_active];
 
             /*
              * Not interested in ourselves.
-             * Doesn't require interrupt disabling for kcpulb is X_WIRED.
+             * Doesn't require interrupt disabling for kcpulb has THREAD_FLAG_WIRED.
              */
             if (CPU == cpu)
                 continue;
             if (atomic_get(&cpu->nrdy) <= average)
                 continue;
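
The indexing expression cpus[(i + k) % config.cpu_active] walks the active-CPU array in ring order, wrapping at the end, so one pass considers every CPU regardless of the starting offset. A standalone illustration, assuming four active CPUs and taking i to be the balancer's own CPU index for the sake of the sketch:

    #include <stdio.h>

    #define CPU_ACTIVE 4  /* stand-in for config.cpu_active */

    int main(void)
    {
        int i = 1;  /* assumed index of the CPU we run on */
        for (int k = 0; k < CPU_ACTIVE; k++) {
            int candidate = (i + k) % CPU_ACTIVE;  /* visits 1, 2, 3, 0 */
            if (candidate == i)
                continue;  /* "Not interested in ourselves." */
            printf("consider stealing from cpu%d\n", candidate);
        }
        return 0;
    }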

@@ Line 575 (Rev 1787) -> Line 577 (Rev 1854) @@
             t = NULL;
             l = r->rq_head.prev;    /* search rq from the back */
             while (l != &r->rq_head) {
                 t = list_get_instance(l, thread_t, rq_link);
                 /*
-                 * We don't want to steal CPU-wired threads neither threads already stolen.
-                 * The latter prevents threads from migrating between CPU's without ever being run.
-                 * We don't want to steal threads whose FPU context is still in CPU.
+                 * We don't want to steal CPU-wired threads neither threads already
+                 * stolen. The latter prevents threads from migrating between CPU's
+                 * without ever being run. We don't want to steal threads whose FPU
+                 * context is still in CPU.
                  */
                 spinlock_lock(&t->lock);
-                if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
+                if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
+                    (!(t->fpu_context_engaged)) ) {
                     /*
                      * Remove t from r.
                      */
                     spinlock_unlock(&t->lock);
 
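The condition rewritten above is the steal filter: a thread may be taken only if it is neither wired to its CPU nor already stolen, and its FPU state is not still live in the CPU's registers. The same test as a standalone predicate, with minimal stand-in types; only the flag names and the condition itself come from the diff, and the flag values are assumed:

    #include <stdbool.h>

    #define THREAD_FLAG_WIRED   (1 << 0)  /* assumed values */
    #define THREAD_FLAG_STOLEN  (1 << 1)

    typedef struct {
        unsigned int flags;
        bool fpu_context_engaged;
    } thread_t;

    bool thread_stealable(const thread_t *t)
    {
        return !(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)) &&
            !t->fpu_context_engaged;
    }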
                   
@@ Line 606 (Rev 1787) -> Line 610 (Rev 1854) @@
                 /*
                  * Ready t on local CPU
                  */
                 spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
-                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
+                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
+                    CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
+                    atomic_get(&nrdy) / config.cpu_active);
 #endif
-                t->flags |= X_STOLEN;
+                t->flags |= THREAD_FLAG_STOLEN;
                 t->state = Entering;
                 spinlock_unlock(&t->lock);
 
                 thread_ready(t);
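
Taken together with the earlier hunk that clears THREAD_FLAG_STOLEN in the dispatch path, this gives the guarantee the comments describe: the flag is set here under the thread's lock before thread_ready() enqueues it locally, and it is cleared only when the thread is actually picked to run, so a thread can never bounce between CPUs without ever executing. A sketch of that handshake with stand-in types; the sequence mirrors the hunk above, and the flag value is assumed:

    typedef struct { int locked; } spinlock_t;
    typedef enum { Entering, Running } state_t;
    typedef struct { spinlock_t lock; unsigned int flags; state_t state; } thread_t;

    #define THREAD_FLAG_STOLEN (1 << 1)  /* assumed value */

    static void spinlock_lock(spinlock_t *l)   { l->locked = 1; }
    static void spinlock_unlock(spinlock_t *l) { l->locked = 0; }
    static void thread_ready(thread_t *t) { (void) t; /* enqueue on local rq */ }

    void migrate_to_local_cpu(thread_t *t)
    {
        spinlock_lock(&t->lock);
        t->flags |= THREAD_FLAG_STOLEN;  /* cleared again when dispatched */
        t->state = Entering;
        spinlock_unlock(&t->lock);
        thread_ready(t);
    }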