Subversion Repositories HelenOS-historic

Rev 898 → Rev 906
@@ -61,24 +61,24 @@
  *
  */
 void before_thread_runs(void)
 {
     before_thread_runs_arch();
-    #ifdef CONFIG_FPU_LAZY
+#ifdef CONFIG_FPU_LAZY
     if(THREAD==CPU->fpu_owner)
         fpu_enable();
     else
         fpu_disable();
-    #else
+#else
     fpu_enable();
     if (THREAD->fpu_context_exists)
-        fpu_context_restore(&(THREAD->saved_fpu_context));
+        fpu_context_restore(THREAD->saved_fpu_context);
     else {
-        fpu_init(&(THREAD->saved_fpu_context));
+        fpu_init();
         THREAD->fpu_context_exists=1;
     }
-    #endif
+#endif
 }
 
 /** Take actions after THREAD had run.
  *
  * Perform actions that need to be
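In this hunk, rev 906 starts treating THREAD->saved_fpu_context as a pointer: fpu_context_restore() receives it directly instead of its address, and fpu_init() no longer takes the context as an argument. A minimal sketch of the thread fields this implies follows; the real declarations live in the kernel headers, not in this diff, so the exact types and comments here are assumptions.

/* Sketch (assumption): the relevant thread_t fields after this change. */
typedef struct fpu_context fpu_context_t;   /* arch-specific FPU register dump */

typedef struct thread {
    /* ... other fields elided ... */
    fpu_context_t *saved_fpu_context;   /* rev 898 embedded this by value;
                                           rev 906 references it through a
                                           pointer allocated on demand */
    int fpu_context_exists;             /* context has been initialized */
    int fpu_context_engaged;            /* thread currently owns a CPU's FPU */
    /* ... */
} thread_t;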
@@ -100,21 +100,29 @@
     spinlock_lock(&CPU->lock);
 
     /* Save old context */
     if (CPU->fpu_owner != NULL) {
         spinlock_lock(&CPU->fpu_owner->lock);
-        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
+        fpu_context_save(CPU->fpu_owner->saved_fpu_context);
         /* don't prevent migration */
         CPU->fpu_owner->fpu_context_engaged=0;
         spinlock_unlock(&CPU->fpu_owner->lock);
     }
 
     spinlock_lock(&THREAD->lock);
     if (THREAD->fpu_context_exists) {
-        fpu_context_restore(&THREAD->saved_fpu_context);
+        fpu_context_restore(THREAD->saved_fpu_context);
     } else {
-        fpu_init(&(THREAD->saved_fpu_context));
+        /* Allocate FPU context */
+        if (!THREAD->saved_fpu_context) {
+            /* Might sleep */
+            spinlock_unlock(&THREAD->lock);
+            THREAD->saved_fpu_context = slab_alloc(fpu_context_slab,
+                                   0);
+            spinlock_lock(&THREAD->lock);
+        }
+        fpu_init();
         THREAD->fpu_context_exists=1;
     }
     CPU->fpu_owner=THREAD;
     THREAD->fpu_context_engaged = 1;
     spinlock_unlock(&THREAD->lock);
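The new else branch above allocates the FPU context lazily. Because slab_alloc() can sleep, the thread spinlock is released around the allocation and re-taken afterwards, which is what the /* Might sleep */ comment refers to. A readable consolidation of the rev 906 logic, with the locking rationale spelled out in comments, might look like this (identifiers taken from the diff; the surrounding declarations are assumed):

spinlock_lock(&THREAD->lock);
if (THREAD->fpu_context_exists) {
    /* The thread already has a saved context; just load it. */
    fpu_context_restore(THREAD->saved_fpu_context);
} else {
    /* First FPU use by this thread: allocate its context on demand. */
    if (!THREAD->saved_fpu_context) {
        /*
         * slab_alloc() may sleep, and sleeping while holding a spinlock
         * is not allowed, so drop the thread lock for the duration of
         * the allocation and re-acquire it afterwards.
         */
        spinlock_unlock(&THREAD->lock);
        THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
        spinlock_lock(&THREAD->lock);
    }
    fpu_init();
    THREAD->fpu_context_exists = 1;
}
CPU->fpu_owner = THREAD;
THREAD->fpu_context_engaged = 1;
spinlock_unlock(&THREAD->lock);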
@@ -272,13 +280,13 @@
     if (atomic_get(&haltstate))
         halt();
 
     if (THREAD) {
         spinlock_lock(&THREAD->lock);
-        #ifndef CONFIG_FPU_LAZY
-        fpu_context_save(&(THREAD->saved_fpu_context));
-        #endif
+#ifndef CONFIG_FPU_LAZY
+        fpu_context_save(THREAD->saved_fpu_context);
+#endif
         if (!context_save(&THREAD->saved_context)) {
             /*
              * This is the place where threads leave scheduler();
              */
             spinlock_unlock(&THREAD->lock);
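Without CONFIG_FPU_LAZY the scheduler saves the FPU state on every pass through scheduler(), and before_thread_runs() (first hunk) restores it before the thread resumes; with CONFIG_FPU_LAZY the save/restore is deferred to the function shown in the second hunk, and the scheduler only enables or disables the FPU. A simplified reading of how the two strategies pair up after this change (sketch only, with hypothetical function names; locking and owner bookkeeping from the hunks above omitted):

void on_switch_away(void)   /* hypothetical name; corresponds to scheduler() */
{
#ifndef CONFIG_FPU_LAZY
    /* Eager: always save, paying the cost on every context switch. */
    fpu_context_save(THREAD->saved_fpu_context);
#endif
}

void on_switch_to(void)     /* hypothetical name; corresponds to before_thread_runs() */
{
#ifdef CONFIG_FPU_LAZY
    /* Lazy: only flip the FPU on/off; save/restore happens on first use. */
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    /* Eager: restore (or initialize) the context before the thread runs. */
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}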
@@ -419,18 +427,18 @@
              * Both tasks and address spaces are different.
              * Replace the old one with the new one.
              */
             as_switch(as1, as2);
         }
-        TASK = THREAD->task;   
+        TASK = THREAD->task;
     }
 
     THREAD->state = Running;
 
-    #ifdef SCHEDULER_VERBOSE
+#ifdef SCHEDULER_VERBOSE
     printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
-    #endif  
+#endif
 
     /*
      * Some architectures provide late kernel PA2KA(identity)
      * mapping in a page fault handler. However, the page fault
      * handler uses the kernel stack of the running thread and
@@ -544,13 +552,13 @@
             if (t) {
                 /*
                  * Ready t on local CPU
                  */
                 spinlock_lock(&t->lock);
-                #ifdef KCPULB_VERBOSE
+#ifdef KCPULB_VERBOSE
                 printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
-                #endif
+#endif
                 t->flags |= X_STOLEN;
                 spinlock_unlock(&t->lock);
 
                 thread_ready(t);
 