Subversion Repositories HelenOS


Changes from Rev 2436 to Rev 2440, shown as a unified diff: lines prefixed with '-' exist only in Rev 2436, lines prefixed with '+' only in Rev 2440; unprefixed lines are common to both revisions.
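In brief, as read from the diff below: Rev 2440 splits thread creation and attachment into two phases. thread_create() now takes a task reference up front and, unless the new THREAD_FLAG_NOATTACH flag is passed, calls the new thread_attach() to link the thread into its task and into the system-wide threads_btree. thread_destroy() moves below thread_create() in the file, and sys_thread_create() switches to the NOATTACH path so that a failed copy of the new thread ID to userspace can be rolled back before the thread becomes visible. The remaining hunks are style cleanups (spaces after commas, '#  ifdef' to '#ifdef', '! t->kstack' to '!t->kstack').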
Line 155... Line 155...

 
     /* call the architecture-specific part of the constructor */
     thr_constructor_arch(t);
 
 #ifdef ARCH_HAS_FPU
-#  ifdef CONFIG_FPU_LAZY
+#ifdef CONFIG_FPU_LAZY
     t->saved_fpu_context = NULL;
-#  else
-    t->saved_fpu_context = slab_alloc(fpu_context_slab,kmflags);
+#else
+    t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
     if (!t->saved_fpu_context)
         return -1;
-#  endif
+#endif
 #endif
 
     t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
-    if (! t->kstack) {
+    if (!t->kstack) {
 #ifdef ARCH_HAS_FPU
         if (t->saved_fpu_context)
-            slab_free(fpu_context_slab,t->saved_fpu_context);
+            slab_free(fpu_context_slab, t->saved_fpu_context);
 #endif
         return -1;
     }
 
     return 0;
Line 187... Line 187...

     thr_destructor_arch(t);
 
     frame_free(KA2PA(t->kstack));
 #ifdef ARCH_HAS_FPU
     if (t->saved_fpu_context)
-        slab_free(fpu_context_slab,t->saved_fpu_context);
+        slab_free(fpu_context_slab, t->saved_fpu_context);
 #endif
     return 1; /* One page freed */
 }
 
 /** Initialize threads
Line 230... Line 230...

 
     ipl = interrupts_disable();
 
     spinlock_lock(&t->lock);
 
-    ASSERT(! (t->state == Ready));
+    ASSERT(!(t->state == Ready));
 
     i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
 
     cpu = CPU;
     if (t->flags & THREAD_FLAG_WIRED) {
258
    atomic_inc(&cpu->nrdy);
258
    atomic_inc(&cpu->nrdy);
259
 
259
 
260
    interrupts_restore(ipl);
260
    interrupts_restore(ipl);
261
}
261
}
262
 
262
 
263
/** Destroy thread memory structure
-
 
264
 *
-
 
265
 * Detach thread from all queues, cpus etc. and destroy it.
-
 
266
 *
-
 
267
 * Assume thread->lock is held!!
-
 
268
 */
-
 
269
void thread_destroy(thread_t *t)
-
 
270
{
-
 
271
    bool destroy_task = false;
-
 
272
 
-
 
273
    ASSERT(t->state == Exiting || t->state == Undead);
-
 
274
    ASSERT(t->task);
-
 
275
    ASSERT(t->cpu);
-
 
276
 
-
 
277
    spinlock_lock(&t->cpu->lock);
-
 
278
    if(t->cpu->fpu_owner == t)
-
 
279
        t->cpu->fpu_owner = NULL;
-
 
280
    spinlock_unlock(&t->cpu->lock);
-
 
281
 
-
 
282
    spinlock_unlock(&t->lock);
-
 
283
 
-
 
284
    spinlock_lock(&threads_lock);
-
 
285
    btree_remove(&threads_btree, (btree_key_t) ((uintptr_t ) t), NULL);
-
 
286
    spinlock_unlock(&threads_lock);
-
 
287
 
-
 
288
    /*
-
 
289
     * Detach from the containing task.
-
 
290
     */
-
 
291
    spinlock_lock(&t->task->lock);
-
 
292
    list_remove(&t->th_link);
-
 
293
    if (--t->task->refcount == 0) {
-
 
294
        t->task->accept_new_threads = false;
-
 
295
        destroy_task = true;
-
 
296
    }
-
 
297
    spinlock_unlock(&t->task->lock);   
-
 
298
   
-
 
299
    if (destroy_task)
-
 
300
        task_destroy(t->task);
-
 
301
   
-
 
302
    /*
-
 
303
     * If the thread had a userspace context, free up its kernel_uarg
-
 
304
     * structure.
-
 
305
     */
-
 
306
    if (t->flags & THREAD_FLAG_USPACE) {
-
 
307
        ASSERT(t->thread_arg);
-
 
308
        free(t->thread_arg);
-
 
309
    }
-
 
310
 
-
 
311
    slab_free(thread_slab, t);
-
 
312
}
-
 
313
 
-
 
314
/** Create new thread
263
/** Create new thread
315
 *
264
 *
316
 * Create a new thread.
265
 * Create a new thread.
317
 *
266
 *
318
 * @param func      Thread's implementing function.
267
 * @param func      Thread's implementing function.
Line 390... Line 339...
390
    t->fpu_context_exists = 0;
339
    t->fpu_context_exists = 0;
391
    t->fpu_context_engaged = 0;
340
    t->fpu_context_engaged = 0;
392
 
341
 
393
    /* might depend on previous initialization */
342
    /* might depend on previous initialization */
394
    thread_create_arch(t); 
343
    thread_create_arch(t); 
395
   
344
 
396
    /*
-
 
397
     * Attach to the containing task.
-
 
398
     */
-
 
399
    ipl = interrupts_disable();  
345
    ipl = interrupts_disable();  
400
    spinlock_lock(&task->lock);
346
    spinlock_lock(&task->lock);
401
    if (!task->accept_new_threads) {
347
    if (!task->accept_new_threads) {
402
        spinlock_unlock(&task->lock);
348
        spinlock_unlock(&task->lock);
403
        slab_free(thread_slab, t);
349
        slab_free(thread_slab, t);
404
        interrupts_restore(ipl);
350
        interrupts_restore(ipl);
405
        return NULL;
351
        return NULL;
-
 
352
    } else {
-
 
353
        /*
-
 
354
         * Bump the reference count so that this task cannot be
-
 
355
         * destroyed while the new thread is being attached to it.
-
 
356
         */
-
 
357
        task->refcount++;
406
    }
358
    }
-
 
359
    spinlock_unlock(&task->lock);
-
 
360
    interrupts_restore(ipl);
-
 
361
 
-
 
362
    if (!(flags & THREAD_FLAG_NOATTACH))
-
 
363
        thread_attach(t, task);
-
 
364
 
-
 
365
    return t;
-
 
366
}
-
 
367
 
-
 
368
/** Destroy thread memory structure
-
 
369
 *
-
 
370
 * Detach thread from all queues, cpus etc. and destroy it.
-
 
371
 *
-
 
372
 * Assume thread->lock is held!!
-
 
373
 */
-
 
374
void thread_destroy(thread_t *t)
-
 
375
{
-
 
376
    bool destroy_task = false;
-
 
377
 
-
 
378
    ASSERT(t->state == Exiting || t->state == Undead);
-
 
379
    ASSERT(t->task);
-
 
380
    ASSERT(t->cpu);
-
 
381
 
-
 
382
    spinlock_lock(&t->cpu->lock);
-
 
383
    if (t->cpu->fpu_owner == t)
-
 
384
        t->cpu->fpu_owner = NULL;
-
 
385
    spinlock_unlock(&t->cpu->lock);
-
 
386
 
-
 
387
    spinlock_unlock(&t->lock);
-
 
388
 
-
 
389
    spinlock_lock(&threads_lock);
-
 
390
    btree_remove(&threads_btree, (btree_key_t) ((uintptr_t ) t), NULL);
-
 
391
    spinlock_unlock(&threads_lock);
-
 
392
 
-
 
393
    /*
-
 
394
     * Detach from the containing task.
-
 
395
     */
-
 
396
    spinlock_lock(&t->task->lock);
-
 
397
    list_remove(&t->th_link);
-
 
398
    if (--t->task->refcount == 0) {
-
 
399
        t->task->accept_new_threads = false;
-
 
400
        destroy_task = true;
-
 
401
    }
-
 
402
    spinlock_unlock(&t->task->lock);   
-
 
403
   
-
 
404
    if (destroy_task)
-
 
405
        task_destroy(t->task);
-
 
406
   
-
 
407
    /*
-
 
408
     * If the thread had a userspace context, free up its kernel_uarg
-
 
409
     * structure.
-
 
410
     */
-
 
411
    if (t->flags & THREAD_FLAG_USPACE) {
-
 
412
        ASSERT(t->thread_arg);
-
 
413
        free(t->thread_arg);
-
 
414
    }
-
 
415
 
-
 
416
    slab_free(thread_slab, t);
-
 
417
}
-
 
418
 
-
 
419
/** Make the thread visible to the system.
-
 
420
 *
-
 
421
 * Attach the thread structure to the current task and make it visible in the
-
 
422
 * threads_btree.
-
 
423
 *
-
 
424
 * @param t Thread to be attached to the task.
-
 
425
 * @param task  Task to which the thread is to be attached.
-
 
426
 */
-
 
427
void thread_attach(thread_t *t, task_t *task)
-
 
428
{
-
 
429
    ipl_t ipl;
-
 
430
 
-
 
431
    /*
-
 
432
     * Attach to the current task.
-
 
433
     */
-
 
434
    ipl = interrupts_disable();  
-
 
435
    spinlock_lock(&task->lock);
-
 
436
    ASSERT(task->refcount);
407
    list_append(&t->th_link, &task->th_head);
437
    list_append(&t->th_link, &task->th_head);
408
    if (task->refcount++ == 0)
438
    if (task->refcount == 1)
409
        task->main_thread = t;
439
        task->main_thread = t;
410
    spinlock_unlock(&task->lock);
440
    spinlock_unlock(&task->lock);
411
 
441
 
412
    /*
442
    /*
413
     * Register this thread in the system-wide list.
443
     * Register this thread in the system-wide list.
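The net effect of this hunk is a two-phase creation API: with THREAD_FLAG_NOATTACH, thread_create() returns a fully initialized thread that is not yet linked to its task or registered in threads_btree, and thread_attach() publishes it later. Note also the reference-count fix folded into the move: the old inline code tested task->refcount++ == 0 to detect the main thread, while thread_attach() asserts that the caller already took a reference and tests refcount == 1 instead. A minimal usage sketch of the new flag, assuming a hypothetical entry function worker, argument arg, and fallible setup step (none of these names are in the diff):

thread_t *t = thread_create(worker, arg, TASK,
    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, "worker", false);
if (t) {
    if (fallible_setup(t) != 0) {
        /*
         * Not yet attached: the thread is invisible to the system and
         * can be deallocated directly. The task reference taken by
         * thread_create() must still be dropped, as sys_thread_create()
         * does in the last hunk below.
         */
        slab_free(thread_slab, t);
    } else {
        thread_attach(t, TASK); /* link to task, insert into threads_btree */
        thread_ready(t);        /* make it runnable */
    }
}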
Line 416... Line 446...

     btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t,
         NULL);
     spinlock_unlock(&threads_lock);
 
     interrupts_restore(ipl);
-
-    return t;
 }
 
 /** Terminate thread.
  *
  * End current thread execution and switch it to the exiting state. All pending
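In Rev 2436 these lines were the tail of thread_create(), which returned the new thread; in Rev 2440 the same region closes the new void thread_attach(), so the trailing return t; drops out and thread_create() returns t earlier, right after the optional attach shown in the previous hunk.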
Line 663... Line 691...

     if (rc != 0) {
         free(kernel_uarg);
         return (unative_t) rc;
     }
 
-    t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf,
-        false);
+    t = thread_create(uinit, kernel_uarg, TASK,
+        THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
     if (t) {
+        if (uspace_thread_id != NULL) {
+            int rc;
+
+            rc = copy_to_uspace(uspace_thread_id, &t->tid,
+                sizeof(t->tid));
+            if (rc != 0) {
+                ipl_t ipl;
+
+                /*
+                 * We have encountered a failure, but the thread
+                 * has already been created. We need to undo its
+                 * creation now.
+                 */
+
+                /*
+                 * The new thread structure is initialized,
+                 * but is still not visible to the system.
+                 * We can safely deallocate it.
+                 */
+                slab_free(thread_slab, t);
+                free(kernel_uarg);
+
+                /*
+                 * Now we need to decrement the task reference
+                 * counter. Because we are running within the
+                 * same task, thread t is not the last thread
+                 * in the task, so it is safe to merely
+                 * decrement the counter.
+                 */
+                ipl = interrupts_disable();
+                spinlock_lock(&TASK->lock);
+                TASK->refcount--;
+                spinlock_unlock(&TASK->lock);
+                interrupts_restore(ipl);
+
+                return (unative_t) rc;
+            }
+        }
+        thread_attach(t, TASK);
         thread_ready(t);
-        if (uspace_thread_id != NULL)
-            return (unative_t) copy_to_uspace(uspace_thread_id,
-                &t->tid, sizeof(t->tid));
-        else
-            return 0;
+
+        return 0;
     } else
         free(kernel_uarg);
 
     return (unative_t) ENOMEM;
 }
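The reordering in this last hunk is the payoff of the new API. In Rev 2436 the thread ID was copied to userspace only after thread_ready(), so a failing copy_to_uspace() reported an error while leaving the new thread running; in Rev 2440 the copy happens while the thread is still unattached and invisible, so the failure path can fully undo the creation. Condensed as a comment sketch (a summary of the hunk above, not new code paths):

/*
 * sys_thread_create() in Rev 2440, step by step:
 *
 * 1. thread_create(..., THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, ...)
 *        thread allocated, TASK->refcount bumped, thread still invisible
 * 2. copy_to_uspace(uspace_thread_id, &t->tid, sizeof(t->tid))
 *        only if uspace_thread_id != NULL; on failure, undo:
 *        slab_free(thread_slab, t), free(kernel_uarg), TASK->refcount--
 * 3. thread_attach(t, TASK)
 *        thread linked into the task and into threads_btree
 * 4. thread_ready(t)
 *        thread enqueued on a run queue
 */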