Subversion Repositories HelenOS

Rev 3386 → Rev 4153

Line 100... Line 100...
 
 SPINLOCK_INITIALIZE(tidlock);
 thread_id_t last_tid = 0;
 
 static slab_cache_t *thread_slab;
-#ifdef ARCH_HAS_FPU
+#ifdef CONFIG_FPU
 slab_cache_t *fpu_context_slab;
 #endif
 
 /** Thread wrapper.
  *

Line 159... Line 159...
     link_initialize(&t->th_link);
 
     /* call the architecture-specific part of the constructor */
     thr_constructor_arch(t);
 
-#ifdef ARCH_HAS_FPU
+#ifdef CONFIG_FPU
 #ifdef CONFIG_FPU_LAZY
     t->saved_fpu_context = NULL;
 #else
     t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
     if (!t->saved_fpu_context)
         return -1;
 #endif
 #endif
 
     t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
     if (!t->kstack) {
-#ifdef ARCH_HAS_FPU
+#ifdef CONFIG_FPU
         if (t->saved_fpu_context)
             slab_free(fpu_context_slab, t->saved_fpu_context);
 #endif
         return -1;
     }
 
+#ifdef CONFIG_UDEBUG
+    mutex_initialize(&t->udebug.lock, MUTEX_PASSIVE);
+#endif
+
     return 0;
 }
 
 /** Destruction of thread_t object */
 static int thr_destructor(void *obj)
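
The constructor above only pre-allocates the FPU context when CONFIG_FPU is set and CONFIG_FPU_LAZY is not; with lazy switching, saved_fpu_context stays NULL until the thread first touches the FPU. A simplified, self-contained sketch of the two allocation policies (malloc, the LAZY_FPU macro and on_first_fpu_use() are illustrative stand-ins, not the HelenOS slab or trap API):

#include <stdio.h>
#include <stdlib.h>

#define LAZY_FPU 1                     /* stand-in for CONFIG_FPU_LAZY */

typedef struct {
    unsigned char regs[512];           /* pretend FPU register file */
} fpu_context_t;

typedef struct {
    fpu_context_t *saved_fpu_context;
} thread_t;

/* Eager policy: pay for the context up front, fail creation on OOM. */
static int thread_ctor_eager(thread_t *t)
{
    t->saved_fpu_context = malloc(sizeof(fpu_context_t));
    return t->saved_fpu_context ? 0 : -1;
}

/* Lazy policy: leave the context NULL; allocate it only when the thread
 * actually uses the FPU (the kernel does this in the FPU trap handler). */
static void thread_ctor_lazy(thread_t *t)
{
    t->saved_fpu_context = NULL;
}

static int on_first_fpu_use(thread_t *t)
{
    if (!t->saved_fpu_context)
        t->saved_fpu_context = malloc(sizeof(fpu_context_t));
    return t->saved_fpu_context ? 0 : -1;
}

int main(void)
{
    thread_t t;

#if LAZY_FPU
    thread_ctor_lazy(&t);
    on_first_fpu_use(&t);              /* allocation deferred until here */
#else
    if (thread_ctor_eager(&t) != 0)
        return 1;
#endif
    printf("fpu context at %p\n", (void *) t.saved_fpu_context);
    free(t.saved_fpu_context);
    return 0;
}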

Line 190... Line 194...
 
     /* call the architecture-specific part of the destructor */
     thr_destructor_arch(t);
 
     frame_free(KA2PA(t->kstack));
-#ifdef ARCH_HAS_FPU
+#ifdef CONFIG_FPU
     if (t->saved_fpu_context)
         slab_free(fpu_context_slab, t->saved_fpu_context);
 #endif
     return 1; /* One page freed */
 }

Line 205... Line 209...
  *
  */
 void thread_init(void)
 {
     THREAD = NULL;
-    atomic_set(&nrdy,0);
+    atomic_set(&nrdy, 0);
     thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
         thr_constructor, thr_destructor, 0);
 
-#ifdef ARCH_HAS_FPU
+#ifdef CONFIG_FPU
     fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
         FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
 
     avltree_create(&threads_tree);

Line 273... Line 277...
  * @param arg       Thread's implementing function argument.
  * @param task      Task to which the thread belongs. The caller must
  *          guarantee that the task won't cease to exist during the
  *          call. The task's lock may not be held.
  * @param flags     Thread flags.
- * @param name      Symbolic name.
+ * @param name      Symbolic name (a copy is made).
  * @param uncounted Thread's accounting doesn't affect accumulated task
  *          accounting.
  *
  * @return      New thread's structure on success, NULL on failure.
  *

Line 310... Line 314...
     ipl = interrupts_disable();
     t->saved_context.ipl = interrupts_read();
     interrupts_restore(ipl);
 
     memcpy(t->name, name, THREAD_NAME_BUFLEN);
+    t->name[THREAD_NAME_BUFLEN - 1] = '\0';
 
     t->thread_code = func;
     t->thread_arg = arg;
     t->ticks = -1;
     t->cycles = 0;

Line 345... Line 350...
     t->fpu_context_engaged = 0;
 
     avltree_node_initialize(&t->threads_tree_node);
     t->threads_tree_node.key = (uintptr_t) t;
 
+#ifdef CONFIG_UDEBUG
+    /* Init debugging stuff */
+    udebug_thread_initialize(&t->udebug);
+#endif
+
     /* might depend on previous initialization */
     thread_create_arch(t);
 
     if (!(flags & THREAD_FLAG_NOATTACH))
         thread_attach(t, task);

Line 407... Line 417...
 void thread_attach(thread_t *t, task_t *task)
 {
     ipl_t ipl;
 
     /*
-     * Attach to the current task.
+     * Attach to the specified task.
      */
     ipl = interrupts_disable();
     spinlock_lock(&task->lock);
+
     atomic_inc(&task->refcount);
-    atomic_inc(&task->lifecount);
+
+    /* Must not count kbox thread into lifecount */
+    if (t->flags & THREAD_FLAG_USPACE)
+        atomic_inc(&task->lifecount);
+
     list_append(&t->th_link, &task->th_head);
     spinlock_unlock(&task->lock);
 
     /*
      * Register this thread in the system-wide list.

Line 435... Line 450...
  */
 void thread_exit(void)
 {
     ipl_t ipl;
 
-    if (atomic_predec(&TASK->lifecount) == 0) {
-        /*
-         * We are the last thread in the task that still has not exited.
-         * With the exception of the moment the task was created, new
-         * threads can only be created by threads of the same task.
-         * We are safe to perform cleanup.
-         */
-        if (THREAD->flags & THREAD_FLAG_USPACE) {
+    if (THREAD->flags & THREAD_FLAG_USPACE) {
+#ifdef CONFIG_UDEBUG
+        /* Generate udebug THREAD_E event */
+        udebug_thread_e_event();
+#endif
+        if (atomic_predec(&TASK->lifecount) == 0) {
+            /*
+             * We are the last userspace thread in the task that
+             * still has not exited. With the exception of the
+             * moment the task was created, new userspace threads
+             * can only be created by threads of the same task.
+             * We are safe to perform cleanup.
+             */
             ipc_cleanup();
             futex_cleanup();
             LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid);
         }
     }
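
Together with the thread_attach() change above, the rewritten thread_exit() means task->lifecount now counts only userspace threads, and the one that drives it to zero runs the IPC and futex cleanup. A minimal sketch of that decrement-and-test pattern using C11 atomics rather than the kernel's atomic_inc()/atomic_predec():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int lifecount;           /* number of live userspace threads */

static void task_attach_uspace_thread(void)
{
    atomic_fetch_add(&lifecount, 1);
}

static void task_cleanup(void)
{
    /* stands in for ipc_cleanup(), futex_cleanup(), ... */
    puts("cleanup of task completed");
}

static void uspace_thread_exit(void)
{
    /* fetch_sub returns the previous value: 1 means we just reached zero,
     * i.e. we were the last userspace thread still running. */
    if (atomic_fetch_sub(&lifecount, 1) == 1)
        task_cleanup();
}

int main(void)
{
    task_attach_uspace_thread();
    task_attach_uspace_thread();
    uspace_thread_exit();              /* not last - no cleanup */
    uspace_thread_exit();              /* last - triggers cleanup */
    return 0;
}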

Line 687... Line 707...
 
 /** Process syscall to create new thread.
  *
  */
 unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
-    thread_id_t *uspace_thread_id)
+    size_t name_len, thread_id_t *uspace_thread_id)
 {
     thread_t *t;
     char namebuf[THREAD_NAME_BUFLEN];
     uspace_arg_t *kernel_uarg;
     int rc;
 
-    rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
+    if (name_len > THREAD_NAME_BUFLEN - 1)
+        name_len = THREAD_NAME_BUFLEN - 1;
+
+    rc = copy_from_uspace(namebuf, uspace_name, name_len);
     if (rc != 0)
         return (unative_t) rc;
 
+    namebuf[name_len] = '\0';
+
     /*
      * In case of failure, kernel_uarg will be deallocated in this function.
      * In case of success, kernel_uarg will be freed in uinit().
      */
     kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
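
The new name_len parameter lets the kernel clamp the copy to the buffer, read only what the caller actually passed, and terminate the result, instead of always pulling THREAD_NAME_BUFLEN bytes from userspace. A userland analogue of the clamp/copy/terminate sequence (memcpy stands in for copy_from_uspace(); the buffer length is illustrative):

#include <stdio.h>
#include <string.h>

#define THREAD_NAME_BUFLEN 20

static void copy_thread_name(char namebuf[THREAD_NAME_BUFLEN],
    const char *uspace_name, size_t name_len)
{
    /* Clamp so neither the copy nor the terminator can overflow. */
    if (name_len > THREAD_NAME_BUFLEN - 1)
        name_len = THREAD_NAME_BUFLEN - 1;

    memcpy(namebuf, uspace_name, name_len);
    namebuf[name_len] = '\0';
}

int main(void)
{
    char namebuf[THREAD_NAME_BUFLEN];
    const char *name = "a-rather-long-thread-name";

    copy_thread_name(namebuf, name, strlen(name));
    printf("%s\n", namebuf);           /* truncated to 19 chars plus NUL */
    return 0;
}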

Line 736... Line 761...
                 free(kernel_uarg);
 
                 return (unative_t) rc;
              }
         }
+#ifdef CONFIG_UDEBUG
+        /*
+         * Generate udebug THREAD_B event and attach the thread.
+         * This must be done atomically (with the debug locks held),
+         * otherwise we would either miss some thread or receive
+         * THREAD_B events for threads that already existed
+         * and could be detected with THREAD_READ before.
+         */
+        udebug_thread_b_event_attach(t, TASK);
+#else
         thread_attach(t, TASK);
+#endif
         thread_ready(t);
 
         return 0;
     } else
         free(kernel_uarg);
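
The comment in the new CONFIG_UDEBUG branch describes an enumeration race: attaching the thread and emitting its THREAD_B event must happen under the same debug locks a debugger takes for THREAD_READ, so every thread is observed exactly once, either in the snapshot or through a later event. A small pthread sketch of that idea (the registry and event queue here are invented for illustration only):

#include <pthread.h>
#include <stdio.h>

#define MAX_THREADS 16

static pthread_mutex_t debug_lock = PTHREAD_MUTEX_INITIALIZER;
static int registry[MAX_THREADS];
static int nregistered;
static int events[MAX_THREADS];        /* pending "thread created" events */
static int nevents;

/* Both steps happen under one lock, mirroring the attach-plus-event call. */
static void attach_and_emit(int tid)
{
    pthread_mutex_lock(&debug_lock);
    registry[nregistered++] = tid;     /* attach to the task */
    events[nevents++] = tid;           /* queue the creation event */
    pthread_mutex_unlock(&debug_lock);
}

/* Snapshot existing threads under the same lock, draining stale events so
 * a thread already listed is not reported a second time later. */
static void debugger_snapshot(void)
{
    pthread_mutex_lock(&debug_lock);
    for (int i = 0; i < nregistered; i++)
        printf("existing thread %d\n", registry[i]);
    nevents = 0;
    pthread_mutex_unlock(&debug_lock);
}

int main(void)
{
    attach_and_emit(1);
    debugger_snapshot();               /* sees thread 1, clears its event */
    attach_and_emit(2);                /* reported later via its queued event */
    printf("pending events: %d\n", nevents);
    return 0;
}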