Subversion Repositories HelenOS

Rev 2436 (old) vs. Rev 2446 (new)
Line 54... Line 54...
 #include <print.h>
 #include <lib/elf.h>
 #include <errno.h>
 #include <func.h>
 #include <syscall/copy.h>
-#include <console/klog.h>
 
 #ifndef LOADED_PROG_STACK_PAGES_NO
 #define LOADED_PROG_STACK_PAGES_NO 1
 #endif
 
Line 77... Line 76...
  */
 btree_t tasks_btree;
 
 static task_id_t task_counter = 0;
 
-static void ktaskclnp(void *arg);
-static void ktaskgc(void *arg);
-
 /** Initialize tasks
  *
  * Initialize kernel tasks support.
  *
  */
Line 162... Line 158...
 
     spinlock_initialize(&ta->lock, "task_ta_lock");
     list_initialize(&ta->th_head);
     ta->as = as;
     ta->name = name;
-    ta->main_thread = NULL;
-    ta->refcount = 0;
+    atomic_set(&ta->refcount, 0);
+    atomic_set(&ta->lifecount, 0);
     ta->context = CONTEXT;
 
     ta->capabilities = 0;
-    ta->accept_new_threads = true;
     ta->cycles = 0;
 
     ipc_answerbox_init(&ta->answerbox);
     for (i = 0; i < IPC_MAX_PHONES; i++)
         ipc_phone_init(&ta->phones[i]);
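
The plain ta->refcount counter (together with main_thread and accept_new_threads) is replaced by two atomics, refcount and lifecount, so task lifetime can be tracked without taking the task lock. A minimal sketch of how such a lifecount is typically driven, assuming the atomic_inc()/atomic_predec() semantics already used in this file; the attach/detach helpers below are hypothetical and not part of this diff:

    /* Hypothetical sketch: each thread pins its task for as long as it exists. */
    static void task_attach_thread(task_t *ta)
    {
        /* One more thread keeps the task alive. */
        atomic_inc(&ta->lifecount);
    }

    static void task_detach_thread(task_t *ta)
    {
        /*
         * atomic_predec() returns the new value, so exactly one caller
         * observes zero and performs the final cleanup.
         */
        if (atomic_predec(&ta->lifecount) == 0)
            task_destroy(ta);
    }
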
Line 189... Line 184...
      * Increment address space reference count.
      */
     atomic_inc(&as->refcount);
 
     spinlock_lock(&tasks_lock);
-
     ta->taskid = ++task_counter;
     btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);
-
     spinlock_unlock(&tasks_lock);
     interrupts_restore(ipl);
 
     return ta;
 }
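
The task ID is assigned and the task published in tasks_btree while tasks_lock is held, which is what makes the task_find_by_id() lookup used later in task_kill() safe. For reference, the lookup side is essentially a btree_search() under the same lock; this is a sketch based on the B+tree calls visible in this diff, not code from this revision, so the exact body may differ:

    /* Sketch: find a task by its ID. The caller must hold tasks_lock. */
    task_t *task_find_by_id(task_id_t id)
    {
        btree_node_t *leaf;

        return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
    }
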
Line 205... Line 198...
  *
  * @param t Task to be destroyed.
  */
 void task_destroy(task_t *t)
 {
+    /*
+     * Remove the task from the task B+tree.
+     */
+    spinlock_lock(&tasks_lock);
+    btree_remove(&tasks_btree, t->taskid, NULL);
+    spinlock_unlock(&tasks_lock);
+
+    /*
+     * Perform architecture specific task destruction.
+     */
     task_destroy_arch(t);
+
+    /*
+     * Free up dynamically allocated state.
+     */
     btree_destroy(&t->futexes);
 
+    /*
+     * Drop our reference to the address space.
+     */
     if (atomic_predec(&t->as->refcount) == 0)
         as_destroy(t->as);
 
     free(t);
     TASK = NULL;
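
The address-space teardown relies on atomic_predec() returning the post-decrement value, so only the caller that drops the last reference destroys the object. The same idiom in isolation; as_release() is a made-up name for illustration, not a function from this revision:

    /* Drop one reference to an address space; free it on the last drop. */
    static void as_release(as_t *as)
    {
        if (atomic_predec(&as->refcount) == 0)
            as_destroy(as);
    }
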
Line 227... Line 237...
 task_t *task_run_program(void *program_addr, char *name)
 {
     as_t *as;
     as_area_t *a;
     int rc;
-    thread_t *t1, *t2;
+    thread_t *t;
     task_t *task;
     uspace_arg_t *kernel_uarg;
 
     as = as_create(0);
     ASSERT(as);
Line 261... Line 271...
         AS_AREA_ATTR_NONE, &anon_backend, NULL);
 
     /*
      * Create the main thread.
      */
-    t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
+    t = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
         "uinit", false);
-    ASSERT(t1);
+    ASSERT(t);
 
-    /*
-     * Create killer thread for the new task.
-     */
-    t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc", true);
-    ASSERT(t2);
-    thread_ready(t2);
-
-    thread_ready(t1);
+    thread_ready(t);
 
     return task;
 }
 
 /** Syscall for reading task ID from userspace.
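
With the ktaskgc killer thread gone, task_run_program() creates only the single uinit thread, using the two-step pattern kept from the old code: thread_create() with the last argument false leaves the thread suspended, and thread_ready() releases it once setup is complete. A sketch of that pattern using the call shapes from this hunk; spawn_worker, worker_fn, and arg are illustrative names, not part of this diff:

    /* Sketch: create a userspace thread suspended, then make it runnable. */
    static thread_t *spawn_worker(task_t *task, void (*worker_fn)(void *),
        void *arg)
    {
        thread_t *t;

        /* Created suspended (last argument false)... */
        t = thread_create(worker_fn, arg, task, THREAD_FLAG_USPACE,
            "worker", false);
        ASSERT(t);

        /* ...and only scheduled after all bookkeeping is done. */
        thread_ready(t);
        return t;
    }
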
Line 345... Line 348...
     return ret;
 }
 
 /** Kill task.
  *
+ * This function is idempotent.
+ * It signals all the task's threads to bail it out.
+ *
  * @param id ID of the task to be killed.
  *
  * @return 0 on success or an error code from errno.h
  */
 int task_kill(task_id_t id)
 {
     ipl_t ipl;
     task_t *ta;
-    thread_t *t;
     link_t *cur;
 
     if (id == 1)
         return EPERM;
 
     ipl = interrupts_disable();
     spinlock_lock(&tasks_lock);
-
     if (!(ta = task_find_by_id(id))) {
         spinlock_unlock(&tasks_lock);
         interrupts_restore(ipl);
         return ENOENT;
     }
-
-    spinlock_lock(&ta->lock);
-    ta->refcount++;
-    spinlock_unlock(&ta->lock);
-
-    btree_remove(&tasks_btree, ta->taskid, NULL);
     spinlock_unlock(&tasks_lock);
 
-    t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp", true);
-
-    spinlock_lock(&ta->lock);
-    ta->accept_new_threads = false;
-    ta->refcount--;
-
     /*
      * Interrupt all threads except ktaskclnp.
      */
+    spinlock_lock(&ta->lock);
     for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
         thread_t *thr;
-        bool  sleeping = false;
+        bool sleeping = false;
 
         thr = list_get_instance(cur, thread_t, th_link);
-        if (thr == t)
-            continue;
 
         spinlock_lock(&thr->lock);
         thr->interrupted = true;
         if (thr->state == Sleeping)
             sleeping = true;
         spinlock_unlock(&thr->lock);
 
         if (sleeping)
             waitq_interrupt_sleep(thr);
     }
-
     spinlock_unlock(&ta->lock);
     interrupts_restore(ipl);
 
-    if (t)
-        thread_ready(t);
-
     return 0;
 }
 
 /** Print task list */
 void task_print_list(void)
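
The interruption loop follows a standard idiom: mark the thread interrupted under its own lock, remember whether it was sleeping, and call waitq_interrupt_sleep() only after the lock is released. Factored out as a standalone helper for clarity; thread_interrupt() is a hypothetical name, while the body is taken from the loop above:

    /* Ask one thread to bail out; wake it if it is blocked in a wait queue. */
    static void thread_interrupt(thread_t *thr)
    {
        bool sleeping = false;

        spinlock_lock(&thr->lock);
        thr->interrupted = true;
        if (thr->state == Sleeping)
            sleeping = true;
        spinlock_unlock(&thr->lock);

        if (sleeping)
            waitq_interrupt_sleep(thr);
    }
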
Line 423... Line 410...
     ipl = interrupts_disable();
     spinlock_lock(&tasks_lock);
 
     printf("taskid name       ctx address    as         cycles     threads "
         "calls  callee\n");
-    printf("------ ---------- --- ---------- ---------- ---------- ------- "        "------ ------>\n");
+    printf("------ ---------- --- ---------- ---------- ---------- ------- "
+        "------ ------>\n");
 
     for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
         cur = cur->next) {
         btree_node_t *node;
         unsigned int i;
Line 462... Line 450...
 
     spinlock_unlock(&tasks_lock);
     interrupts_restore(ipl);
 }
 
-/** Kernel thread used to cleanup the task after it is killed. */
-void ktaskclnp(void *arg)
-{
-    ipl_t ipl;
-    thread_t *t = NULL, *main_thread;
-    link_t *cur;
-    bool again;
-
-    thread_detach(THREAD);
-
-loop:
-    ipl = interrupts_disable();
-    spinlock_lock(&TASK->lock);
-
-    main_thread = TASK->main_thread;
-
-    /*
-     * Find a thread to join.
-     */
-    again = false;
-    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
-        t = list_get_instance(cur, thread_t, th_link);
-
-        spinlock_lock(&t->lock);
-        if (t == THREAD) {
-            spinlock_unlock(&t->lock);
-            continue;
-        } else if (t == main_thread) {
-            spinlock_unlock(&t->lock);
-            continue;
-        } else if (t->join_type != None) {
-            spinlock_unlock(&t->lock);
-            again = true;
-            continue;
-        } else {
-            t->join_type = TaskClnp;
-            spinlock_unlock(&t->lock);
-            again = false;
-            break;
-        }
-    }
-
-    spinlock_unlock(&TASK->lock);
-    interrupts_restore(ipl);
-
-    if (again) {
-        /*
-         * Other cleanup (e.g. ktaskgc) is in progress.
-         */
-        scheduler();
-        goto loop;
-    }
-
-    if (t != THREAD) {
-        ASSERT(t != main_thread);   /* uninit is joined and detached
-                                     * in ktaskgc */
-        thread_join(t);
-        thread_detach(t);
-        goto loop;          /* go for another thread */
-    }
-
-    /*
-     * Now there are no other threads in this task
-     * and no new threads can be created.
-     */
-
-    ipc_cleanup();
-    futex_cleanup();
-    klog_printf("Cleanup of task %llu completed.", TASK->taskid);
-}
-
-/** Kernel thread used to kill the userspace task when its main thread exits.
- *
- * This thread waits until the main userspace thread (i.e. uninit) exits.
- * When this happens, the task is killed. In the meantime, exited threads
- * are garbage collected.
- *
- * @param arg Pointer to the thread structure of the task's main thread.
- */
-void ktaskgc(void *arg)
-{
-    thread_t *t = (thread_t *) arg;
-loop:
-    /*
-     * Userspace threads cannot detach themselves,
-     * therefore the thread pointer is guaranteed to be valid.
-     */
-    if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
-        ESYNCH_TIMEOUT) {   /* sleep uninterruptibly here! */
-        ipl_t ipl;
-        link_t *cur;
-        thread_t *thr = NULL;
-
-        /*
-         * The join timed out. Try to do some garbage collection of
-         * Undead threads.
-         */
-more_gc:
-        ipl = interrupts_disable();
-        spinlock_lock(&TASK->lock);
-
-        for (cur = TASK->th_head.next; cur != &TASK->th_head;
-            cur = cur->next) {
-            thr = list_get_instance(cur, thread_t, th_link);
-            spinlock_lock(&thr->lock);
-            if (thr != t && thr->state == Undead &&
-                thr->join_type == None) {
-                thr->join_type = TaskGC;
-                spinlock_unlock(&thr->lock);
-                break;
-            }
-            spinlock_unlock(&thr->lock);
-            thr = NULL;
-        }
-        spinlock_unlock(&TASK->lock);
-        interrupts_restore(ipl);
-
-        if (thr) {
-            thread_join(thr);
-            thread_detach(thr);
-            scheduler();
-            goto more_gc;
-        }
-
-        goto loop;
-    }
-    thread_detach(t);
-    task_kill(TASK->taskid);
-}
-
 /** @}
  */