Subversion Repositories HelenOS

Rev

Rev 2227 | Rev 2446 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2227 Rev 2436
1
/*
1
/*
2
 * Copyright (c) 2001-2004 Jakub Jermar
2
 * Copyright (c) 2001-2004 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup genericproc
29
/** @addtogroup genericproc
30
 * @{
30
 * @{
31
 */
31
 */
32
 
32
 
33
/**
33
/**
34
 * @file
34
 * @file
35
 * @brief   Task management.
35
 * @brief   Task management.
36
 */
36
 */
37
 
37
 
38
#include <main/uinit.h>
38
#include <main/uinit.h>
39
#include <proc/thread.h>
39
#include <proc/thread.h>
40
#include <proc/task.h>
40
#include <proc/task.h>
41
#include <proc/uarg.h>
41
#include <proc/uarg.h>
42
#include <mm/as.h>
42
#include <mm/as.h>
43
#include <mm/slab.h>
43
#include <mm/slab.h>
44
#include <atomic.h>
44
#include <atomic.h>
45
#include <synch/spinlock.h>
45
#include <synch/spinlock.h>
46
#include <synch/waitq.h>
46
#include <synch/waitq.h>
47
#include <arch.h>
47
#include <arch.h>
48
#include <panic.h>
48
#include <panic.h>
49
#include <adt/btree.h>
49
#include <adt/btree.h>
50
#include <adt/list.h>
50
#include <adt/list.h>
51
#include <ipc/ipc.h>
51
#include <ipc/ipc.h>
52
#include <security/cap.h>
52
#include <security/cap.h>
53
#include <memstr.h>
53
#include <memstr.h>
54
#include <print.h>
54
#include <print.h>
55
#include <lib/elf.h>
55
#include <lib/elf.h>
56
#include <errno.h>
56
#include <errno.h>
57
#include <func.h>
57
#include <func.h>
58
#include <syscall/copy.h>
58
#include <syscall/copy.h>
59
#include <console/klog.h>
59
#include <console/klog.h>
60
 
60
 
61
#ifndef LOADED_PROG_STACK_PAGES_NO
61
#ifndef LOADED_PROG_STACK_PAGES_NO
62
#define LOADED_PROG_STACK_PAGES_NO 1
62
#define LOADED_PROG_STACK_PAGES_NO 1
63
#endif
63
#endif
64
 
64
 
65
/** Spinlock protecting the tasks_btree B+tree. */
65
/** Spinlock protecting the tasks_btree B+tree. */
66
SPINLOCK_INITIALIZE(tasks_lock);
66
SPINLOCK_INITIALIZE(tasks_lock);
67
 
67
 
68
/** B+tree of active tasks.
68
/** B+tree of active tasks.
69
 *
69
 *
70
 * The task is guaranteed to exist after it was found in the tasks_btree as
70
 * The task is guaranteed to exist after it was found in the tasks_btree as
71
 * long as:
71
 * long as:
72
 * @li the tasks_lock is held,
72
 * @li the tasks_lock is held,
73
 * @li the task's lock is held when task's lock is acquired before releasing
73
 * @li the task's lock is held when task's lock is acquired before releasing
74
 *     tasks_lock or
74
 *     tasks_lock or
75
 * @li the task's refcount is greater than 0
75
 * @li the task's refcount is greater than 0
76
 *
76
 *
77
 */
77
 */
78
btree_t tasks_btree;
78
btree_t tasks_btree;
79
 
79
 
80
static task_id_t task_counter = 0;
80
static task_id_t task_counter = 0;
81
 
81
 
82
static void ktaskclnp(void *arg);
82
static void ktaskclnp(void *arg);
83
static void ktaskgc(void *arg);
83
static void ktaskgc(void *arg);
84
 
84
 
85
/** Initialize tasks
85
/** Initialize tasks
86
 *
86
 *
87
 * Initialize kernel tasks support.
87
 * Initialize kernel tasks support.
88
 *
88
 *
89
 */
89
 */
90
void task_init(void)
90
void task_init(void)
91
{
91
{
92
    TASK = NULL;
92
    TASK = NULL;
93
    btree_create(&tasks_btree);
93
    btree_create(&tasks_btree);
94
}
94
}
95
 
95
 
96
/** Kill all tasks except the current task.
96
/** Kill all tasks except the current task.
97
 *
97
 *
98
 */
98
 */
99
void task_done(void)
99
void task_done(void)
100
{
100
{
101
    task_t *t;
101
    task_t *t;
102
    do { /* Repeat until there are any tasks except TASK */
102
    do { /* Repeat until there are any tasks except TASK */
103
       
103
       
104
        /* Messing with task structures, avoid deadlock */
104
        /* Messing with task structures, avoid deadlock */
105
        ipl_t ipl = interrupts_disable();
105
        ipl_t ipl = interrupts_disable();
106
        spinlock_lock(&tasks_lock);
106
        spinlock_lock(&tasks_lock);
107
       
107
       
108
        t = NULL;
108
        t = NULL;
109
        link_t *cur;
109
        link_t *cur;
-
 
110
        for (cur = tasks_btree.leaf_head.next;
110
        for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head; cur = cur->next) {
111
            cur != &tasks_btree.leaf_head; cur = cur->next) {
-
 
112
            btree_node_t *node;
-
 
113
           
111
            btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
114
            node = list_get_instance(cur, btree_node_t, leaf_link);
112
           
115
           
113
            unsigned int i;
116
            unsigned int i;
114
            for (i = 0; i < node->keys; i++) {
117
            for (i = 0; i < node->keys; i++) {
115
                if ((task_t *) node->value[i] != TASK) {
118
                if ((task_t *) node->value[i] != TASK) {
116
                    t = (task_t *) node->value[i];
119
                    t = (task_t *) node->value[i];
117
                    break;
120
                    break;
118
                }
121
                }
119
            }
122
            }
120
        }
123
        }
121
       
124
       
122
        if (t != NULL) {
125
        if (t != NULL) {
123
            task_id_t id = t->taskid;
126
            task_id_t id = t->taskid;
124
           
127
           
125
            spinlock_unlock(&tasks_lock);
128
            spinlock_unlock(&tasks_lock);
126
            interrupts_restore(ipl);
129
            interrupts_restore(ipl);
127
           
130
           
128
#ifdef CONFIG_DEBUG
131
#ifdef CONFIG_DEBUG
129
            printf("Killing task %llu\n", id);
132
            printf("Killing task %llu\n", id);
130
#endif          
133
#endif          
131
            task_kill(id);
134
            task_kill(id);
132
        } else {
135
        } else {
133
            spinlock_unlock(&tasks_lock);
136
            spinlock_unlock(&tasks_lock);
134
            interrupts_restore(ipl);
137
            interrupts_restore(ipl);
135
        }
138
        }
136
       
139
       
137
    } while (t != NULL);
140
    } while (t != NULL);
138
}
141
}
139
 
142
 
140
/** Create new task
143
/** Create new task
141
 *
144
 *
142
 * Create new task with no threads.
145
 * Create new task with no threads.
143
 *
146
 *
144
 * @param as Task's address space.
147
 * @param as Task's address space.
145
 * @param name Symbolic name.
148
 * @param name Symbolic name.
146
 *
149
 *
147
 * @return New task's structure
150
 * @return New task's structure
148
 *
151
 *
149
 */
152
 */
150
/** Create new task.
 *
 * Allocate and initialize a task structure with no threads attached,
 * wire up its IPC machinery and register it in the tasks B+tree.
 *
 * @param as Task's address space.
 * @param name Symbolic name.
 *
 * @return New task's structure.
 */
task_t *task_create(as_t *as, char *name)
{
    task_t *task = (task_t *) malloc(sizeof(task_t), 0);

    task_create_arch(task);

    spinlock_initialize(&task->lock, "task_ta_lock");
    list_initialize(&task->th_head);
    task->as = as;
    task->name = name;
    task->main_thread = NULL;
    task->refcount = 0;
    task->context = CONTEXT;

    task->capabilities = 0;
    task->accept_new_threads = true;
    task->cycles = 0;

    /* IPC: answerbox, phones and the optional connection to phone 0. */
    ipc_answerbox_init(&task->answerbox);
    int idx;
    for (idx = 0; idx < IPC_MAX_PHONES; idx++)
        ipc_phone_init(&task->phones[idx]);
    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
        task->context)))
        ipc_phone_connect(&task->phones[0], ipc_phone_0);
    atomic_set(&task->active_calls, 0);

    mutex_initialize(&task->futexes_lock);
    btree_create(&task->futexes);

    ipl_t ipl = interrupts_disable();

    /*
     * Increment address space reference count.
     */
    atomic_inc(&as->refcount);

    spinlock_lock(&tasks_lock);

    task->taskid = ++task_counter;
    btree_insert(&tasks_btree, (btree_key_t) task->taskid, (void *) task,
        NULL);

    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);

    return task;
}
200
 
203
 
201
/** Destroy task.
204
/** Destroy task.
202
 *
205
 *
203
 * @param t Task to be destroyed.
206
 * @param t Task to be destroyed.
204
 */
207
 */
205
/** Destroy task.
 *
 * Tears down architecture-specific state and the futex B+tree, drops
 * the task's reference to its address space (destroying it on the last
 * reference) and frees the task structure.
 *
 * @param task Task to be destroyed.
 */
void task_destroy(task_t *task)
{
    task_destroy_arch(task);
    btree_destroy(&task->futexes);

    /* Release our address-space reference; free it if it was the last. */
    if (atomic_predec(&task->as->refcount) == 0)
        as_destroy(task->as);

    free(task);
    TASK = NULL;
}
216
 
219
 
217
/** Create new task with 1 thread and run it
220
/** Create new task with 1 thread and run it
218
 *
221
 *
219
 * @param program_addr Address of program executable image.
222
 * @param program_addr Address of program executable image.
220
 * @param name Program name.
223
 * @param name Program name.
221
 *
224
 *
222
 * @return Task of the running program or NULL on error.
225
 * @return Task of the running program or NULL on error.
223
 */
226
 */
224
task_t * task_run_program(void *program_addr, char *name)
227
task_t *task_run_program(void *program_addr, char *name)
225
{
228
{
226
    as_t *as;
229
    as_t *as;
227
    as_area_t *a;
230
    as_area_t *a;
228
    int rc;
231
    int rc;
229
    thread_t *t1, *t2;
232
    thread_t *t1, *t2;
230
    task_t *task;
233
    task_t *task;
231
    uspace_arg_t *kernel_uarg;
234
    uspace_arg_t *kernel_uarg;
232
 
235
 
233
    as = as_create(0);
236
    as = as_create(0);
234
    ASSERT(as);
237
    ASSERT(as);
235
 
238
 
236
    rc = elf_load((elf_header_t *) program_addr, as);
239
    rc = elf_load((elf_header_t *) program_addr, as);
237
    if (rc != EE_OK) {
240
    if (rc != EE_OK) {
238
        as_destroy(as);
241
        as_destroy(as);
239
        return NULL;
242
        return NULL;
240
    }
243
    }
241
   
244
   
242
    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
245
    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
243
    kernel_uarg->uspace_entry =
246
    kernel_uarg->uspace_entry =
244
        (void *) ((elf_header_t *) program_addr)->e_entry;
247
        (void *) ((elf_header_t *) program_addr)->e_entry;
245
    kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
248
    kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
246
    kernel_uarg->uspace_thread_function = NULL;
249
    kernel_uarg->uspace_thread_function = NULL;
247
    kernel_uarg->uspace_thread_arg = NULL;
250
    kernel_uarg->uspace_thread_arg = NULL;
248
    kernel_uarg->uspace_uarg = NULL;
251
    kernel_uarg->uspace_uarg = NULL;
249
   
252
   
250
    task = task_create(as, name);
253
    task = task_create(as, name);
251
    ASSERT(task);
254
    ASSERT(task);
252
 
255
 
253
    /*
256
    /*
254
     * Create the data as_area.
257
     * Create the data as_area.
255
     */
258
     */
256
    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
259
    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
257
        LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
260
        LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
258
        AS_AREA_ATTR_NONE, &anon_backend, NULL);
261
        AS_AREA_ATTR_NONE, &anon_backend, NULL);
259
 
262
 
260
    /*
263
    /*
261
     * Create the main thread.
264
     * Create the main thread.
262
     */
265
     */
263
    t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
266
    t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
264
        "uinit", false);
267
        "uinit", false);
265
    ASSERT(t1);
268
    ASSERT(t1);
266
   
269
   
267
    /*
270
    /*
268
     * Create killer thread for the new task.
271
     * Create killer thread for the new task.
269
     */
272
     */
270
    t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc", true);
273
    t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc", true);
271
    ASSERT(t2);
274
    ASSERT(t2);
272
    thread_ready(t2);
275
    thread_ready(t2);
273
 
276
 
274
    thread_ready(t1);
277
    thread_ready(t1);
275
 
278
 
276
    return task;
279
    return task;
277
}
280
}
278
 
281
 
279
/** Syscall for reading task ID from userspace.
282
/** Syscall for reading task ID from userspace.
280
 *
283
 *
281
 * @param uspace_task_id Userspace address of 8-byte buffer where to store
284
 * @param uspace_task_id Userspace address of 8-byte buffer where to store
282
 * current task ID.
285
 * current task ID.
283
 *
286
 *
284
 * @return 0 on success or an error code from @ref errno.h.
287
 * @return 0 on success or an error code from @ref errno.h.
285
 */
288
 */
286
/** Syscall for reading task ID from userspace.
 *
 * @param uspace_task_id Userspace address of 8-byte buffer where to store
 * current task ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
    /*
     * TASK->taskid is constant for the whole lifespan of the task,
     * so no lock on TASK is needed here.
     */
    task_id_t id = TASK->taskid;
    return (unative_t) copy_to_uspace(uspace_task_id, &id, sizeof(id));
}
295
 
298
 
296
/** Find task structure corresponding to task ID.
299
/** Find task structure corresponding to task ID.
297
 *
300
 *
298
 * The tasks_lock must be already held by the caller of this function
301
 * The tasks_lock must be already held by the caller of this function
299
 * and interrupts must be disabled.
302
 * and interrupts must be disabled.
300
 *
303
 *
301
 * @param id Task ID.
304
 * @param id Task ID.
302
 *
305
 *
303
 * @return Task structure address or NULL if there is no such task ID.
306
 * @return Task structure address or NULL if there is no such task ID.
304
 */
307
 */
305
/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must be already held by the caller of this function
 * and interrupts must be disabled.
 *
 * @param id Task ID.
 *
 * @return Task structure address or NULL if there is no such task ID.
 */
task_t *task_find_by_id(task_id_t id)
{
    btree_node_t *leaf_node;

    return (task_t *) btree_search(&tasks_btree, (btree_key_t) id,
        &leaf_node);
}
311
 
314
 
312
/** Get accounting data of given task.
315
/** Get accounting data of given task.
313
 *
316
 *
314
 * Note that task lock of 't' must be already held and
317
 * Note that task lock of 't' must be already held and
315
 * interrupts must be already disabled.
318
 * interrupts must be already disabled.
316
 *
319
 *
317
 * @param t Pointer to thread.
320
 * @param t Pointer to thread.
318
 *
321
 *
319
 */
322
 */
320
/** Get accounting data of given task.
 *
 * Note that task lock of 't' must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to the task.
 *
 * @return Accumulated cycle count of the task and its counted threads.
 */
uint64_t task_get_accounting(task_t *t)
{
    /* Start from the cycles already accumulated on the task itself. */
    uint64_t total = t->cycles;

    /* Add the live counters of every counted thread. */
    link_t *cur;
    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
        thread_t *thread = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&thread->lock);
        /* Process only counted threads */
        if (!thread->uncounted) {
            if (thread == THREAD) {
                /* Update accounting of current thread */
                thread_update_accounting();
            }
            total += thread->cycles;
        }
        spinlock_unlock(&thread->lock);
    }

    return total;
}
344
 
347
 
345
/** Kill task.
348
/** Kill task.
346
 *
349
 *
347
 * @param id ID of the task to be killed.
350
 * @param id ID of the task to be killed.
348
 *
351
 *
349
 * @return 0 on success or an error code from errno.h
352
 * @return 0 on success or an error code from errno.h
350
 */
353
 */
351
/** Kill task.
 *
 * Removes the task from the tasks B+tree, spawns a ktaskclnp cleanup
 * thread for it and interrupts all of its other threads.
 *
 * @param id ID of the task to be killed.
 *
 * @return 0 on success or an error code from errno.h.
 */
int task_kill(task_id_t id)
{
    /* The kernel task (ID 1) must never be killed. */
    if (id == 1)
        return EPERM;

    ipl_t ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    task_t *task = task_find_by_id(id);
    if (task == NULL) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    /* Pin the task so it survives removal from the B+tree. */
    spinlock_lock(&task->lock);
    task->refcount++;
    spinlock_unlock(&task->lock);

    btree_remove(&tasks_btree, task->taskid, NULL);
    spinlock_unlock(&tasks_lock);

    thread_t *cleaner = thread_create(ktaskclnp, NULL, task, 0,
        "ktaskclnp", true);

    spinlock_lock(&task->lock);
    task->accept_new_threads = false;
    task->refcount--;

    /*
     * Interrupt all threads except ktaskclnp.
     */
    link_t *cur;
    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
        bool sleeping = false;
        thread_t *thread = list_get_instance(cur, thread_t, th_link);

        if (thread == cleaner)
            continue;

        spinlock_lock(&thread->lock);
        thread->interrupted = true;
        if (thread->state == Sleeping)
            sleeping = true;
        spinlock_unlock(&thread->lock);

        /* Wake sleepers so they notice the interruption. */
        if (sleeping)
            waitq_interrupt_sleep(thread);
    }

    spinlock_unlock(&task->lock);
    interrupts_restore(ipl);

    if (cleaner)
        thread_ready(cleaner);

    return 0;
}
412
 
415
 
413
/** Print task list */
416
/** Print task list */
414
void task_print_list(void)
417
void task_print_list(void)
415
{
418
{
416
    link_t *cur;
419
    link_t *cur;
417
    ipl_t ipl;
420
    ipl_t ipl;
418
   
421
   
419
    /* Messing with task structures, avoid deadlock */
422
    /* Messing with task structures, avoid deadlock */
420
    ipl = interrupts_disable();
423
    ipl = interrupts_disable();
421
    spinlock_lock(&tasks_lock);
424
    spinlock_lock(&tasks_lock);
422
   
425
   
423
    printf("taskid name       ctx address    as         cycles     threads "
426
    printf("taskid name       ctx address    as         cycles     threads "
424
        "calls  callee\n");
427
        "calls  callee\n");
425
    printf("------ ---------- --- ---------- ---------- ---------- ------- "        "------ ------>\n");
428
    printf("------ ---------- --- ---------- ---------- ---------- ------- "        "------ ------>\n");
426
 
429
 
427
    for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
430
    for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
428
        cur = cur->next) {
431
        cur = cur->next) {
429
        btree_node_t *node;
432
        btree_node_t *node;
430
        unsigned int i;
433
        unsigned int i;
431
       
434
       
432
        node = list_get_instance(cur, btree_node_t, leaf_link);
435
        node = list_get_instance(cur, btree_node_t, leaf_link);
433
        for (i = 0; i < node->keys; i++) {
436
        for (i = 0; i < node->keys; i++) {
434
            task_t *t;
437
            task_t *t;
435
            int j;
438
            int j;
436
 
439
 
437
            t = (task_t *) node->value[i];
440
            t = (task_t *) node->value[i];
438
       
441
       
439
            spinlock_lock(&t->lock);
442
            spinlock_lock(&t->lock);
440
           
443
           
441
            uint64_t cycles;
444
            uint64_t cycles;
442
            char suffix;
445
            char suffix;
443
            order(task_get_accounting(t), &cycles, &suffix);
446
            order(task_get_accounting(t), &cycles, &suffix);
444
           
447
           
445
            printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
448
            printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
446
                "%6zd", t->taskid, t->name, t->context, t, t->as,
449
                "%6zd", t->taskid, t->name, t->context, t, t->as,
447
                cycles, suffix, t->refcount,
450
                cycles, suffix, t->refcount,
448
                atomic_get(&t->active_calls));
451
                atomic_get(&t->active_calls));
449
            for (j = 0; j < IPC_MAX_PHONES; j++) {
452
            for (j = 0; j < IPC_MAX_PHONES; j++) {
450
                if (t->phones[j].callee)
453
                if (t->phones[j].callee)
451
                    printf(" %zd:%#zx", j,
454
                    printf(" %zd:%#zx", j,
452
                        t->phones[j].callee);
455
                        t->phones[j].callee);
453
            }
456
            }
454
            printf("\n");
457
            printf("\n");
455
           
458
           
456
            spinlock_unlock(&t->lock);
459
            spinlock_unlock(&t->lock);
457
        }
460
        }
458
    }
461
    }
459
 
462
 
460
    spinlock_unlock(&tasks_lock);
463
    spinlock_unlock(&tasks_lock);
461
    interrupts_restore(ipl);
464
    interrupts_restore(ipl);
462
}
465
}
463
 
466
 
464
/** Kernel thread used to cleanup the task after it is killed. */
467
/** Kernel thread used to cleanup the task after it is killed. */
465
void ktaskclnp(void *arg)
468
void ktaskclnp(void *arg)
466
{
469
{
467
    ipl_t ipl;
470
    ipl_t ipl;
468
    thread_t *t = NULL, *main_thread;
471
    thread_t *t = NULL, *main_thread;
469
    link_t *cur;
472
    link_t *cur;
470
    bool again;
473
    bool again;
471
 
474
 
472
    thread_detach(THREAD);
475
    thread_detach(THREAD);
473
 
476
 
474
loop:
477
loop:
475
    ipl = interrupts_disable();
478
    ipl = interrupts_disable();
476
    spinlock_lock(&TASK->lock);
479
    spinlock_lock(&TASK->lock);
477
   
480
   
478
    main_thread = TASK->main_thread;
481
    main_thread = TASK->main_thread;
479
   
482
   
480
    /*
483
    /*
481
     * Find a thread to join.
484
     * Find a thread to join.
482
     */
485
     */
483
    again = false;
486
    again = false;
484
    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
487
    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
485
        t = list_get_instance(cur, thread_t, th_link);
488
        t = list_get_instance(cur, thread_t, th_link);
486
 
489
 
487
        spinlock_lock(&t->lock);
490
        spinlock_lock(&t->lock);
488
        if (t == THREAD) {
491
        if (t == THREAD) {
489
            spinlock_unlock(&t->lock);
492
            spinlock_unlock(&t->lock);
490
            continue;
493
            continue;
491
        } else if (t == main_thread) {
494
        } else if (t == main_thread) {
492
            spinlock_unlock(&t->lock);
495
            spinlock_unlock(&t->lock);
493
            continue;
496
            continue;
494
        } else if (t->join_type != None) {
497
        } else if (t->join_type != None) {
495
            spinlock_unlock(&t->lock);
498
            spinlock_unlock(&t->lock);
496
            again = true;
499
            again = true;
497
            continue;
500
            continue;
498
        } else {
501
        } else {
499
            t->join_type = TaskClnp;
502
            t->join_type = TaskClnp;
500
            spinlock_unlock(&t->lock);
503
            spinlock_unlock(&t->lock);
501
            again = false;
504
            again = false;
502
            break;
505
            break;
503
        }
506
        }
504
    }
507
    }
505
   
508
   
506
    spinlock_unlock(&TASK->lock);
509
    spinlock_unlock(&TASK->lock);
507
    interrupts_restore(ipl);
510
    interrupts_restore(ipl);
508
   
511
   
509
    if (again) {
512
    if (again) {
510
        /*
513
        /*
511
         * Other cleanup (e.g. ktaskgc) is in progress.
514
         * Other cleanup (e.g. ktaskgc) is in progress.
512
         */
515
         */
513
        scheduler();
516
        scheduler();
514
        goto loop;
517
        goto loop;
515
    }
518
    }
516
   
519
   
517
    if (t != THREAD) {
520
    if (t != THREAD) {
518
        ASSERT(t != main_thread);   /* uninit is joined and detached
521
        ASSERT(t != main_thread);   /* uninit is joined and detached
519
                         * in ktaskgc */
522
                         * in ktaskgc */
520
        thread_join(t);
523
        thread_join(t);
521
        thread_detach(t);
524
        thread_detach(t);
522
        goto loop;          /* go for another thread */
525
        goto loop;          /* go for another thread */
523
    }
526
    }
524
   
527
   
525
    /*
528
    /*
526
     * Now there are no other threads in this task
529
     * Now there are no other threads in this task
527
     * and no new threads can be created.
530
     * and no new threads can be created.
528
     */
531
     */
529
 
532
 
530
    ipc_cleanup();
533
    ipc_cleanup();
531
    futex_cleanup();
534
    futex_cleanup();
532
    klog_printf("Cleanup of task %llu completed.", TASK->taskid);
535
    klog_printf("Cleanup of task %llu completed.", TASK->taskid);
533
}
536
}
534
 
537
 
535
/** Kernel thread used to kill the userspace task when its main thread exits.
538
/** Kernel thread used to kill the userspace task when its main thread exits.
536
 *
539
 *
537
 * This thread waits until the main userspace thread (i.e. uninit) exits.
540
 * This thread waits until the main userspace thread (i.e. uninit) exits.
538
 * When this happens, the task is killed. In the meantime, exited threads
541
 * When this happens, the task is killed. In the meantime, exited threads
539
 * are garbage collected.
542
 * are garbage collected.
540
 *
543
 *
541
 * @param arg Pointer to the thread structure of the task's main thread.
544
 * @param arg Pointer to the thread structure of the task's main thread.
542
 */
545
 */
543
void ktaskgc(void *arg)
546
void ktaskgc(void *arg)
544
{
547
{
545
    thread_t *t = (thread_t *) arg;
548
    thread_t *t = (thread_t *) arg;
546
loop:  
549
loop:  
547
    /*
550
    /*
548
     * Userspace threads cannot detach themselves,
551
     * Userspace threads cannot detach themselves,
549
     * therefore the thread pointer is guaranteed to be valid.
552
     * therefore the thread pointer is guaranteed to be valid.
550
     */
553
     */
551
    if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
554
    if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
552
        ESYNCH_TIMEOUT) {   /* sleep uninterruptibly here! */
555
        ESYNCH_TIMEOUT) {   /* sleep uninterruptibly here! */
553
        ipl_t ipl;
556
        ipl_t ipl;
554
        link_t *cur;
557
        link_t *cur;
555
        thread_t *thr = NULL;
558
        thread_t *thr = NULL;
556
   
559
   
557
        /*
560
        /*
558
         * The join timed out. Try to do some garbage collection of
561
         * The join timed out. Try to do some garbage collection of
559
         * Undead threads.
562
         * Undead threads.
560
         */
563
         */
561
more_gc:       
564
more_gc:       
562
        ipl = interrupts_disable();
565
        ipl = interrupts_disable();
563
        spinlock_lock(&TASK->lock);
566
        spinlock_lock(&TASK->lock);
564
       
567
       
565
        for (cur = TASK->th_head.next; cur != &TASK->th_head;
568
        for (cur = TASK->th_head.next; cur != &TASK->th_head;
566
            cur = cur->next) {
569
            cur = cur->next) {
567
            thr = list_get_instance(cur, thread_t, th_link);
570
            thr = list_get_instance(cur, thread_t, th_link);
568
            spinlock_lock(&thr->lock);
571
            spinlock_lock(&thr->lock);
569
            if (thr != t && thr->state == Undead &&
572
            if (thr != t && thr->state == Undead &&
570
                thr->join_type == None) {
573
                thr->join_type == None) {
571
                thr->join_type = TaskGC;
574
                thr->join_type = TaskGC;
572
                spinlock_unlock(&thr->lock);
575
                spinlock_unlock(&thr->lock);
573
                break;
576
                break;
574
            }
577
            }
575
            spinlock_unlock(&thr->lock);
578
            spinlock_unlock(&thr->lock);
576
            thr = NULL;
579
            thr = NULL;
577
        }
580
        }
578
        spinlock_unlock(&TASK->lock);
581
        spinlock_unlock(&TASK->lock);
579
        interrupts_restore(ipl);
582
        interrupts_restore(ipl);
580
       
583
       
581
        if (thr) {
584
        if (thr) {
582
            thread_join(thr);
585
            thread_join(thr);
583
            thread_detach(thr);
586
            thread_detach(thr);
584
            scheduler();
587
            scheduler();
585
            goto more_gc;
588
            goto more_gc;
586
        }
589
        }
587
           
590
           
588
        goto loop;
591
        goto loop;
589
    }
592
    }
590
    thread_detach(t);
593
    thread_detach(t);
591
    task_kill(TASK->taskid);
594
    task_kill(TASK->taskid);
592
}
595
}
593
 
596
 
594
/** @}
597
/** @}
595
 */
598
 */
596
 
599