Subversion Repositories HelenOS

Rev

Rev 2446 | Rev 2632 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2446 Rev 2504
1
/*
1
/*
2
 * Copyright (c) 2001-2004 Jakub Jermar
2
 * Copyright (c) 2001-2004 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup genericproc
29
/** @addtogroup genericproc
30
 * @{
30
 * @{
31
 */
31
 */
32
 
32
 
33
/**
33
/**
34
 * @file
34
 * @file
35
 * @brief   Task management.
35
 * @brief   Task management.
36
 */
36
 */
37
 
37
 
38
#include <main/uinit.h>
38
#include <main/uinit.h>
39
#include <proc/thread.h>
39
#include <proc/thread.h>
40
#include <proc/task.h>
40
#include <proc/task.h>
41
#include <proc/uarg.h>
41
#include <proc/uarg.h>
42
#include <mm/as.h>
42
#include <mm/as.h>
43
#include <mm/slab.h>
43
#include <mm/slab.h>
44
#include <atomic.h>
44
#include <atomic.h>
45
#include <synch/spinlock.h>
45
#include <synch/spinlock.h>
46
#include <synch/waitq.h>
46
#include <synch/waitq.h>
47
#include <arch.h>
47
#include <arch.h>
48
#include <panic.h>
48
#include <panic.h>
-
 
49
#include <adt/avl.h>
49
#include <adt/btree.h>
50
#include <adt/btree.h>
50
#include <adt/list.h>
51
#include <adt/list.h>
51
#include <ipc/ipc.h>
52
#include <ipc/ipc.h>
52
#include <security/cap.h>
53
#include <security/cap.h>
53
#include <memstr.h>
54
#include <memstr.h>
54
#include <print.h>
55
#include <print.h>
55
#include <lib/elf.h>
56
#include <lib/elf.h>
56
#include <errno.h>
57
#include <errno.h>
57
#include <func.h>
58
#include <func.h>
58
#include <syscall/copy.h>
59
#include <syscall/copy.h>
59
 
60
 
60
#ifndef LOADED_PROG_STACK_PAGES_NO
61
#ifndef LOADED_PROG_STACK_PAGES_NO
61
#define LOADED_PROG_STACK_PAGES_NO 1
62
#define LOADED_PROG_STACK_PAGES_NO 1
62
#endif
63
#endif
63
 
64
 
64
/** Spinlock protecting the tasks_btree B+tree. */
65
/** Spinlock protecting the tasks_tree AVL tree. */
65
SPINLOCK_INITIALIZE(tasks_lock);
66
SPINLOCK_INITIALIZE(tasks_lock);
66
 
67
 
67
/** B+tree of active tasks.
68
/** AVL tree of active tasks.
68
 *
69
 *
69
 * The task is guaranteed to exist after it was found in the tasks_btree as
70
 * The task is guaranteed to exist after it was found in the tasks_tree as
70
 * long as:
71
 * long as:
71
 * @li the tasks_lock is held,
72
 * @li the tasks_lock is held,
72
 * @li the task's lock is held when task's lock is acquired before releasing
73
 * @li the task's lock is held when task's lock is acquired before releasing
73
 *     tasks_lock or
74
 *     tasks_lock or
74
 * @li the task's refcount is greater than 0
75
 * @li the task's refcount is greater than 0
75
 *
76
 *
76
 */
77
 */
77
btree_t tasks_btree;
78
avltree_t tasks_tree;
78
 
79
 
79
static task_id_t task_counter = 0;
80
static task_id_t task_counter = 0;
80
 
81
 
81
/** Initialize tasks
82
/** Initialize tasks
82
 *
83
 *
83
 * Initialize kernel tasks support.
84
 * Initialize kernel tasks support.
84
 *
85
 *
85
 */
86
 */
86
void task_init(void)
87
void task_init(void)
87
{
88
{
88
    TASK = NULL;
89
    TASK = NULL;
89
    btree_create(&tasks_btree);
90
    avltree_create(&tasks_tree);
-
 
91
}
-
 
92
 
-
 
93
/*
-
 
94
 * The idea behind this walker is to remember a single task different from TASK.
-
 
95
 */
-
 
96
static bool task_done_walker(avltree_node_t *node, void *arg)
-
 
97
{
-
 
98
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
-
 
99
    task_t **tp = (task_t **) arg;
-
 
100
 
-
 
101
    if (t != TASK) {
-
 
102
        *tp = t;
-
 
103
        return false;   /* stop walking */
-
 
104
    }
-
 
105
 
-
 
106
    return true;    /* continue the walk */
90
}
107
}
91
 
108
 
92
/** Kill all tasks except the current task.
109
/** Kill all tasks except the current task.
93
 *
110
 *
94
 */
111
 */
95
void task_done(void)
112
void task_done(void)
96
{
113
{
97
    task_t *t;
114
    task_t *t;
98
    do { /* Repeat until there are any tasks except TASK */
115
    do { /* Repeat until there are any tasks except TASK */
99
       
116
       
100
        /* Messing with task structures, avoid deadlock */
117
        /* Messing with task structures, avoid deadlock */
101
        ipl_t ipl = interrupts_disable();
118
        ipl_t ipl = interrupts_disable();
102
        spinlock_lock(&tasks_lock);
119
        spinlock_lock(&tasks_lock);
103
       
120
       
104
        t = NULL;
121
        t = NULL;
105
        link_t *cur;
-
 
106
        for (cur = tasks_btree.leaf_head.next;
122
        avltree_walk(&tasks_tree, task_done_walker, &t);
107
            cur != &tasks_btree.leaf_head; cur = cur->next) {
-
 
108
            btree_node_t *node;
-
 
109
           
-
 
110
            node = list_get_instance(cur, btree_node_t, leaf_link);
-
 
111
           
-
 
112
            unsigned int i;
-
 
113
            for (i = 0; i < node->keys; i++) {
-
 
114
                if ((task_t *) node->value[i] != TASK) {
-
 
115
                    t = (task_t *) node->value[i];
-
 
116
                    break;
-
 
117
                }
-
 
118
            }
-
 
119
        }
-
 
120
       
123
       
121
        if (t != NULL) {
124
        if (t != NULL) {
122
            task_id_t id = t->taskid;
125
            task_id_t id = t->taskid;
123
           
126
           
124
            spinlock_unlock(&tasks_lock);
127
            spinlock_unlock(&tasks_lock);
125
            interrupts_restore(ipl);
128
            interrupts_restore(ipl);
126
           
129
           
127
#ifdef CONFIG_DEBUG
130
#ifdef CONFIG_DEBUG
128
            printf("Killing task %llu\n", id);
131
            printf("Killing task %llu\n", id);
129
#endif          
132
#endif          
130
            task_kill(id);
133
            task_kill(id);
131
        } else {
134
        } else {
132
            spinlock_unlock(&tasks_lock);
135
            spinlock_unlock(&tasks_lock);
133
            interrupts_restore(ipl);
136
            interrupts_restore(ipl);
134
        }
137
        }
135
       
138
       
136
    } while (t != NULL);
139
    } while (t != NULL);
137
}
140
}
138
 
141
 
139
/** Create new task
142
/** Create new task
140
 *
143
 *
141
 * Create new task with no threads.
144
 * Create new task with no threads.
142
 *
145
 *
143
 * @param as Task's address space.
146
 * @param as Task's address space.
144
 * @param name Symbolic name.
147
 * @param name Symbolic name.
145
 *
148
 *
146
 * @return New task's structure
149
 * @return New task's structure
147
 *
150
 *
148
 */
151
 */
149
task_t *task_create(as_t *as, char *name)
152
task_t *task_create(as_t *as, char *name)
150
{
153
{
151
    ipl_t ipl;
154
    ipl_t ipl;
152
    task_t *ta;
155
    task_t *ta;
153
    int i;
156
    int i;
154
   
157
   
155
    ta = (task_t *) malloc(sizeof(task_t), 0);
158
    ta = (task_t *) malloc(sizeof(task_t), 0);
156
 
159
 
157
    task_create_arch(ta);
160
    task_create_arch(ta);
158
 
161
 
159
    spinlock_initialize(&ta->lock, "task_ta_lock");
162
    spinlock_initialize(&ta->lock, "task_ta_lock");
160
    list_initialize(&ta->th_head);
163
    list_initialize(&ta->th_head);
161
    ta->as = as;
164
    ta->as = as;
162
    ta->name = name;
165
    ta->name = name;
163
    atomic_set(&ta->refcount, 0);
166
    atomic_set(&ta->refcount, 0);
164
    atomic_set(&ta->lifecount, 0);
167
    atomic_set(&ta->lifecount, 0);
165
    ta->context = CONTEXT;
168
    ta->context = CONTEXT;
166
 
169
 
167
    ta->capabilities = 0;
170
    ta->capabilities = 0;
168
    ta->cycles = 0;
171
    ta->cycles = 0;
169
   
172
   
170
    ipc_answerbox_init(&ta->answerbox);
173
    ipc_answerbox_init(&ta->answerbox);
171
    for (i = 0; i < IPC_MAX_PHONES; i++)
174
    for (i = 0; i < IPC_MAX_PHONES; i++)
172
        ipc_phone_init(&ta->phones[i]);
175
        ipc_phone_init(&ta->phones[i]);
173
    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
176
    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
174
        ta->context)))
177
        ta->context)))
175
        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
178
        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
176
    atomic_set(&ta->active_calls, 0);
179
    atomic_set(&ta->active_calls, 0);
177
 
180
 
178
    mutex_initialize(&ta->futexes_lock);
181
    mutex_initialize(&ta->futexes_lock);
179
    btree_create(&ta->futexes);
182
    btree_create(&ta->futexes);
180
   
183
   
181
    ipl = interrupts_disable();
184
    ipl = interrupts_disable();
182
 
185
 
183
    /*
186
    /*
184
     * Increment address space reference count.
187
     * Increment address space reference count.
185
     */
188
     */
186
    atomic_inc(&as->refcount);
189
    atomic_inc(&as->refcount);
187
 
190
 
188
    spinlock_lock(&tasks_lock);
191
    spinlock_lock(&tasks_lock);
189
    ta->taskid = ++task_counter;
192
    ta->taskid = ++task_counter;
-
 
193
    avltree_node_initialize(&ta->tasks_tree_node);
-
 
194
    ta->tasks_tree_node.key = ta->taskid;
190
    btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);
195
    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
191
    spinlock_unlock(&tasks_lock);
196
    spinlock_unlock(&tasks_lock);
192
    interrupts_restore(ipl);
197
    interrupts_restore(ipl);
193
 
198
 
194
    return ta;
199
    return ta;
195
}
200
}
196
 
201
 
197
/** Destroy task.
202
/** Destroy task.
198
 *
203
 *
199
 * @param t Task to be destroyed.
204
 * @param t Task to be destroyed.
200
 */
205
 */
201
void task_destroy(task_t *t)
206
void task_destroy(task_t *t)
202
{
207
{
203
    /*
208
    /*
204
     * Remove the task from the task B+tree.
209
     * Remove the task from the task B+tree.
205
     */
210
     */
206
    spinlock_lock(&tasks_lock);
211
    spinlock_lock(&tasks_lock);
207
    btree_remove(&tasks_btree, t->taskid, NULL);
212
    avltree_delete(&tasks_tree, &t->tasks_tree_node);
208
    spinlock_unlock(&tasks_lock);
213
    spinlock_unlock(&tasks_lock);
209
 
214
 
210
    /*
215
    /*
211
     * Perform architecture specific task destruction.
216
     * Perform architecture specific task destruction.
212
     */
217
     */
213
    task_destroy_arch(t);
218
    task_destroy_arch(t);
214
 
219
 
215
    /*
220
    /*
216
     * Free up dynamically allocated state.
221
     * Free up dynamically allocated state.
217
     */
222
     */
218
    btree_destroy(&t->futexes);
223
    btree_destroy(&t->futexes);
219
 
224
 
220
    /*
225
    /*
221
     * Drop our reference to the address space.
226
     * Drop our reference to the address space.
222
     */
227
     */
223
    if (atomic_predec(&t->as->refcount) == 0)
228
    if (atomic_predec(&t->as->refcount) == 0)
224
        as_destroy(t->as);
229
        as_destroy(t->as);
225
   
230
   
226
    free(t);
231
    free(t);
227
    TASK = NULL;
232
    TASK = NULL;
228
}
233
}
229
 
234
 
230
/** Create new task with 1 thread and run it
235
/** Create new task with 1 thread and run it
231
 *
236
 *
232
 * @param program_addr Address of program executable image.
237
 * @param program_addr Address of program executable image.
233
 * @param name Program name.
238
 * @param name Program name.
234
 *
239
 *
235
 * @return Task of the running program or NULL on error.
240
 * @return Task of the running program or NULL on error.
236
 */
241
 */
237
task_t *task_run_program(void *program_addr, char *name)
242
task_t *task_run_program(void *program_addr, char *name)
238
{
243
{
239
    as_t *as;
244
    as_t *as;
240
    as_area_t *a;
245
    as_area_t *a;
241
    int rc;
246
    int rc;
242
    thread_t *t;
247
    thread_t *t;
243
    task_t *task;
248
    task_t *task;
244
    uspace_arg_t *kernel_uarg;
249
    uspace_arg_t *kernel_uarg;
245
 
250
 
246
    as = as_create(0);
251
    as = as_create(0);
247
    ASSERT(as);
252
    ASSERT(as);
248
 
253
 
249
    rc = elf_load((elf_header_t *) program_addr, as);
254
    rc = elf_load((elf_header_t *) program_addr, as);
250
    if (rc != EE_OK) {
255
    if (rc != EE_OK) {
251
        as_destroy(as);
256
        as_destroy(as);
252
        return NULL;
257
        return NULL;
253
    }
258
    }
254
   
259
   
255
    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
260
    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
256
    kernel_uarg->uspace_entry =
261
    kernel_uarg->uspace_entry =
257
        (void *) ((elf_header_t *) program_addr)->e_entry;
262
        (void *) ((elf_header_t *) program_addr)->e_entry;
258
    kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
263
    kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
259
    kernel_uarg->uspace_thread_function = NULL;
264
    kernel_uarg->uspace_thread_function = NULL;
260
    kernel_uarg->uspace_thread_arg = NULL;
265
    kernel_uarg->uspace_thread_arg = NULL;
261
    kernel_uarg->uspace_uarg = NULL;
266
    kernel_uarg->uspace_uarg = NULL;
262
   
267
   
263
    task = task_create(as, name);
268
    task = task_create(as, name);
264
    ASSERT(task);
269
    ASSERT(task);
265
 
270
 
266
    /*
271
    /*
267
     * Create the data as_area.
272
     * Create the data as_area.
268
     */
273
     */
269
    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
274
    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
270
        LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
275
        LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
271
        AS_AREA_ATTR_NONE, &anon_backend, NULL);
276
        AS_AREA_ATTR_NONE, &anon_backend, NULL);
272
 
277
 
273
    /*
278
    /*
274
     * Create the main thread.
279
     * Create the main thread.
275
     */
280
     */
276
    t = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
281
    t = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
277
        "uinit", false);
282
        "uinit", false);
278
    ASSERT(t);
283
    ASSERT(t);
279
   
284
   
280
    thread_ready(t);
285
    thread_ready(t);
281
 
286
 
282
    return task;
287
    return task;
283
}
288
}
284
 
289
 
285
/** Syscall for reading task ID from userspace.
290
/** Syscall for reading task ID from userspace.
286
 *
291
 *
287
 * @param uspace_task_id Userspace address of 8-byte buffer where to store
292
 * @param uspace_task_id Userspace address of 8-byte buffer where to store
288
 * current task ID.
293
 * current task ID.
289
 *
294
 *
290
 * @return 0 on success or an error code from @ref errno.h.
295
 * @return 0 on success or an error code from @ref errno.h.
291
 */
296
 */
292
unative_t sys_task_get_id(task_id_t *uspace_task_id)
297
unative_t sys_task_get_id(task_id_t *uspace_task_id)
293
{
298
{
294
    /*
299
    /*
295
     * No need to acquire lock on TASK because taskid
300
     * No need to acquire lock on TASK because taskid
296
     * remains constant for the lifespan of the task.
301
     * remains constant for the lifespan of the task.
297
     */
302
     */
298
    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
303
    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
299
        sizeof(TASK->taskid));
304
        sizeof(TASK->taskid));
300
}
305
}
301
 
306
 
302
/** Find task structure corresponding to task ID.
307
/** Find task structure corresponding to task ID.
303
 *
308
 *
304
 * The tasks_lock must be already held by the caller of this function
309
 * The tasks_lock must be already held by the caller of this function
305
 * and interrupts must be disabled.
310
 * and interrupts must be disabled.
306
 *
311
 *
307
 * @param id Task ID.
312
 * @param id Task ID.
308
 *
313
 *
309
 * @return Task structure address or NULL if there is no such task ID.
314
 * @return Task structure address or NULL if there is no such task ID.
310
 */
315
 */
311
task_t *task_find_by_id(task_id_t id)
316
task_t *task_find_by_id(task_id_t id)
312
{
317
{
313
    btree_node_t *leaf;
318
    avltree_node_t *node;
314
   
319
   
315
    return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
320
    node = avltree_search(&tasks_tree, (avltree_key_t) id);
-
 
321
 
-
 
322
    if (node)
-
 
323
        return avltree_get_instance(node, task_t, tasks_tree_node);
-
 
324
    return NULL;
316
}
325
}
317
 
326
 
318
/** Get accounting data of given task.
327
/** Get accounting data of given task.
319
 *
328
 *
320
 * Note that task lock of 't' must be already held and
329
 * Note that task lock of 't' must be already held and
321
 * interrupts must be already disabled.
330
 * interrupts must be already disabled.
322
 *
331
 *
323
 * @param t Pointer to thread.
332
 * @param t Pointer to thread.
324
 *
333
 *
325
 */
334
 */
326
uint64_t task_get_accounting(task_t *t)
335
uint64_t task_get_accounting(task_t *t)
327
{
336
{
328
    /* Accumulated value of task */
337
    /* Accumulated value of task */
329
    uint64_t ret = t->cycles;
338
    uint64_t ret = t->cycles;
330
   
339
   
331
    /* Current values of threads */
340
    /* Current values of threads */
332
    link_t *cur;
341
    link_t *cur;
333
    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
342
    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
334
        thread_t *thr = list_get_instance(cur, thread_t, th_link);
343
        thread_t *thr = list_get_instance(cur, thread_t, th_link);
335
       
344
       
336
        spinlock_lock(&thr->lock);
345
        spinlock_lock(&thr->lock);
337
        /* Process only counted threads */
346
        /* Process only counted threads */
338
        if (!thr->uncounted) {
347
        if (!thr->uncounted) {
339
            if (thr == THREAD) {
348
            if (thr == THREAD) {
340
                /* Update accounting of current thread */
349
                /* Update accounting of current thread */
341
                thread_update_accounting();
350
                thread_update_accounting();
342
            }
351
            }
343
            ret += thr->cycles;
352
            ret += thr->cycles;
344
        }
353
        }
345
        spinlock_unlock(&thr->lock);
354
        spinlock_unlock(&thr->lock);
346
    }
355
    }
347
   
356
   
348
    return ret;
357
    return ret;
349
}
358
}
350
 
359
 
351
/** Kill task.
360
/** Kill task.
352
 *
361
 *
353
 * This function is idempotent.
362
 * This function is idempotent.
354
 * It signals all the task's threads to bail it out.
363
 * It signals all the task's threads to bail it out.
355
 *
364
 *
356
 * @param id ID of the task to be killed.
365
 * @param id ID of the task to be killed.
357
 *
366
 *
358
 * @return 0 on success or an error code from errno.h
367
 * @return 0 on success or an error code from errno.h
359
 */
368
 */
360
int task_kill(task_id_t id)
369
int task_kill(task_id_t id)
361
{
370
{
362
    ipl_t ipl;
371
    ipl_t ipl;
363
    task_t *ta;
372
    task_t *ta;
364
    link_t *cur;
373
    link_t *cur;
365
 
374
 
366
    if (id == 1)
375
    if (id == 1)
367
        return EPERM;
376
        return EPERM;
368
   
377
   
369
    ipl = interrupts_disable();
378
    ipl = interrupts_disable();
370
    spinlock_lock(&tasks_lock);
379
    spinlock_lock(&tasks_lock);
371
    if (!(ta = task_find_by_id(id))) {
380
    if (!(ta = task_find_by_id(id))) {
372
        spinlock_unlock(&tasks_lock);
381
        spinlock_unlock(&tasks_lock);
373
        interrupts_restore(ipl);
382
        interrupts_restore(ipl);
374
        return ENOENT;
383
        return ENOENT;
375
    }
384
    }
376
    spinlock_unlock(&tasks_lock);
385
    spinlock_unlock(&tasks_lock);
377
   
386
   
378
    /*
387
    /*
379
     * Interrupt all threads except ktaskclnp.
388
     * Interrupt all threads except ktaskclnp.
380
     */
389
     */
381
    spinlock_lock(&ta->lock);
390
    spinlock_lock(&ta->lock);
382
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
391
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
383
        thread_t *thr;
392
        thread_t *thr;
384
        bool sleeping = false;
393
        bool sleeping = false;
385
       
394
       
386
        thr = list_get_instance(cur, thread_t, th_link);
395
        thr = list_get_instance(cur, thread_t, th_link);
387
           
396
           
388
        spinlock_lock(&thr->lock);
397
        spinlock_lock(&thr->lock);
389
        thr->interrupted = true;
398
        thr->interrupted = true;
390
        if (thr->state == Sleeping)
399
        if (thr->state == Sleeping)
391
            sleeping = true;
400
            sleeping = true;
392
        spinlock_unlock(&thr->lock);
401
        spinlock_unlock(&thr->lock);
393
       
402
       
394
        if (sleeping)
403
        if (sleeping)
395
            waitq_interrupt_sleep(thr);
404
            waitq_interrupt_sleep(thr);
396
    }
405
    }
397
    spinlock_unlock(&ta->lock);
406
    spinlock_unlock(&ta->lock);
398
    interrupts_restore(ipl);
407
    interrupts_restore(ipl);
399
   
408
   
400
    return 0;
409
    return 0;
401
}
410
}
402
 
411
 
-
 
412
/** AVL tree walker that prints one line of task information.
 *
 * Used by task_print_list(); assumes tasks_lock is held by the caller.
 *
 * @param node AVL tree node holding a task.
 * @param arg  Unused.
 *
 * @return true to continue the walk over all tasks.
 */
static bool task_print_walker(avltree_node_t *node, void *arg)
{
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
    int j;
        
    spinlock_lock(&t->lock);
            
    uint64_t cycles;
    char suffix;
    order(task_get_accounting(t), &cycles, &suffix);
            
    printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd %6zd",
        t->taskid, t->name, t->context, t, t->as, cycles, suffix,
        t->refcount, atomic_get(&t->active_calls));
    for (j = 0; j < IPC_MAX_PHONES; j++) {
        if (t->phones[j].callee)
            printf(" %zd:%#zx", j, t->phones[j].callee);
    }
    printf("\n");
            
    spinlock_unlock(&t->lock);
    return true;
}
-
 
435
 
403
/** Print task list */
436
/** Print task list */
404
void task_print_list(void)
437
void task_print_list(void)
405
{
438
{
406
    link_t *cur;
-
 
407
    ipl_t ipl;
439
    ipl_t ipl;
408
   
440
   
409
    /* Messing with task structures, avoid deadlock */
441
    /* Messing with task structures, avoid deadlock */
410
    ipl = interrupts_disable();
442
    ipl = interrupts_disable();
411
    spinlock_lock(&tasks_lock);
443
    spinlock_lock(&tasks_lock);
412
   
444
   
413
    printf("taskid name       ctx address    as         cycles     threads "
445
    printf("taskid name       ctx address    as         cycles     threads "
414
        "calls  callee\n");
446
        "calls  callee\n");
415
    printf("------ ---------- --- ---------- ---------- ---------- ------- "
447
    printf("------ ---------- --- ---------- ---------- ---------- ------- "
416
        "------ ------>\n");
448
        "------ ------>\n");
417
 
449
 
418
    for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
-
 
419
        cur = cur->next) {
-
 
420
        btree_node_t *node;
-
 
421
        unsigned int i;
-
 
422
       
-
 
423
        node = list_get_instance(cur, btree_node_t, leaf_link);
-
 
424
        for (i = 0; i < node->keys; i++) {
-
 
425
            task_t *t;
-
 
426
            int j;
-
 
427
 
-
 
428
            t = (task_t *) node->value[i];
-
 
429
       
-
 
430
            spinlock_lock(&t->lock);
-
 
431
           
-
 
432
            uint64_t cycles;
-
 
433
            char suffix;
-
 
434
            order(task_get_accounting(t), &cycles, &suffix);
450
    avltree_walk(&tasks_tree, task_print_walker, NULL);
435
           
-
 
436
            printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
-
 
437
                "%6zd", t->taskid, t->name, t->context, t, t->as,
-
 
438
                cycles, suffix, t->refcount,
-
 
439
                atomic_get(&t->active_calls));
-
 
440
            for (j = 0; j < IPC_MAX_PHONES; j++) {
-
 
441
                if (t->phones[j].callee)
-
 
442
                    printf(" %zd:%#zx", j,
-
 
443
                        t->phones[j].callee);
-
 
444
            }
-
 
445
            printf("\n");
-
 
446
           
-
 
447
            spinlock_unlock(&t->lock);
-
 
448
        }
-
 
449
    }
-
 
450
 
451
 
451
    spinlock_unlock(&tasks_lock);
452
    spinlock_unlock(&tasks_lock);
452
    interrupts_restore(ipl);
453
    interrupts_restore(ipl);
453
}
454
}
454
 
455
 
455
/** @}
456
/** @}
456
 */
457
 */
457
 
458