/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Task management.
 */
37
 
37
 
38
#include <proc/thread.h>
38
#include <proc/thread.h>
39
#include <proc/task.h>
39
#include <proc/task.h>
40
#include <mm/as.h>
40
#include <mm/as.h>
41
#include <mm/slab.h>
41
#include <mm/slab.h>
42
#include <atomic.h>
42
#include <atomic.h>
43
#include <synch/spinlock.h>
43
#include <synch/spinlock.h>
44
#include <synch/waitq.h>
44
#include <synch/waitq.h>
45
#include <arch.h>
45
#include <arch.h>
46
#include <arch/barrier.h>
46
#include <arch/barrier.h>
47
#include <adt/avl.h>
47
#include <adt/avl.h>
48
#include <adt/btree.h>
48
#include <adt/btree.h>
49
#include <adt/list.h>
49
#include <adt/list.h>
50
#include <ipc/ipc.h>
50
#include <ipc/ipc.h>
51
#include <ipc/ipcrsc.h>
51
#include <ipc/ipcrsc.h>
52
#include <print.h>
52
#include <print.h>
53
#include <errno.h>
53
#include <errno.h>
54
#include <func.h>
54
#include <func.h>
55
#include <string.h>
55
#include <string.h>
56
#include <syscall/copy.h>
56
#include <syscall/copy.h>
-
 
57
#include <macros.h>
-
 
58
#include <ipc/event.h>
57
 
59
 
58
/** Spinlock protecting the tasks_tree AVL tree. */
60
/** Spinlock protecting the tasks_tree AVL tree. */
59
SPINLOCK_INITIALIZE(tasks_lock);
61
SPINLOCK_INITIALIZE(tasks_lock);
60
 
62
 
61
/** AVL tree of active tasks.
63
/** AVL tree of active tasks.
62
 *
64
 *
63
 * The task is guaranteed to exist after it was found in the tasks_tree as
65
 * The task is guaranteed to exist after it was found in the tasks_tree as
64
 * long as:
66
 * long as:
65
 * @li the tasks_lock is held,
67
 * @li the tasks_lock is held,
66
 * @li the task's lock is held when task's lock is acquired before releasing
68
 * @li the task's lock is held when task's lock is acquired before releasing
67
 *     tasks_lock or
69
 *     tasks_lock or
68
 * @li the task's refcount is greater than 0
70
 * @li the task's refcount is greater than 0
69
 *
71
 *
70
 */
72
 */
71
avltree_t tasks_tree;
73
avltree_t tasks_tree;
72
 
74
 
73
static task_id_t task_counter = 0;
75
static task_id_t task_counter = 0;
74
 
76
 
75
/** Initialize kernel tasks support. */
77
/** Initialize kernel tasks support. */
76
void task_init(void)
78
void task_init(void)
77
{
79
{
78
    TASK = NULL;
80
    TASK = NULL;
79
    avltree_create(&tasks_tree);
81
    avltree_create(&tasks_tree);
80
}
82
}
81
 
83
 
82
/*
84
/*
83
 * The idea behind this walker is to remember a single task different from
85
 * The idea behind this walker is to remember a single task different from
84
 * TASK.
86
 * TASK.
85
 */
87
 */
86
static bool task_done_walker(avltree_node_t *node, void *arg)
88
static bool task_done_walker(avltree_node_t *node, void *arg)
87
{
89
{
88
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
90
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
89
    task_t **tp = (task_t **) arg;
91
    task_t **tp = (task_t **) arg;
90
 
92
 
91
    if (t != TASK) {
93
    if (t != TASK) {
92
        *tp = t;
94
        *tp = t;
93
        return false;   /* stop walking */
95
        return false;   /* stop walking */
94
    }
96
    }
95
 
97
 
96
    return true;    /* continue the walk */
98
    return true;    /* continue the walk */
97
}
99
}
98
 
100
 
99
/** Kill all tasks except the current task. */
101
/** Kill all tasks except the current task. */
100
void task_done(void)
102
void task_done(void)
101
{
103
{
102
    task_t *t;
104
    task_t *t;
103
    do { /* Repeat until there are any tasks except TASK */
105
    do { /* Repeat until there are any tasks except TASK */
104
       
106
       
105
        /* Messing with task structures, avoid deadlock */
107
        /* Messing with task structures, avoid deadlock */
106
        ipl_t ipl = interrupts_disable();
108
        ipl_t ipl = interrupts_disable();
107
        spinlock_lock(&tasks_lock);
109
        spinlock_lock(&tasks_lock);
108
       
110
       
109
        t = NULL;
111
        t = NULL;
110
        avltree_walk(&tasks_tree, task_done_walker, &t);
112
        avltree_walk(&tasks_tree, task_done_walker, &t);
111
       
113
       
112
        if (t != NULL) {
114
        if (t != NULL) {
113
            task_id_t id = t->taskid;
115
            task_id_t id = t->taskid;
114
           
116
           
115
            spinlock_unlock(&tasks_lock);
117
            spinlock_unlock(&tasks_lock);
116
            interrupts_restore(ipl);
118
            interrupts_restore(ipl);
117
           
119
           
118
#ifdef CONFIG_DEBUG
120
#ifdef CONFIG_DEBUG
119
            printf("Killing task %" PRIu64 "\n", id);
121
            printf("Killing task %" PRIu64 "\n", id);
120
#endif          
122
#endif          
121
            task_kill(id);
123
            task_kill(id);
122
            thread_usleep(10000);
124
            thread_usleep(10000);
123
        } else {
125
        } else {
124
            spinlock_unlock(&tasks_lock);
126
            spinlock_unlock(&tasks_lock);
125
            interrupts_restore(ipl);
127
            interrupts_restore(ipl);
126
        }
128
        }
127
       
129
       
128
    } while (t != NULL);
130
    } while (t != NULL);
129
}
131
}
130
 
132
 
131
/** Create new task with no threads.
133
/** Create new task with no threads.
132
 *
134
 *
133
 * @param as        Task's address space.
135
 * @param as        Task's address space.
134
 * @param name      Symbolic name (a copy is made).
136
 * @param name      Symbolic name (a copy is made).
135
 *
137
 *
136
 * @return      New task's structure.
138
 * @return      New task's structure.
137
 *
139
 *
138
 */
140
 */
139
task_t *task_create(as_t *as, char *name)
141
task_t *task_create(as_t *as, char *name)
140
{
142
{
141
    ipl_t ipl;
143
    ipl_t ipl;
142
    task_t *ta;
144
    task_t *ta;
143
    int i;
145
    int i;
144
   
146
   
145
    ta = (task_t *) malloc(sizeof(task_t), 0);
147
    ta = (task_t *) malloc(sizeof(task_t), 0);
146
 
148
 
147
    task_create_arch(ta);
149
    task_create_arch(ta);
148
 
150
 
149
    spinlock_initialize(&ta->lock, "task_ta_lock");
151
    spinlock_initialize(&ta->lock, "task_ta_lock");
150
    list_initialize(&ta->th_head);
152
    list_initialize(&ta->th_head);
151
    ta->as = as;
153
    ta->as = as;
152
 
154
 
153
    memcpy(ta->name, name, TASK_NAME_BUFLEN);
155
    memcpy(ta->name, name, TASK_NAME_BUFLEN);
154
    ta->name[TASK_NAME_BUFLEN - 1] = 0;
156
    ta->name[TASK_NAME_BUFLEN - 1] = 0;
155
 
157
 
156
    atomic_set(&ta->refcount, 0);
158
    atomic_set(&ta->refcount, 0);
157
    atomic_set(&ta->lifecount, 0);
159
    atomic_set(&ta->lifecount, 0);
158
    ta->context = CONTEXT;
160
    ta->context = CONTEXT;
159
 
161
 
160
    ta->capabilities = 0;
162
    ta->capabilities = 0;
161
    ta->cycles = 0;
163
    ta->cycles = 0;
162
 
164
 
163
#ifdef CONFIG_UDEBUG
165
#ifdef CONFIG_UDEBUG
164
    /* Init debugging stuff */
166
    /* Init debugging stuff */
165
    udebug_task_init(&ta->udebug);
167
    udebug_task_init(&ta->udebug);
166
 
168
 
167
    /* Init kbox stuff */
169
    /* Init kbox stuff */
168
    ipc_answerbox_init(&ta->kb.box, ta);
170
    ipc_answerbox_init(&ta->kb.box, ta);
169
    ta->kb.thread = NULL;
171
    ta->kb.thread = NULL;
170
    mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
172
    mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
171
    ta->kb.finished = false;
173
    ta->kb.finished = false;
172
#endif
174
#endif
173
 
175
 
174
    ipc_answerbox_init(&ta->answerbox, ta);
176
    ipc_answerbox_init(&ta->answerbox, ta);
175
    for (i = 0; i < IPC_MAX_PHONES; i++)
177
    for (i = 0; i < IPC_MAX_PHONES; i++)
176
        ipc_phone_init(&ta->phones[i]);
178
        ipc_phone_init(&ta->phones[i]);
177
    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
179
    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
178
        ta->context)))
180
        ta->context)))
179
        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
181
        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
180
    atomic_set(&ta->active_calls, 0);
182
    atomic_set(&ta->active_calls, 0);
181
 
183
 
182
    mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
184
    mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
183
    btree_create(&ta->futexes);
185
    btree_create(&ta->futexes);
184
   
186
   
185
    ipl = interrupts_disable();
187
    ipl = interrupts_disable();
186
 
188
 
187
    /*
189
    /*
188
     * Increment address space reference count.
190
     * Increment address space reference count.
189
     */
191
     */
190
    atomic_inc(&as->refcount);
192
    atomic_inc(&as->refcount);
191
 
193
 
192
    spinlock_lock(&tasks_lock);
194
    spinlock_lock(&tasks_lock);
193
    ta->taskid = ++task_counter;
195
    ta->taskid = ++task_counter;
194
    avltree_node_initialize(&ta->tasks_tree_node);
196
    avltree_node_initialize(&ta->tasks_tree_node);
195
    ta->tasks_tree_node.key = ta->taskid;
197
    ta->tasks_tree_node.key = ta->taskid;
196
    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
198
    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
197
    spinlock_unlock(&tasks_lock);
199
    spinlock_unlock(&tasks_lock);
198
    interrupts_restore(ipl);
200
    interrupts_restore(ipl);
199
 
201
   
-
 
202
    /*
-
 
203
     * Notify about task creation.
-
 
204
     */
-
 
205
    if (event_is_subscribed(EVENT_WAIT))
-
 
206
        event_notify_3(EVENT_WAIT, TASK_CREATE, LOWER32(ta->taskid),
-
 
207
            UPPER32(ta->taskid));
-
 
208
   
200
    return ta;
209
    return ta;
201
}
210
}
202
 
211
 
203
/** Destroy task.
212
/** Destroy task.
204
 *
213
 *
205
 * @param t     Task to be destroyed.
214
 * @param t     Task to be destroyed.
206
 */
215
 */
207
void task_destroy(task_t *t)
216
void task_destroy(task_t *t)
208
{
217
{
209
    /*
218
    /*
210
     * Remove the task from the task B+tree.
219
     * Remove the task from the task B+tree.
211
     */
220
     */
212
    spinlock_lock(&tasks_lock);
221
    spinlock_lock(&tasks_lock);
213
    avltree_delete(&tasks_tree, &t->tasks_tree_node);
222
    avltree_delete(&tasks_tree, &t->tasks_tree_node);
214
    spinlock_unlock(&tasks_lock);
223
    spinlock_unlock(&tasks_lock);
215
 
224
 
216
    /*
225
    /*
217
     * Perform architecture specific task destruction.
226
     * Perform architecture specific task destruction.
218
     */
227
     */
219
    task_destroy_arch(t);
228
    task_destroy_arch(t);
220
 
229
 
221
    /*
230
    /*
222
     * Free up dynamically allocated state.
231
     * Free up dynamically allocated state.
223
     */
232
     */
224
    btree_destroy(&t->futexes);
233
    btree_destroy(&t->futexes);
225
 
234
 
226
    /*
235
    /*
227
     * Drop our reference to the address space.
236
     * Drop our reference to the address space.
228
     */
237
     */
229
    if (atomic_predec(&t->as->refcount) == 0)
238
    if (atomic_predec(&t->as->refcount) == 0)
230
        as_destroy(t->as);
239
        as_destroy(t->as);
231
   
240
   
-
 
241
    /*
-
 
242
     * Notify about task destruction.
-
 
243
     */
-
 
244
    if (event_is_subscribed(EVENT_WAIT))
-
 
245
        event_notify_3(EVENT_WAIT, TASK_DESTROY, LOWER32(t->taskid),
-
 
246
            UPPER32(t->taskid));
-
 
247
   
232
    free(t);
248
    free(t);
233
    TASK = NULL;
249
    TASK = NULL;
234
}
250
}
235
 
251
 
236
/** Syscall for reading task ID from userspace.
252
/** Syscall for reading task ID from userspace.
237
 *
253
 *
238
 * @param       uspace_task_id userspace address of 8-byte buffer
254
 * @param       uspace_task_id userspace address of 8-byte buffer
239
 *          where to store current task ID.
255
 *          where to store current task ID.
240
 *
256
 *
241
 * @return      Zero on success or an error code from @ref errno.h.
257
 * @return      Zero on success or an error code from @ref errno.h.
242
 */
258
 */
243
unative_t sys_task_get_id(task_id_t *uspace_task_id)
259
unative_t sys_task_get_id(task_id_t *uspace_task_id)
244
{
260
{
245
    /*
261
    /*
246
     * No need to acquire lock on TASK because taskid remains constant for
262
     * No need to acquire lock on TASK because taskid remains constant for
247
     * the lifespan of the task.
263
     * the lifespan of the task.
248
     */
264
     */
249
    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
265
    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
250
        sizeof(TASK->taskid));
266
        sizeof(TASK->taskid));
251
}
267
}
252
 
268
 
253
/** Syscall for setting the task name.
269
/** Syscall for setting the task name.
254
 *
270
 *
255
 * The name simplifies identifying the task in the task list.
271
 * The name simplifies identifying the task in the task list.
256
 *
272
 *
257
 * @param name  The new name for the task. (typically the same
273
 * @param name  The new name for the task. (typically the same
258
 *      as the command used to execute it).
274
 *      as the command used to execute it).
259
 *
275
 *
260
 * @return 0 on success or an error code from @ref errno.h.
276
 * @return 0 on success or an error code from @ref errno.h.
261
 */
277
 */
262
unative_t sys_task_set_name(const char *uspace_name, size_t name_len)
278
unative_t sys_task_set_name(const char *uspace_name, size_t name_len)
263
{
279
{
264
    int rc;
280
    int rc;
265
    char namebuf[TASK_NAME_BUFLEN];
281
    char namebuf[TASK_NAME_BUFLEN];
266
 
282
 
267
    /* Cap length of name and copy it from userspace. */
283
    /* Cap length of name and copy it from userspace. */
268
 
284
 
269
    if (name_len > TASK_NAME_BUFLEN - 1)
285
    if (name_len > TASK_NAME_BUFLEN - 1)
270
        name_len = TASK_NAME_BUFLEN - 1;
286
        name_len = TASK_NAME_BUFLEN - 1;
271
 
287
 
272
    rc = copy_from_uspace(namebuf, uspace_name, name_len);
288
    rc = copy_from_uspace(namebuf, uspace_name, name_len);
273
    if (rc != 0)
289
    if (rc != 0)
274
        return (unative_t) rc;
290
        return (unative_t) rc;
275
 
291
 
276
    namebuf[name_len] = '\0';
292
    namebuf[name_len] = '\0';
277
    str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
293
    str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
278
 
294
 
279
    return EOK;
295
    return EOK;
280
}
296
}
281
 
297
 
282
/** Find task structure corresponding to task ID.
298
/** Find task structure corresponding to task ID.
283
 *
299
 *
284
 * The tasks_lock must be already held by the caller of this function and
300
 * The tasks_lock must be already held by the caller of this function and
285
 * interrupts must be disabled.
301
 * interrupts must be disabled.
286
 *
302
 *
287
 * @param id        Task ID.
303
 * @param id        Task ID.
288
 *
304
 *
289
 * @return      Task structure address or NULL if there is no such task
305
 * @return      Task structure address or NULL if there is no such task
290
 *          ID.
306
 *          ID.
291
 */
307
 */
292
task_t *task_find_by_id(task_id_t id) { avltree_node_t *node;
308
task_t *task_find_by_id(task_id_t id) { avltree_node_t *node;
293
   
309
   
294
    node = avltree_search(&tasks_tree, (avltree_key_t) id);
310
    node = avltree_search(&tasks_tree, (avltree_key_t) id);
295
 
311
 
296
    if (node)
312
    if (node)
297
        return avltree_get_instance(node, task_t, tasks_tree_node);
313
        return avltree_get_instance(node, task_t, tasks_tree_node);
298
    return NULL;
314
    return NULL;
299
}
315
}
300
 
316
 
301
/** Get accounting data of given task.
317
/** Get accounting data of given task.
302
 *
318
 *
303
 * Note that task lock of 't' must be already held and interrupts must be
319
 * Note that task lock of 't' must be already held and interrupts must be
304
 * already disabled.
320
 * already disabled.
305
 *
321
 *
306
 * @param t     Pointer to thread.
322
 * @param t     Pointer to thread.
307
 *
323
 *
308
 * @return      Number of cycles used by the task and all its threads
324
 * @return      Number of cycles used by the task and all its threads
309
 *          so far.
325
 *          so far.
310
 */
326
 */
311
uint64_t task_get_accounting(task_t *t)
327
uint64_t task_get_accounting(task_t *t)
312
{
328
{
313
    /* Accumulated value of task */
329
    /* Accumulated value of task */
314
    uint64_t ret = t->cycles;
330
    uint64_t ret = t->cycles;
315
   
331
   
316
    /* Current values of threads */
332
    /* Current values of threads */
317
    link_t *cur;
333
    link_t *cur;
318
    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
334
    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
319
        thread_t *thr = list_get_instance(cur, thread_t, th_link);
335
        thread_t *thr = list_get_instance(cur, thread_t, th_link);
320
       
336
       
321
        spinlock_lock(&thr->lock);
337
        spinlock_lock(&thr->lock);
322
        /* Process only counted threads */
338
        /* Process only counted threads */
323
        if (!thr->uncounted) {
339
        if (!thr->uncounted) {
324
            if (thr == THREAD) {
340
            if (thr == THREAD) {
325
                /* Update accounting of current thread */
341
                /* Update accounting of current thread */
326
                thread_update_accounting();
342
                thread_update_accounting();
327
            }
343
            }
328
            ret += thr->cycles;
344
            ret += thr->cycles;
329
        }
345
        }
330
        spinlock_unlock(&thr->lock);
346
        spinlock_unlock(&thr->lock);
331
    }
347
    }
332
   
348
   
333
    return ret;
349
    return ret;
334
}
350
}
335
 
351
 
336
/** Kill task.
352
/** Kill task.
337
 *
353
 *
338
 * This function is idempotent.
354
 * This function is idempotent.
339
 * It signals all the task's threads to bail it out.
355
 * It signals all the task's threads to bail it out.
340
 *
356
 *
341
 * @param id        ID of the task to be killed.
357
 * @param id        ID of the task to be killed.
342
 *
358
 *
343
 * @return      Zero on success or an error code from errno.h.
359
 * @return      Zero on success or an error code from errno.h.
344
 */
360
 */
345
int task_kill(task_id_t id)
361
int task_kill(task_id_t id)
346
{
362
{
347
    ipl_t ipl;
363
    ipl_t ipl;
348
    task_t *ta;
364
    task_t *ta;
349
    link_t *cur;
365
    link_t *cur;
350
 
366
 
351
    if (id == 1)
367
    if (id == 1)
352
        return EPERM;
368
        return EPERM;
353
   
369
   
354
    ipl = interrupts_disable();
370
    ipl = interrupts_disable();
355
    spinlock_lock(&tasks_lock);
371
    spinlock_lock(&tasks_lock);
356
    if (!(ta = task_find_by_id(id))) {
372
    if (!(ta = task_find_by_id(id))) {
357
        spinlock_unlock(&tasks_lock);
373
        spinlock_unlock(&tasks_lock);
358
        interrupts_restore(ipl);
374
        interrupts_restore(ipl);
359
        return ENOENT;
375
        return ENOENT;
360
    }
376
    }
361
    spinlock_unlock(&tasks_lock);
377
    spinlock_unlock(&tasks_lock);
362
   
378
   
363
    /*
379
    /*
364
     * Interrupt all threads.
380
     * Interrupt all threads.
365
     */
381
     */
366
    spinlock_lock(&ta->lock);
382
    spinlock_lock(&ta->lock);
367
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
383
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
368
        thread_t *thr;
384
        thread_t *thr;
369
        bool sleeping = false;
385
        bool sleeping = false;
370
       
386
       
371
        thr = list_get_instance(cur, thread_t, th_link);
387
        thr = list_get_instance(cur, thread_t, th_link);
372
       
388
       
373
        spinlock_lock(&thr->lock);
389
        spinlock_lock(&thr->lock);
374
        thr->interrupted = true;
390
        thr->interrupted = true;
375
        if (thr->state == Sleeping)
391
        if (thr->state == Sleeping)
376
            sleeping = true;
392
            sleeping = true;
377
        spinlock_unlock(&thr->lock);
393
        spinlock_unlock(&thr->lock);
378
       
394
       
379
        if (sleeping)
395
        if (sleeping)
380
            waitq_interrupt_sleep(thr);
396
            waitq_interrupt_sleep(thr);
381
    }
397
    }
382
    spinlock_unlock(&ta->lock);
398
    spinlock_unlock(&ta->lock);
383
    interrupts_restore(ipl);
399
    interrupts_restore(ipl);
384
   
400
   
385
    return 0;
401
    return 0;
386
}
402
}
387
 
403
 
388
/* AVL walker printing one task-list line per task; always continues. */
static bool task_print_walker(avltree_node_t *node, void *arg)
{
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
        
    spinlock_lock(&t->lock);
            
    /* Scale the cycle count to a human-readable magnitude + suffix. */
    uint64_t cycles;
    char suffix;
    order(task_get_accounting(t), &cycles, &suffix);

#ifdef __32_BITS__  
    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64
        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

#ifdef __64_BITS__
    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64
        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

    /* Append the callee of every connected phone. */
    int j;
    for (j = 0; j < IPC_MAX_PHONES; j++) {
        if (t->phones[j].callee)
            printf(" %d:%p", j, t->phones[j].callee);
    }
    printf("\n");
            
    spinlock_unlock(&t->lock);
    return true;
}
420
 
436
 
421
/** Print task list */
437
/** Print task list */
422
void task_print_list(void)
438
void task_print_list(void)
423
{
439
{
424
    ipl_t ipl;
440
    ipl_t ipl;
425
   
441
   
426
    /* Messing with task structures, avoid deadlock */
442
    /* Messing with task structures, avoid deadlock */
427
    ipl = interrupts_disable();
443
    ipl = interrupts_disable();
428
    spinlock_lock(&tasks_lock);
444
    spinlock_lock(&tasks_lock);
429
 
445
 
430
#ifdef __32_BITS__  
446
#ifdef __32_BITS__  
431
    printf("taskid name         ctx address    as         "
447
    printf("taskid name         ctx address    as         "
432
        "cycles     threads calls  callee\n");
448
        "cycles     threads calls  callee\n");
433
    printf("------ ------------ --- ---------- ---------- "
449
    printf("------ ------------ --- ---------- ---------- "
434
        "---------- ------- ------ ------>\n");
450
        "---------- ------- ------ ------>\n");
435
#endif
451
#endif
436
 
452
 
437
#ifdef __64_BITS__
453
#ifdef __64_BITS__
438
    printf("taskid name         ctx address            as                 "
454
    printf("taskid name         ctx address            as                 "
439
        "cycles     threads calls  callee\n");
455
        "cycles     threads calls  callee\n");
440
    printf("------ ------------ --- ------------------ ------------------ "
456
    printf("------ ------------ --- ------------------ ------------------ "
441
        "---------- ------- ------ ------>\n");
457
        "---------- ------- ------ ------>\n");
442
#endif
458
#endif
443
 
459
 
444
    avltree_walk(&tasks_tree, task_print_walker, NULL);
460
    avltree_walk(&tasks_tree, task_print_walker, NULL);
445
 
461
 
446
    spinlock_unlock(&tasks_lock);
462
    spinlock_unlock(&tasks_lock);
447
    interrupts_restore(ipl);
463
    interrupts_restore(ipl);
448
}
464
}
/** @}
 */