Subversion Repositories HelenOS

Rev

Rev 4448 | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 4448 Rev 4452
1
/*
1
/*
2
 * Copyright (c) 2001-2004 Jakub Jermar
2
 * Copyright (c) 2001-2004 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup genericproc
29
/** @addtogroup genericproc
30
 * @{
30
 * @{
31
 */
31
 */
32
 
32
 
33
/**
33
/**
34
 * @file
34
 * @file
35
 * @brief   Task management.
35
 * @brief   Task management.
36
 */
36
 */
37
 
37
 
38
#include <proc/thread.h>
38
#include <proc/thread.h>
39
#include <proc/task.h>
39
#include <proc/task.h>
40
#include <mm/as.h>
40
#include <mm/as.h>
41
#include <mm/slab.h>
41
#include <mm/slab.h>
42
#include <atomic.h>
42
#include <atomic.h>
43
#include <synch/spinlock.h>
43
#include <synch/spinlock.h>
44
#include <synch/waitq.h>
44
#include <synch/waitq.h>
45
#include <arch.h>
45
#include <arch.h>
46
#include <arch/barrier.h>
46
#include <arch/barrier.h>
47
#include <adt/avl.h>
47
#include <adt/avl.h>
48
#include <adt/btree.h>
48
#include <adt/btree.h>
49
#include <adt/list.h>
49
#include <adt/list.h>
50
#include <ipc/ipc.h>
50
#include <ipc/ipc.h>
51
#include <ipc/ipcrsc.h>
51
#include <ipc/ipcrsc.h>
52
#include <print.h>
52
#include <print.h>
53
#include <errno.h>
53
#include <errno.h>
54
#include <func.h>
54
#include <func.h>
55
#include <string.h>
55
#include <string.h>
56
#include <syscall/copy.h>
56
#include <syscall/copy.h>
57
#include <macros.h>
57
#include <macros.h>
58
#include <ipc/event.h>
58
#include <ipc/event.h>
59
 
59
 
/** Spinlock protecting the tasks_tree AVL tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** AVL tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_tree as
 * long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held when task's lock is acquired before releasing
 *     tasks_lock or
 * @li the task's refcount is greater than 0
 *
 */
avltree_t tasks_tree;

/** Monotonic counter used to hand out unique task IDs.
 *
 * Incremented only under tasks_lock (see task_create()).
 */
static task_id_t task_counter = 0;
76
 
76
 
77
/** Initialize kernel tasks support. */
77
/** Initialize kernel tasks support. */
78
void task_init(void)
78
void task_init(void)
79
{
79
{
80
    TASK = NULL;
80
    TASK = NULL;
81
    avltree_create(&tasks_tree);
81
    avltree_create(&tasks_tree);
82
}
82
}
83
 
83
 
84
/*
84
/*
85
 * The idea behind this walker is to remember a single task different from
85
 * The idea behind this walker is to remember a single task different from
86
 * TASK.
86
 * TASK.
87
 */
87
 */
88
static bool task_done_walker(avltree_node_t *node, void *arg)
88
static bool task_done_walker(avltree_node_t *node, void *arg)
89
{
89
{
90
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
90
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
91
    task_t **tp = (task_t **) arg;
91
    task_t **tp = (task_t **) arg;
92
 
92
 
93
    if (t != TASK) {
93
    if (t != TASK) {
94
        *tp = t;
94
        *tp = t;
95
        return false;   /* stop walking */
95
        return false;   /* stop walking */
96
    }
96
    }
97
 
97
 
98
    return true;    /* continue the walk */
98
    return true;    /* continue the walk */
99
}
99
}
100
 
100
 
/** Kill all tasks except the current task. */
void task_done(void)
{
    task_t *t;
    do { /* Repeat while any task other than TASK remains. */
       
        /* Messing with task structures, avoid deadlock */
        ipl_t ipl = interrupts_disable();
        spinlock_lock(&tasks_lock);
       
        t = NULL;
        avltree_walk(&tasks_tree, task_done_walker, &t);
       
        if (t != NULL) {
            /* Copy the ID out while tasks_lock still guarantees
             * that 't' exists; after the unlock the task may be
             * destroyed at any time. */
            task_id_t id = t->taskid;
           
            spinlock_unlock(&tasks_lock);
            interrupts_restore(ipl);
           
#ifdef CONFIG_DEBUG
            printf("Killing task %" PRIu64 "\n", id);
#endif          
            task_kill(id);
            /* Give the killed task's threads a chance to exit. */
            thread_usleep(10000);
        } else {
            spinlock_unlock(&tasks_lock);
            interrupts_restore(ipl);
        }
       
    } while (t != NULL);
}
132
 
132
 
/** Create new task with no threads.
 *
 * @param as        Task's address space.
 * @param name      Symbolic name (a copy is made).
 *
 * @return      New task's structure.
 *
 */
task_t *task_create(as_t *as, char *name)
{
    ipl_t ipl;
    task_t *ta;
    int i;
   
    /* Kernel allocator; flags 0 — presumably may block until memory is
     * available rather than fail. TODO confirm against mm/slab. */
    ta = (task_t *) malloc(sizeof(task_t), 0);

    task_create_arch(ta);

    spinlock_initialize(&ta->lock, "task_ta_lock");
    list_initialize(&ta->th_head);
    ta->as = as;

    /*
     * NOTE(review): copies a full TASK_NAME_BUFLEN bytes from 'name'
     * regardless of its string length — overreads if the caller's buffer
     * is shorter; confirm all callers pass TASK_NAME_BUFLEN-sized buffers.
     */
    memcpy(ta->name, name, TASK_NAME_BUFLEN);
    ta->name[TASK_NAME_BUFLEN - 1] = 0;

    atomic_set(&ta->refcount, 0);
    atomic_set(&ta->lifecount, 0);
    ta->context = CONTEXT;

    ta->capabilities = 0;
    ta->cycles = 0;

#ifdef CONFIG_UDEBUG
    /* Init debugging stuff */
    udebug_task_init(&ta->udebug);

    /* Init kbox stuff */
    ipc_answerbox_init(&ta->kb.box, ta);
    ta->kb.thread = NULL;
    mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
    ta->kb.finished = false;
#endif

    /* IPC state: answerbox, phones, and a connection of phone 0 to the
     * naming-service phone when the contexts are compatible. */
    ipc_answerbox_init(&ta->answerbox, ta);
    for (i = 0; i < IPC_MAX_PHONES; i++)
        ipc_phone_init(&ta->phones[i]);
    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
        ta->context)))
        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
    atomic_set(&ta->active_calls, 0);

    mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
    btree_create(&ta->futexes);
   
    ipl = interrupts_disable();

    /*
     * Increment address space reference count.
     */
    atomic_inc(&as->refcount);

    /* Assign a unique ID and publish the task in the tasks tree. */
    spinlock_lock(&tasks_lock);
    ta->taskid = ++task_counter;
    avltree_node_initialize(&ta->tasks_tree_node);
    ta->tasks_tree_node.key = ta->taskid;
    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);
   
    /*
     * Notify about task creation.
     */
    if (event_is_subscribed(EVENT_WAIT))
        event_notify_3(EVENT_WAIT, TASK_CREATE, LOWER32(ta->taskid),
            UPPER32(ta->taskid));
   
    return ta;
}
204
 
211
 
/** Destroy task.
 *
 * @param t     Task to be destroyed.
 */
void task_destroy(task_t *t)
{
    /*
     * Remove the task from the AVL tree of tasks.
     */
    spinlock_lock(&tasks_lock);
    avltree_delete(&tasks_tree, &t->tasks_tree_node);
    spinlock_unlock(&tasks_lock);

    /*
     * Perform architecture specific task destruction.
     */
    task_destroy_arch(t);

    /*
     * Free up dynamically allocated state.
     */
    btree_destroy(&t->futexes);

    /*
     * Drop our reference to the address space.
     */
    if (atomic_predec(&t->as->refcount) == 0)
        as_destroy(t->as);
   
    /*
     * Notify about task destruction.
     */
    if (event_is_subscribed(EVENT_WAIT))
        event_notify_3(EVENT_WAIT, TASK_DESTROY, LOWER32(t->taskid),
            UPPER32(t->taskid));
   
    free(t);
    TASK = NULL;
}
243
 
251
 
244
/** Syscall for reading task ID from userspace.
252
/** Syscall for reading task ID from userspace.
245
 *
253
 *
246
 * @param       uspace_task_id userspace address of 8-byte buffer
254
 * @param       uspace_task_id userspace address of 8-byte buffer
247
 *          where to store current task ID.
255
 *          where to store current task ID.
248
 *
256
 *
249
 * @return      Zero on success or an error code from @ref errno.h.
257
 * @return      Zero on success or an error code from @ref errno.h.
250
 */
258
 */
251
unative_t sys_task_get_id(task_id_t *uspace_task_id)
259
unative_t sys_task_get_id(task_id_t *uspace_task_id)
252
{
260
{
253
    /*
261
    /*
254
     * No need to acquire lock on TASK because taskid remains constant for
262
     * No need to acquire lock on TASK because taskid remains constant for
255
     * the lifespan of the task.
263
     * the lifespan of the task.
256
     */
264
     */
257
    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
265
    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
258
        sizeof(TASK->taskid));
266
        sizeof(TASK->taskid));
259
}
267
}
260
 
268
 
261
/** Syscall for setting the task name.
269
/** Syscall for setting the task name.
262
 *
270
 *
263
 * The name simplifies identifying the task in the task list.
271
 * The name simplifies identifying the task in the task list.
264
 *
272
 *
265
 * @param name  The new name for the task. (typically the same
273
 * @param name  The new name for the task. (typically the same
266
 *      as the command used to execute it).
274
 *      as the command used to execute it).
267
 *
275
 *
268
 * @return 0 on success or an error code from @ref errno.h.
276
 * @return 0 on success or an error code from @ref errno.h.
269
 */
277
 */
270
unative_t sys_task_set_name(const char *uspace_name, size_t name_len)
278
unative_t sys_task_set_name(const char *uspace_name, size_t name_len)
271
{
279
{
272
    int rc;
280
    int rc;
273
    char namebuf[TASK_NAME_BUFLEN];
281
    char namebuf[TASK_NAME_BUFLEN];
274
 
282
 
275
    /* Cap length of name and copy it from userspace. */
283
    /* Cap length of name and copy it from userspace. */
276
 
284
 
277
    if (name_len > TASK_NAME_BUFLEN - 1)
285
    if (name_len > TASK_NAME_BUFLEN - 1)
278
        name_len = TASK_NAME_BUFLEN - 1;
286
        name_len = TASK_NAME_BUFLEN - 1;
279
 
287
 
280
    rc = copy_from_uspace(namebuf, uspace_name, name_len);
288
    rc = copy_from_uspace(namebuf, uspace_name, name_len);
281
    if (rc != 0)
289
    if (rc != 0)
282
        return (unative_t) rc;
290
        return (unative_t) rc;
283
 
291
 
284
    namebuf[name_len] = '\0';
292
    namebuf[name_len] = '\0';
285
    str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
293
    str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
286
 
294
 
287
    return EOK;
295
    return EOK;
288
}
296
}
289
 
297
 
290
/** Find task structure corresponding to task ID.
298
/** Find task structure corresponding to task ID.
291
 *
299
 *
292
 * The tasks_lock must be already held by the caller of this function and
300
 * The tasks_lock must be already held by the caller of this function and
293
 * interrupts must be disabled.
301
 * interrupts must be disabled.
294
 *
302
 *
295
 * @param id        Task ID.
303
 * @param id        Task ID.
296
 *
304
 *
297
 * @return      Task structure address or NULL if there is no such task
305
 * @return      Task structure address or NULL if there is no such task
298
 *          ID.
306
 *          ID.
299
 */
307
 */
300
task_t *task_find_by_id(task_id_t id) { avltree_node_t *node;
308
task_t *task_find_by_id(task_id_t id) { avltree_node_t *node;
301
   
309
   
302
    node = avltree_search(&tasks_tree, (avltree_key_t) id);
310
    node = avltree_search(&tasks_tree, (avltree_key_t) id);
303
 
311
 
304
    if (node)
312
    if (node)
305
        return avltree_get_instance(node, task_t, tasks_tree_node);
313
        return avltree_get_instance(node, task_t, tasks_tree_node);
306
    return NULL;
314
    return NULL;
307
}
315
}
308
 
316
 
/** Get accounting data of given task.
 *
 * Note that task lock of 't' must be already held and interrupts must be
 * already disabled.
 *
 * @param t     Pointer to the task.
 *
 * @return      Number of cycles used by the task and all its threads
 *          so far.
 */
uint64_t task_get_accounting(task_t *t)
{
    /* Accumulated value of task */
    uint64_t ret = t->cycles;
   
    /* Add the live counters of the task's threads */
    link_t *cur;
    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
        thread_t *thr = list_get_instance(cur, thread_t, th_link);
       
        spinlock_lock(&thr->lock);
        /* Process only counted threads */
        if (!thr->uncounted) {
            if (thr == THREAD) {
                /* Update accounting of current thread */
                thread_update_accounting();
            }
            ret += thr->cycles;
        }
        spinlock_unlock(&thr->lock);
    }
   
    return ret;
}
343
 
351
 
/** Kill task.
 *
 * This function is idempotent.
 * It signals all the task's threads to bail it out.
 *
 * @param id        ID of the task to be killed.
 *
 * @return      Zero on success or an error code from errno.h.
 */
int task_kill(task_id_t id)
{
    ipl_t ipl;
    task_t *ta;
    link_t *cur;

    /* Task 1 (presumably the init/kernel task — confirm) is never killed. */
    if (id == 1)
        return EPERM;
   
    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);
    if (!(ta = task_find_by_id(id))) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }
    spinlock_unlock(&tasks_lock);
    /*
     * NOTE(review): 'ta' is dereferenced below after tasks_lock has been
     * dropped, without taking ta->lock before the unlock and without
     * holding a refcount. Per the tasks_tree lifetime comment above, this
     * does not guarantee the task still exists — confirm this cannot race
     * with task_destroy().
     */
   
    /*
     * Interrupt all threads.
     */
    spinlock_lock(&ta->lock);
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
        thread_t *thr;
        bool sleeping = false;
       
        thr = list_get_instance(cur, thread_t, th_link);
       
        spinlock_lock(&thr->lock);
        thr->interrupted = true;
        if (thr->state == Sleeping)
            sleeping = true;
        spinlock_unlock(&thr->lock);
       
        /* Wake a sleeping thread so it can notice the interruption. */
        if (sleeping)
            waitq_interrupt_sleep(thr);
    }
    spinlock_unlock(&ta->lock);
    interrupts_restore(ipl);
   
    return 0;
}
395
 
403
 
/** Tree walker printing one task-list line per visited node.
 *
 * Always returns true so the walk covers the whole tasks_tree.
 * The 'arg' parameter is unused.
 */
static bool task_print_walker(avltree_node_t *node, void *arg)
{
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
    int j;
       
    spinlock_lock(&t->lock);
           
    /* Scale the cycle count down to a printable value + unit suffix. */
    uint64_t cycles;
    char suffix;
    order(task_get_accounting(t), &cycles, &suffix);

#ifdef __32_BITS__  
    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64
        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

#ifdef __64_BITS__
    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64
        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

    /* List the connected phones (index:callee answerbox address). */
    for (j = 0; j < IPC_MAX_PHONES; j++) {
        if (t->phones[j].callee)
            printf(" %d:%p", j, t->phones[j].callee);
    }
    printf("\n");
           
    spinlock_unlock(&t->lock);
    return true;
}
428
 
436
 
429
/** Print task list */
437
/** Print task list */
430
void task_print_list(void)
438
void task_print_list(void)
431
{
439
{
432
    ipl_t ipl;
440
    ipl_t ipl;
433
   
441
   
434
    /* Messing with task structures, avoid deadlock */
442
    /* Messing with task structures, avoid deadlock */
435
    ipl = interrupts_disable();
443
    ipl = interrupts_disable();
436
    spinlock_lock(&tasks_lock);
444
    spinlock_lock(&tasks_lock);
437
 
445
 
438
#ifdef __32_BITS__  
446
#ifdef __32_BITS__  
439
    printf("taskid name         ctx address    as         "
447
    printf("taskid name         ctx address    as         "
440
        "cycles     threads calls  callee\n");
448
        "cycles     threads calls  callee\n");
441
    printf("------ ------------ --- ---------- ---------- "
449
    printf("------ ------------ --- ---------- ---------- "
442
        "---------- ------- ------ ------>\n");
450
        "---------- ------- ------ ------>\n");
443
#endif
451
#endif
444
 
452
 
445
#ifdef __64_BITS__
453
#ifdef __64_BITS__
446
    printf("taskid name         ctx address            as                 "
454
    printf("taskid name         ctx address            as                 "
447
        "cycles     threads calls  callee\n");
455
        "cycles     threads calls  callee\n");
448
    printf("------ ------------ --- ------------------ ------------------ "
456
    printf("------ ------------ --- ------------------ ------------------ "
449
        "---------- ------- ------ ------>\n");
457
        "---------- ------- ------ ------>\n");
450
#endif
458
#endif
451
 
459
 
452
    avltree_walk(&tasks_tree, task_print_walker, NULL);
460
    avltree_walk(&tasks_tree, task_print_walker, NULL);
453
 
461
 
454
    spinlock_unlock(&tasks_lock);
462
    spinlock_unlock(&tasks_lock);
455
    interrupts_restore(ipl);
463
    interrupts_restore(ipl);
456
}
464
}
457
 
465
 
458
/** @}
466
/** @}
459
 */
467
 */
460
 
468