Subversion Repositories HelenOS

Comparison of Rev 3665 and Rev 3862 of the kernel task management source. The only change between the two revisions is that Rev 3862 adds #include <arch/asm.h>; the listing below is the Rev 3862 version.
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Task management.
 */

#include <proc/thread.h>
#include <proc/task.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <arch/barrier.h>
#include <adt/avl.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <ipc/ipc.h>
#include <ipc/ipcrsc.h>
#include <print.h>
#include <errno.h>
#include <func.h>
#include <syscall/copy.h>
#include <arch/asm.h>

/** Spinlock protecting the tasks_tree AVL tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** AVL tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_tree as
 * long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held, provided it was acquired before releasing
 *     tasks_lock, or
 * @li the task's refcount is greater than 0.
 *
 */
avltree_t tasks_tree;
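
/*
 * A minimal sketch of the second rule above: the task found in tasks_tree
 * remains valid because its own lock is taken before tasks_lock is released.
 * The helper name task_lock_by_id_sketch is hypothetical and only
 * illustrates the documented protocol.
 */
static task_t *task_lock_by_id_sketch(task_id_t id, ipl_t *ipl)
{
    task_t *t;

    *ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    t = task_find_by_id(id);
    if (t)
        spinlock_lock(&t->lock);    /* taken before tasks_lock is dropped */

    spinlock_unlock(&tasks_lock);

    if (!t)
        interrupts_restore(*ipl);

    /* On success the caller unlocks t->lock and restores interrupts. */
    return t;
}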

static task_id_t task_counter = 0;

/** Initialize kernel tasks support. */
void task_init(void)
{
    TASK = NULL;
    avltree_create(&tasks_tree);
}

/*
 * The idea behind this walker is to remember a single task different from
 * TASK.
 */
static bool task_done_walker(avltree_node_t *node, void *arg)
{
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
    task_t **tp = (task_t **) arg;

    if (t != TASK) {
        *tp = t;
        return false;   /* stop walking */
    }

    return true;    /* continue the walk */
}
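
/*
 * A minimal sketch of the same avltree_walk() callback convention: returning
 * true continues the walk, returning false stops it. The walker name
 * task_count_walker is hypothetical; it would be run with tasks_lock held,
 * e.g. avltree_walk(&tasks_tree, task_count_walker, &count).
 */
static bool task_count_walker(avltree_node_t *node, void *arg)
{
    int *count = (int *) arg;

    (void) node;    /* every visited task is counted */
    (*count)++;

    return true;    /* continue over all tasks */
}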

/** Kill all tasks except the current task. */
void task_done(void)
{
    task_t *t;
    do { /* Repeat while there are tasks other than TASK */

        /* Messing with task structures, avoid deadlock */
        ipl_t ipl = interrupts_disable();
        spinlock_lock(&tasks_lock);

        t = NULL;
        avltree_walk(&tasks_tree, task_done_walker, &t);

        if (t != NULL) {
            task_id_t id = t->taskid;

            spinlock_unlock(&tasks_lock);
            interrupts_restore(ipl);

#ifdef CONFIG_DEBUG
            printf("Killing task %" PRIu64 "\n", id);
#endif
            task_kill(id);
            thread_usleep(10000);
        } else {
            spinlock_unlock(&tasks_lock);
            interrupts_restore(ipl);
        }

    } while (t != NULL);
}

/** Create new task with no threads.
 *
 * @param as        Task's address space.
 * @param name      Symbolic name (a copy is made).
 *
 * @return      New task's structure.
 *
 */
task_t *task_create(as_t *as, char *name)
{
    ipl_t ipl;
    task_t *ta;
    int i;

    ta = (task_t *) malloc(sizeof(task_t), 0);

    task_create_arch(ta);

    spinlock_initialize(&ta->lock, "task_ta_lock");
    list_initialize(&ta->th_head);
    ta->as = as;

    memcpy(ta->name, name, TASK_NAME_BUFLEN);
    ta->name[TASK_NAME_BUFLEN - 1] = '\0';

    atomic_set(&ta->refcount, 0);
    atomic_set(&ta->lifecount, 0);
    ta->context = CONTEXT;

    ta->capabilities = 0;
    ta->cycles = 0;

#ifdef CONFIG_UDEBUG
    /* Init debugging stuff */
    udebug_task_init(&ta->udebug);

    /* Init kbox stuff */
    ipc_answerbox_init(&ta->kb.box, ta);
    ta->kb.thread = NULL;
    mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
    ta->kb.finished = false;
#endif

    ipc_answerbox_init(&ta->answerbox, ta);
    for (i = 0; i < IPC_MAX_PHONES; i++)
        ipc_phone_init(&ta->phones[i]);
    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
        ta->context)))
        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
    atomic_set(&ta->active_calls, 0);

    mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
    btree_create(&ta->futexes);

    ipl = interrupts_disable();

    /*
     * Increment address space reference count.
     */
    atomic_inc(&as->refcount);

    spinlock_lock(&tasks_lock);
    ta->taskid = ++task_counter;
    avltree_node_initialize(&ta->tasks_tree_node);
    ta->tasks_tree_node.key = ta->taskid;
    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);

    return ta;
}
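
/*
 * A minimal sketch of calling task_create(). The helper name
 * task_create_sketch is hypothetical, and as_create() is assumed to be the
 * address space constructor from the mm/as interface included above; the
 * new task has no threads until one is attached to it.
 */
static task_t *task_create_sketch(void)
{
    as_t *as = as_create(0);    /* assumption: 0 means no special flags */

    if (!as)
        return NULL;

    /* The name is copied into the task structure by task_create(). */
    return task_create(as, "sketch");
}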

/** Destroy task.
 *
 * @param t     Task to be destroyed.
 */
void task_destroy(task_t *t)
{
    /*
     * Remove the task from the tasks_tree AVL tree.
     */
    spinlock_lock(&tasks_lock);
    avltree_delete(&tasks_tree, &t->tasks_tree_node);
    spinlock_unlock(&tasks_lock);

    /*
     * Perform architecture specific task destruction.
     */
    task_destroy_arch(t);

    /*
     * Free up dynamically allocated state.
     */
    btree_destroy(&t->futexes);

    /*
     * Drop our reference to the address space.
     */
    if (atomic_predec(&t->as->refcount) == 0)
        as_destroy(t->as);

    free(t);
    TASK = NULL;
}

/** Syscall for reading task ID from userspace.
 *
 * @param uspace_task_id    Userspace address of an 8-byte buffer in which
 *              to store the current task ID.
 *
 * @return      Zero on success or an error code from @ref errno.h.
 */
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
    /*
     * No need to acquire lock on TASK because taskid remains constant for
     * the lifespan of the task.
     */
    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
        sizeof(TASK->taskid));
}

/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must be already held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param id        Task ID.
 *
 * @return      Task structure address or NULL if there is no such task
 *          ID.
 */
task_t *task_find_by_id(task_id_t id)
{
    avltree_node_t *node;

    node = avltree_search(&tasks_tree, (avltree_key_t) id);

    if (node)
        return avltree_get_instance(node, task_t, tasks_tree_node);
    return NULL;
}

/** Get accounting data of given task.
 *
 * Note that the task lock of 't' must already be held and interrupts must
 * already be disabled.
 *
 * @param t     Pointer to the task.
 *
 * @return      Number of cycles used by the task and all its threads
 *          so far.
 */
uint64_t task_get_accounting(task_t *t)
{
    /* Accumulated value of task */
    uint64_t ret = t->cycles;

    /* Current values of threads */
    link_t *cur;
    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
        thread_t *thr = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&thr->lock);
        /* Process only counted threads */
        if (!thr->uncounted) {
            if (thr == THREAD) {
                /* Update accounting of current thread */
                thread_update_accounting();
            }
            ret += thr->cycles;
        }
        spinlock_unlock(&thr->lock);
    }

    return ret;
}
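
/*
 * A minimal sketch of calling task_get_accounting() under its documented
 * preconditions: interrupts disabled and the task's lock held. The name
 * task_print_cycles_sketch is hypothetical and assumes 't' is kept alive by
 * one of the rules listed at tasks_tree above.
 */
static void task_print_cycles_sketch(task_t *t)
{
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&t->lock);

    uint64_t cycles = task_get_accounting(t);

    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);

    /* taskid is constant for the task's lifetime, no lock needed here. */
    printf("task %" PRIu64 ": %" PRIu64 " cycles\n", t->taskid, cycles);
}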

/** Kill task.
 *
 * This function is idempotent.
 * It signals all of the task's threads to exit.
 *
 * @param id        ID of the task to be killed.
 *
 * @return      Zero on success or an error code from errno.h.
 */
int task_kill(task_id_t id)
{
    ipl_t ipl;
    task_t *ta;
    link_t *cur;

    if (id == 1)
        return EPERM;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);
    if (!(ta = task_find_by_id(id))) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }
    spinlock_unlock(&tasks_lock);

    /*
     * Interrupt all threads.
     */
    spinlock_lock(&ta->lock);
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
        thread_t *thr;
        bool sleeping = false;

        thr = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&thr->lock);
        thr->interrupted = true;
        if (thr->state == Sleeping)
            sleeping = true;
        spinlock_unlock(&thr->lock);

        if (sleeping)
            waitq_interrupt_sleep(thr);
    }
    spinlock_unlock(&ta->lock);
    interrupts_restore(ipl);

    return 0;
}
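
/*
 * A minimal sketch of handling the task_kill() return codes documented
 * above. The name task_kill_verbose_sketch is hypothetical.
 */
static int task_kill_verbose_sketch(task_id_t id)
{
    int rc = task_kill(id);

    if (rc == EPERM)
        printf("Task %" PRIu64 " may not be killed\n", id);
    else if (rc == ENOENT)
        printf("No task with ID %" PRIu64 "\n", id);

    return rc;
}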

static bool task_print_walker(avltree_node_t *node, void *arg)
{
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
    int j;

    spinlock_lock(&t->lock);

    uint64_t cycles;
    char suffix;
    order(task_get_accounting(t), &cycles, &suffix);

#ifdef __32_BITS__
    printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %10p %10p %9" PRIu64
        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

#ifdef __64_BITS__
    printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %18p %18p %9" PRIu64
        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

    for (j = 0; j < IPC_MAX_PHONES; j++) {
        if (t->phones[j].callee)
            printf(" %d:%p", j, t->phones[j].callee);
    }
    printf("\n");

    spinlock_unlock(&t->lock);
    return true;
}

/** Print task list */
void task_print_list(void)
{
    ipl_t ipl;

    /* Messing with task structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

#ifdef __32_BITS__
    printf("taskid name       ctx address    as         "
        "cycles     threads calls  callee\n");
    printf("------ ---------- --- ---------- ---------- "
        "---------- ------- ------ ------>\n");
#endif

#ifdef __64_BITS__
    printf("taskid name       ctx address            as                 "
        "cycles     threads calls  callee\n");
    printf("------ ---------- --- ------------------ ------------------ "
        "---------- ------- ------ ------>\n");
#endif

    avltree_walk(&tasks_tree, task_print_walker, NULL);

    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);
}

/** @}
 */