Subversion Repositories HelenOS

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Task management.
 */

#include <proc/thread.h>
#include <proc/task.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <arch/barrier.h>
#include <adt/avl.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <ipc/ipc.h>
#include <ipc/ipcrsc.h>
#include <print.h>
#include <errno.h>
#include <func.h>
#include <syscall/copy.h>

/** Spinlock protecting the tasks_tree AVL tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** AVL tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_tree as
 * long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held when task's lock is acquired before releasing
 *     tasks_lock or
 * @li the task's refcount is greater than 0.
 *
 */
avltree_t tasks_tree;
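
/*
 * Illustrative sketch, not part of the original file: the locking protocol
 * described by the comment above. A task found in tasks_tree remains valid
 * as long as its own lock is taken before tasks_lock is released. The
 * function name example_find_and_lock_task is hypothetical; interrupts are
 * assumed to be disabled by the caller (see task_find_by_id() below).
 */
static task_t *example_find_and_lock_task(task_id_t id)
{
    task_t *t;

    spinlock_lock(&tasks_lock);
    t = task_find_by_id(id);
    if (t) {
        /* Grab the task's lock before dropping tasks_lock. */
        spinlock_lock(&t->lock);
    }
    spinlock_unlock(&tasks_lock);

    /* If non-NULL, t->lock is held and t cannot disappear. */
    return t;
}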

static task_id_t task_counter = 0;

/** Initialize kernel tasks support. */
void task_init(void)
{
    TASK = NULL;
    avltree_create(&tasks_tree);
}

/*
 * The idea behind this walker is to remember a single task different from
 * TASK.
 */
static bool task_done_walker(avltree_node_t *node, void *arg)
{
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
    task_t **tp = (task_t **) arg;

    if (t != TASK) {
        *tp = t;
        return false;   /* stop walking */
    }

    return true;    /* continue the walk */
}

/** Kill all tasks except the current task. */
void task_done(void)
{
    task_t *t;
    do { /* Repeat until no tasks other than TASK remain. */

        /* Messing with task structures, avoid deadlock */
        ipl_t ipl = interrupts_disable();
        spinlock_lock(&tasks_lock);

        t = NULL;
        avltree_walk(&tasks_tree, task_done_walker, &t);

        if (t != NULL) {
            task_id_t id = t->taskid;

            spinlock_unlock(&tasks_lock);
            interrupts_restore(ipl);

#ifdef CONFIG_DEBUG
            printf("Killing task %" PRIu64 "\n", id);
#endif
            task_kill(id);
            thread_usleep(10000);
        } else {
            spinlock_unlock(&tasks_lock);
            interrupts_restore(ipl);
        }

    } while (t != NULL);
}

/** Create new task with no threads.
 *
 * @param as        Task's address space.
 * @param name      Symbolic name.
 *
 * @return      New task's structure.
 *
 */
task_t *task_create(as_t *as, char *name)
{
    ipl_t ipl;
    task_t *ta;
    int i;

    ta = (task_t *) malloc(sizeof(task_t), 0);

    task_create_arch(ta);

    spinlock_initialize(&ta->lock, "task_ta_lock");
    list_initialize(&ta->th_head);
    ta->as = as;
    ta->name = name;
    atomic_set(&ta->refcount, 0);
    atomic_set(&ta->lifecount, 0);
    ta->context = CONTEXT;

    ta->capabilities = 0;
    ta->cycles = 0;

#ifdef CONFIG_UDEBUG
    /* Init debugging stuff */
    udebug_task_init(&ta->udebug);

    /* Init kbox stuff */
    ipc_answerbox_init(&ta->kernel_box, ta);
    ta->kb_thread = NULL;
    mutex_initialize(&ta->kb_cleanup_lock, MUTEX_PASSIVE);
    ta->kb_finished = false;
#endif

    ipc_answerbox_init(&ta->answerbox, ta);
    for (i = 0; i < IPC_MAX_PHONES; i++)
        ipc_phone_init(&ta->phones[i]);
    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
        ta->context)))
        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
    atomic_set(&ta->active_calls, 0);

    mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
    btree_create(&ta->futexes);

    ipl = interrupts_disable();

    /*
     * Increment address space reference count.
     */
    atomic_inc(&as->refcount);

    spinlock_lock(&tasks_lock);
    ta->taskid = ++task_counter;
    avltree_node_initialize(&ta->tasks_tree_node);
    ta->tasks_tree_node.key = ta->taskid;
    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);

    return ta;
}

/** Destroy task.
 *
 * @param t     Task to be destroyed.
 */
void task_destroy(task_t *t)
{
    /*
     * Remove the task from the tasks_tree AVL tree.
     */
    spinlock_lock(&tasks_lock);
    avltree_delete(&tasks_tree, &t->tasks_tree_node);
    spinlock_unlock(&tasks_lock);

    /*
     * Perform architecture specific task destruction.
     */
    task_destroy_arch(t);

    /*
     * Free up dynamically allocated state.
     */
    btree_destroy(&t->futexes);

    /*
     * Drop our reference to the address space.
     */
    if (atomic_predec(&t->as->refcount) == 0)
        as_destroy(t->as);

    free(t);
    TASK = NULL;
}

/** Syscall for reading task ID from userspace.
 *
 * @param uspace_task_id    Userspace address of an 8-byte buffer
 *          where to store the current task ID.
 *
 * @return      Zero on success or an error code from @ref errno.h.
 */
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
    /*
     * No need to acquire lock on TASK because taskid remains constant for
     * the lifespan of the task.
     */
    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
        sizeof(TASK->taskid));
}

/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must be already held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param id        Task ID.
 *
 * @return      Task structure address or NULL if there is no such task
 *          ID.
 */
task_t *task_find_by_id(task_id_t id)
{
    avltree_node_t *node;

    node = avltree_search(&tasks_tree, (avltree_key_t) id);

    if (node)
        return avltree_get_instance(node, task_t, tasks_tree_node);
    return NULL;
}

/** Get accounting data of given task.
 *
 * Note that the task lock of 't' must be already held and interrupts must be
 * already disabled.
 *
 * @param t     Pointer to the task.
 *
 * @return      Number of cycles used by the task and all its threads
 *          so far.
 */
uint64_t task_get_accounting(task_t *t)
{
    /* Accumulated value of task */
    uint64_t ret = t->cycles;

    /* Current values of threads */
    link_t *cur;
    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
        thread_t *thr = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&thr->lock);
        /* Process only counted threads */
        if (!thr->uncounted) {
            if (thr == THREAD) {
                /* Update accounting of current thread */
                thread_update_accounting();
            }
            ret += thr->cycles;
        }
        spinlock_unlock(&thr->lock);
    }

    return ret;
}
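
/*
 * Illustrative sketch, not part of the original file: the caller-side
 * protocol required by the comment above -- interrupts disabled and the
 * task's lock held around the call. task_print_list()/task_print_walker()
 * below follow the same pattern. The function name example_task_cycles is
 * hypothetical.
 */
static uint64_t example_task_cycles(task_t *t)
{
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&t->lock);

    uint64_t cycles = task_get_accounting(t);

    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);

    return cycles;
}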

/** Kill task.
 *
 * This function is idempotent.
 * It signals all the task's threads to bail out.
 *
 * @param id        ID of the task to be killed.
 *
 * @return      Zero on success or an error code from errno.h.
 */
int task_kill(task_id_t id)
{
    ipl_t ipl;
    task_t *ta;
    link_t *cur;

    if (id == 1)
        return EPERM;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);
    if (!(ta = task_find_by_id(id))) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }
    spinlock_unlock(&tasks_lock);

    /*
     * Interrupt all threads.
     */
    spinlock_lock(&ta->lock);
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
        thread_t *thr;
        bool sleeping = false;

        thr = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&thr->lock);
        thr->interrupted = true;
        if (thr->state == Sleeping)
            sleeping = true;
        spinlock_unlock(&thr->lock);

        if (sleeping)
            waitq_interrupt_sleep(thr);
    }
    spinlock_unlock(&ta->lock);
    interrupts_restore(ipl);

    return 0;
}
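
/*
 * Illustrative sketch, not part of the original file: checking task_kill()'s
 * return value; EPERM and ENOENT are the codes returned above. The function
 * name example_kill_report is hypothetical.
 */
static void example_kill_report(task_id_t id)
{
    int rc = task_kill(id);

    if (rc == EPERM)
        printf("Task %" PRIu64 " may not be killed.\n", id);
    else if (rc == ENOENT)
        printf("Task %" PRIu64 " does not exist.\n", id);
}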

static bool task_print_walker(avltree_node_t *node, void *arg)
{
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
    int j;

    spinlock_lock(&t->lock);

    uint64_t cycles;
    char suffix;
    order(task_get_accounting(t), &cycles, &suffix);

#ifdef __32_BITS__
    printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %10p %10p %9" PRIu64
        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

#ifdef __64_BITS__
    printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %18p %18p %9" PRIu64
        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

    for (j = 0; j < IPC_MAX_PHONES; j++) {
        if (t->phones[j].callee)
            printf(" %d:%p", j, t->phones[j].callee);
    }
    printf("\n");

    spinlock_unlock(&t->lock);
    return true;
}

/** Print task list */
void task_print_list(void)
{
    ipl_t ipl;

    /* Messing with task structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

#ifdef __32_BITS__
    printf("taskid name       ctx address    as         "
        "cycles     threads calls  callee\n");
    printf("------ ---------- --- ---------- ---------- "
        "---------- ------- ------ ------>\n");
#endif

#ifdef __64_BITS__
    printf("taskid name       ctx address            as                 "
        "cycles     threads calls  callee\n");
    printf("------ ---------- --- ------------------ ------------------ "
        "---------- ------- ------ ------>\n");
#endif

    avltree_walk(&tasks_tree, task_print_walker, NULL);

    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);
}

/** @}
 */