/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief	Task management.
 */

#include <main/uinit.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <arch/barrier.h>
#include <panic.h>
#include <adt/avl.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <ipc/ipc.h>
#include <ipc/ipcrsc.h>
#include <security/cap.h>
#include <memstr.h>
#include <print.h>
#include <lib/elf.h>
#include <errno.h>
#include <func.h>
#include <syscall/copy.h>

#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
#endif

/** Spinlock protecting the tasks_tree AVL tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** AVL tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_tree as
 * long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held and was acquired before releasing tasks_lock, or
 * @li the task's refcount is greater than 0.
 *
 */
avltree_t tasks_tree;
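
/*
 * Illustrative sketch (not from the original source) of the second rule
 * above: to keep using a task after tasks_lock is dropped, acquire the
 * task's own lock before releasing tasks_lock. Variable names here are
 * hypothetical.
 *
 *	ipl_t ipl = interrupts_disable();
 *	spinlock_lock(&tasks_lock);
 *	task_t *t = task_find_by_id(id);
 *	if (t)
 *		spinlock_lock(&t->lock);
 *	spinlock_unlock(&tasks_lock);
 *	...			use *t safely here
 *	if (t)
 *		spinlock_unlock(&t->lock);
 *	interrupts_restore(ipl);
 */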

static task_id_t task_counter = 0;

/**
 * Points to the binary image used as the program loader. All non-initial
 * tasks are created from this executable image.
 */
void *program_loader = NULL;
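
/*
 * Note: program_loader is set by task_parse_initial() when it encounters
 * an image that elf_load() reports as the program loader (EE_LOADER), and
 * it is consumed by task_create_from_loader() below.
 */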


/** Initialize tasks
 *
 * Initialize kernel tasks support.
 *
 */
void task_init(void)
{
	TASK = NULL;
	avltree_create(&tasks_tree);
}

/*
 * The idea behind this walker is to remember a single task
 * different from TASK.
 */
static bool task_done_walker(avltree_node_t *node, void *arg)
{
	task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
	task_t **tp = (task_t **) arg;

	if (t != TASK) {
		*tp = t;
		return false;	/* stop walking */
	}

	return true;	/* continue the walk */
}

/** Kill all tasks except the current task.
 *
 */
void task_done(void)
{
	task_t *t;
	do { /* Repeat until there are no tasks other than TASK */

		/* Messing with task structures, avoid deadlock */
		ipl_t ipl = interrupts_disable();
		spinlock_lock(&tasks_lock);

		t = NULL;
		avltree_walk(&tasks_tree, task_done_walker, &t);

		if (t != NULL) {
			task_id_t id = t->taskid;

			spinlock_unlock(&tasks_lock);
			interrupts_restore(ipl);

#ifdef CONFIG_DEBUG
			printf("Killing task %" PRIu64 "\n", id);
#endif
			task_kill(id);
			thread_usleep(10000);
		} else {
			spinlock_unlock(&tasks_lock);
			interrupts_restore(ipl);
		}

	} while (t != NULL);
}

/** Create new task
 *
 * Create new task with no threads.
 *
 * @param as Task's address space.
 * @param name Symbolic name.
 *
 * @return New task's structure
 *
 */
task_t *task_create(as_t *as, char *name)
{
	ipl_t ipl;
	task_t *ta;
	int i;

	ta = (task_t *) malloc(sizeof(task_t), 0);

	task_create_arch(ta);

	spinlock_initialize(&ta->lock, "task_ta_lock");
	list_initialize(&ta->th_head);
	ta->as = as;
	ta->name = name;
	atomic_set(&ta->refcount, 0);
	atomic_set(&ta->lifecount, 0);
	ta->context = CONTEXT;

	ta->capabilities = 0;
	ta->cycles = 0;

	ipc_answerbox_init(&ta->answerbox, ta);
	for (i = 0; i < IPC_MAX_PHONES; i++)
		ipc_phone_init(&ta->phones[i]);
	if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
	    ta->context)))
		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
	atomic_set(&ta->active_calls, 0);

	mutex_initialize(&ta->futexes_lock);
	btree_create(&ta->futexes);

	ipl = interrupts_disable();

	/*
	 * Increment address space reference count.
	 */
	atomic_inc(&as->refcount);

	spinlock_lock(&tasks_lock);
	ta->taskid = ++task_counter;
	avltree_node_initialize(&ta->tasks_tree_node);
	ta->tasks_tree_node.key = ta->taskid;
	avltree_insert(&tasks_tree, &ta->tasks_tree_node);
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return ta;
}

/** Destroy task.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
	/*
	 * Remove the task from the task AVL tree.
	 */
	spinlock_lock(&tasks_lock);
	avltree_delete(&tasks_tree, &t->tasks_tree_node);
	spinlock_unlock(&tasks_lock);

	/*
	 * Perform architecture specific task destruction.
	 */
	task_destroy_arch(t);

	/*
	 * Free up dynamically allocated state.
	 */
	btree_destroy(&t->futexes);

	/*
	 * Drop our reference to the address space.
	 */
	if (atomic_predec(&t->as->refcount) == 0)
		as_destroy(t->as);

	free(t);
	TASK = NULL;
}

/** Create new task with 1 thread.
 *
 * The main thread is created but not made ready; the caller is expected
 * to do that (e.g. via task_ready()).
 *
 * @param as Address space containing a binary program image.
 * @param entry_addr Program entry-point address in program address space.
 * @param name Program name.
 * @param thr Where to store the pointer to the new main thread.
 *
 * @return Structure of the new task.
 */
task_t *task_create_from_as(as_t *as, uintptr_t entry_addr, char *name,
    thread_t **thr)
{
	as_area_t *a;
	thread_t *t;
	task_t *task;
	uspace_arg_t *kernel_uarg;

	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	kernel_uarg->uspace_entry = (void *) entry_addr;
	kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
	kernel_uarg->uspace_thread_function = NULL;
	kernel_uarg->uspace_thread_arg = NULL;
	kernel_uarg->uspace_uarg = NULL;

	task = task_create(as, name);
	ASSERT(task);

	/*
	 * Create the stack as_area.
	 */
	a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	    LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL);

	/*
	 * Create the main thread.
	 */
	t = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
	    "uinit", false);
	ASSERT(t);

	*thr = t;

	return task;
}

/** Parse an executable image in the physical memory.
 *
 * If the image belongs to a program loader, it is registered as such
 * (and *t is set to NULL). Otherwise a task is created from the
 * executable image and a pointer to its main thread is stored in *t.
 *
 * @param program_addr Address of program executable image.
 * @param name Program name.
 * @param t Where to store the pointer to the main thread of the newly
 *          created task.
 *
 * @return EOK on success or negative error code.
 */
int task_parse_initial(void *program_addr, char *name, thread_t **t)
{
	as_t *as;
	unsigned int rc;
	task_t *task;

	as = as_create(0);
	ASSERT(as);

	rc = elf_load((elf_header_t *) program_addr, as, 0);
	if (rc != EE_OK) {
		as_destroy(as);
		*t = NULL;
		if (rc != EE_LOADER)
			return ENOTSUP;

		/* Register the image as the program loader */
		ASSERT(program_loader == NULL);
		program_loader = program_addr;
		return EOK;
	}

	task = task_create_from_as(as, ((elf_header_t *) program_addr)->e_entry,
	    name, t);

	return EOK;
}

/** Create a task from the program loader image.
 *
 * @param name Program name.
 * @param t Buffer for storing pointer to the newly created task.
 *
 * @return EOK on success or an error code from @ref errno.h.
 */
int task_create_from_loader(char *name, task_t **t)
{
	as_t *as;
	unsigned int rc;
	void *loader;
	thread_t *thr;

	loader = program_loader;
	if (!loader)
		return ENOENT;

	as = as_create(0);
	ASSERT(as);

	rc = elf_load((elf_header_t *) loader, as, ELD_F_LOADER);
	if (rc != EE_OK) {
		as_destroy(as);
		return ENOENT;
	}

	*t = task_create_from_as(
	    as, ((elf_header_t *) loader)->e_entry, name, &thr);

	return EOK;
}

/** Make task ready.
 *
 * Switch the task's first thread to the ready state.
 *
 * @param t Task to make ready.
 */
void task_ready(task_t *t)
{
	thread_t *th;

	th = list_get_instance(t->th_head.next, thread_t, th_link);
	thread_ready(th);
}

/** Syscall for reading task ID from userspace.
 *
 * @param uspace_task_id Userspace address of 8-byte buffer where to store
 * current task ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
	/*
	 * No need to acquire lock on TASK because taskid
	 * remains constant for the lifespan of the task.
	 */
	return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
	    sizeof(TASK->taskid));
}

/** Syscall for creating a new task from userspace.
 *
 * Creates a new task from the program loader image, connects a phone
 * to it and stores the phone id into the provided buffer.
 *
 * @param uspace_phone_id Userspace address where to store the phone id.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_task_spawn_loader(int *uspace_phone_id)
{
	task_t *t;
	int fake_id;
	int rc;
	int phone_id;

	fake_id = 0;

	/* Before we even try creating the task, see if we can write the id */
	rc = (unative_t) copy_to_uspace(uspace_phone_id, &fake_id,
	    sizeof(fake_id));
	if (rc != 0)
		return rc;

	phone_id = phone_alloc();
	if (phone_id < 0)
		return ELIMIT;

	rc = task_create_from_loader("loader", &t);
	if (rc != 0)
		return rc;

	phone_connect(phone_id, &t->answerbox);

	/* No need to acquire lock before task_ready() */
	rc = (unative_t) copy_to_uspace(uspace_phone_id, &phone_id,
	    sizeof(phone_id));
	if (rc != 0) {
		/* Ooops */
		ipc_phone_hangup(&TASK->phones[phone_id]);
		task_kill(t->taskid);
		return rc;
	}

	task_ready(t);

	return EOK;
}
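
/** Syscall for spawning a new task directly from an executable image.
 *
 * Copies the image from userspace into kernel memory, makes the copy
 * coherent with the instruction cache, loads it into a newly created
 * address space, sets up a stack area and creates a task named "app"
 * with a single userspace thread, which is then made ready. As noted
 * in the code below, this is a temporary hack that is expected to be
 * superseded by the userspace dynamic loader.
 *
 * @param image Userspace address of the executable image.
 * @param size Size of the executable image in bytes.
 *
 * @return EOK on success or an error code from @ref errno.h.
 */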
unative_t sys_task_spawn(void *image, size_t size)
{
	void *kimage = malloc(size, 0);
	if (kimage == NULL)
		return ENOMEM;

	int rc = copy_from_uspace(kimage, image, size);
	if (rc != EOK) {
		free(kimage);
		return rc;
	}

	/*
	 * Not very efficient and it would be better to call it on code only,
	 * but this whole function is a temporary hack anyway and one day it
	 * will go in favor of the userspace dynamic loader.
	 */
	smc_coherence_block(kimage, size);

	uspace_arg_t *kernel_uarg;
	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	if (kernel_uarg == NULL) {
		free(kimage);
		return ENOMEM;
	}

	kernel_uarg->uspace_entry =
	    (void *) ((elf_header_t *) kimage)->e_entry;
	kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
	kernel_uarg->uspace_thread_function = NULL;
	kernel_uarg->uspace_thread_arg = NULL;
	kernel_uarg->uspace_uarg = NULL;

	as_t *as = as_create(0);
	if (as == NULL) {
		free(kernel_uarg);
		free(kimage);
		return ENOMEM;
	}

	unsigned int erc = elf_load((elf_header_t *) kimage, as, ELD_F_NONE);
	if (erc != EE_OK) {
		as_destroy(as);
		free(kernel_uarg);
		free(kimage);
		return ENOENT;
	}

	as_area_t *area = as_area_create(as,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	    LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL);
	if (area == NULL) {
		as_destroy(as);
		free(kernel_uarg);
		free(kimage);
		return ENOMEM;
	}

	task_t *task = task_create(as, "app");
	if (task == NULL) {
		as_destroy(as);
		free(kernel_uarg);
		free(kimage);
		return ENOENT;
	}

	// FIXME: control the capabilities
	cap_set(task, cap_get(TASK));

	thread_t *thread = thread_create(uinit, kernel_uarg, task,
	    THREAD_FLAG_USPACE, "user", false);
	if (thread == NULL) {
		task_destroy(task);
		as_destroy(as);
		free(kernel_uarg);
		free(kimage);
		return ENOENT;
	}

	thread_ready(thread);

	return EOK;
}

/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must be already held by the caller of this function
 * and interrupts must be disabled.
 *
 * @param id Task ID.
 *
 * @return Task structure address or NULL if there is no such task ID.
 */
task_t *task_find_by_id(task_id_t id)
{
	avltree_node_t *node;

	node = avltree_search(&tasks_tree, (avltree_key_t) id);

	if (node)
		return avltree_get_instance(node, task_t, tasks_tree_node);
	return NULL;
}

/** Get accounting data of given task.
 *
 * Note that the task lock of 't' must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to the task.
 *
 * @return Accumulated number of cycles used by the task and its
 *         counted threads.
 */
uint64_t task_get_accounting(task_t *t)
{
	/* Accumulated value of task */
	uint64_t ret = t->cycles;

	/* Current values of threads */
	link_t *cur;
	for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
		thread_t *thr = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&thr->lock);
		/* Process only counted threads */
		if (!thr->uncounted) {
			if (thr == THREAD) {
				/* Update accounting of current thread */
				thread_update_accounting();
			}
			ret += thr->cycles;
		}
		spinlock_unlock(&thr->lock);
	}

	return ret;
}
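
/*
 * Illustrative call pattern for task_get_accounting() (a sketch, not from
 * the original source), mirroring what task_print_walker() does below:
 * interrupts are disabled and the task's lock is held around the call.
 *
 *	spinlock_lock(&t->lock);
 *	uint64_t cycles = task_get_accounting(t);
 *	spinlock_unlock(&t->lock);
 */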

/** Kill task.
 *
 * This function is idempotent.
 * It signals all of the task's threads to exit.
 *
 * @param id ID of the task to be killed.
 *
 * @return 0 on success or an error code from errno.h.
 */
int task_kill(task_id_t id)
{
	ipl_t ipl;
	task_t *ta;
	link_t *cur;

	if (id == 1)
		return EPERM;

	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);
	if (!(ta = task_find_by_id(id))) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	spinlock_unlock(&tasks_lock);

	/*
	 * Interrupt all threads.
	 */
	spinlock_lock(&ta->lock);
	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
		thread_t *thr;
		bool sleeping = false;

		thr = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&thr->lock);
		thr->interrupted = true;
		if (thr->state == Sleeping)
			sleeping = true;
		spinlock_unlock(&thr->lock);

		if (sleeping)
			waitq_interrupt_sleep(thr);
	}
	spinlock_unlock(&ta->lock);
	interrupts_restore(ipl);

	return 0;
}

static bool task_print_walker(avltree_node_t *node, void *arg)
{
	task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
	int j;

	spinlock_lock(&t->lock);

	uint64_t cycles;
	char suffix;
	order(task_get_accounting(t), &cycles, &suffix);

#ifdef __32_BITS__
	printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %10p %10p %9" PRIu64
	    "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
	    suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

#ifdef __64_BITS__
	printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %18p %18p %9" PRIu64
	    "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
	    suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif

	for (j = 0; j < IPC_MAX_PHONES; j++) {
		if (t->phones[j].callee)
			printf(" %d:%p", j, t->phones[j].callee);
	}
	printf("\n");

	spinlock_unlock(&t->lock);
	return true;
}

/** Print task list */
void task_print_list(void)
{
	ipl_t ipl;

	/* Messing with task structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

#ifdef __32_BITS__
	printf("taskid name       ctx address    as         "
	    "cycles     threads calls  callee\n");
	printf("------ ---------- --- ---------- ---------- "
	    "---------- ------- ------ ------>\n");
#endif

#ifdef __64_BITS__
	printf("taskid name       ctx address            as                 "
	    "cycles     threads calls  callee\n");
	printf("------ ---------- --- ------------------ ------------------ "
	    "---------- ------- ------ ------>\n");
#endif

	avltree_walk(&tasks_tree, task_print_walker, NULL);

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
}

/** @}
 */