/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief	Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/avl.h>
#include <adt/list.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>
#include <console/klog.h>

/** Thread states */
char *thread_states[] = {
	"Invalid",
	"Running",
	"Sleeping",
	"Ready",
	"Entering",
	"Exiting",
	"Lingering"
};

/** Lock protecting the threads_tree AVL tree.
 *
 * For locking rules, see declaration thereof.
 */
SPINLOCK_INITIALIZE(threads_lock);

/** AVL tree of all threads.
 *
 * When a thread is found in the threads_tree AVL tree, it is guaranteed to
 * exist as long as the threads_lock is held.
 */
avltree_t threads_tree;

SPINLOCK_INITIALIZE(tidlock);
thread_id_t last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;
	THREAD->last_cycle = get_cycle();

	/* This is where each thread wakes up after its creation */
	spinlock_unlock(&THREAD->lock);
	interrupts_enable();

	f(arg);

	/* Accumulate accounting to the task */
	ipl_t ipl = interrupts_disable();

	spinlock_lock(&THREAD->lock);
	if (!THREAD->uncounted) {
		thread_update_accounting();
		uint64_t cycles = THREAD->cycles;
		THREAD->cycles = 0;
		spinlock_unlock(&THREAD->lock);

		spinlock_lock(&TASK->lock);
		TASK->cycles += cycles;
		spinlock_unlock(&TASK->lock);
	} else
		spinlock_unlock(&THREAD->lock);

	interrupts_restore(ipl);

	thread_exit();
	/* not reached */
}
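
/*
 * Note on the cushion() handoff (informational only): thread_create() below
 * saves a context for the new thread and points it at cushion() via
 * context_set(..., FADDR(cushion), ...). When the scheduler later restores
 * that context, the new thread starts executing here, presumably with
 * interrupts disabled and THREAD->lock held by the scheduling path, which is
 * why cushion() begins by releasing THREAD->lock and enabling interrupts.
 */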

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
	thread_t *t = (thread_t *) obj;

	spinlock_initialize(&t->lock, "thread_t_lock");
	link_initialize(&t->rq_link);
	link_initialize(&t->wq_link);
	link_initialize(&t->th_link);

	/* call the architecture-specific part of the constructor */
	thr_constructor_arch(t);

#ifdef ARCH_HAS_FPU
#ifdef CONFIG_FPU_LAZY
	t->saved_fpu_context = NULL;
#else
	t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
	if (!t->saved_fpu_context)
		return -1;
#endif
#endif

	t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
	if (!t->kstack) {
#ifdef ARCH_HAS_FPU
		if (t->saved_fpu_context)
			slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
		return -1;
	}

	return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
	thread_t *t = (thread_t *) obj;

	/* call the architecture-specific part of the destructor */
	thr_destructor_arch(t);

	frame_free(KA2PA(t->kstack));
#ifdef ARCH_HAS_FPU
	if (t->saved_fpu_context)
		slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
	return 1; /* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
	THREAD = NULL;
	atomic_set(&nrdy, 0);
	thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
	    thr_constructor, thr_destructor, 0);

#ifdef ARCH_HAS_FPU
	fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
	    FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

	avltree_create(&threads_tree);
}
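
/*
 * Slab note (informational sketch only): because thread_slab is created with
 * thr_constructor/thr_destructor, a thread_t handed out by slab_alloc() in
 * thread_create() already carries a kernel stack (and, without
 * CONFIG_FPU_LAZY, a saved FPU context). Assuming the usual slab semantics,
 * those resources are released only when the slab allocator actually reclaims
 * the object, not on every slab_free() performed by thread_destroy().
 */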

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
	cpu_t *cpu;
	runq_t *r;
	ipl_t ipl;
	int i, avg;

	ipl = interrupts_disable();

	spinlock_lock(&t->lock);

	ASSERT(!(t->state == Ready));

	i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

	cpu = CPU;
	if (t->flags & THREAD_FLAG_WIRED) {
		ASSERT(t->cpu != NULL);
		cpu = t->cpu;
	}
	t->state = Ready;
	spinlock_unlock(&t->lock);

	/*
	 * Append t to respective ready queue on respective processor.
	 */
	r = &cpu->rq[i];
	spinlock_lock(&r->lock);
	list_append(&t->rq_link, &r->rq_head);
	r->n++;
	spinlock_unlock(&r->lock);

	atomic_inc(&nrdy);
	avg = atomic_get(&nrdy) / config.cpu_active;
	atomic_inc(&cpu->nrdy);

	interrupts_restore(ipl);
}
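
/*
 * Run-queue selection sketch (informational only): a freshly created thread
 * starts with priority == -1 (see thread_create()), so its first
 * thread_ready() bumps it to 0 and enqueues it on rq[0]. As seen from this
 * function alone, each further call moves the thread one queue higher, up to
 * RQ_COUNT - 1, where it stays; the scheduler may adjust t->priority
 * elsewhere. A THREAD_FLAG_WIRED thread is always enqueued on its dedicated
 * CPU, otherwise the current CPU's queues are used.
 */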

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func      Thread's implementing function.
 * @param arg       Thread's implementing function argument.
 * @param task      Task to which the thread belongs.
 * @param flags     Thread flags.
 * @param name      Symbolic name.
 * @param uncounted Thread's accounting doesn't affect accumulated task
 *                  accounting.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
    int flags, char *name, bool uncounted)
{
	thread_t *t;
	ipl_t ipl;

	t = (thread_t *) slab_alloc(thread_slab, 0);
	if (!t)
		return NULL;

	/* Not needed, but good for debugging */
	memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
	    0);

	ipl = interrupts_disable();
	spinlock_lock(&tidlock);
	t->tid = ++last_tid;
	spinlock_unlock(&tidlock);
	interrupts_restore(ipl);

	context_save(&t->saved_context);
	context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
	    THREAD_STACK_SIZE);

	the_initialize((the_t *) t->kstack);

	ipl = interrupts_disable();
	t->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	memcpy(t->name, name, THREAD_NAME_BUFLEN);

	t->thread_code = func;
	t->thread_arg = arg;
	t->ticks = -1;
	t->cycles = 0;
	t->uncounted = uncounted;
	t->priority = -1;		/* start in rq[0] */
	t->cpu = NULL;
	t->flags = flags;
	t->state = Entering;
	t->call_me = NULL;
	t->call_me_with = NULL;

	timeout_initialize(&t->sleep_timeout);
	t->sleep_interruptible = false;
	t->sleep_queue = NULL;
	t->timeout_pending = 0;

	t->in_copy_from_uspace = false;
	t->in_copy_to_uspace = false;

	t->interrupted = false;
	t->detached = false;
	waitq_initialize(&t->join_wq);

	t->rwlock_holder_type = RWLOCK_NONE;

	t->task = task;

	t->fpu_context_exists = 0;
	t->fpu_context_engaged = 0;

	avltree_node_initialize(&t->threads_tree_node);
	t->threads_tree_node.key = (uintptr_t) t;

	/* might depend on previous initialization */
	thread_create_arch(t);

	if (!(flags & THREAD_FLAG_NOATTACH))
		thread_attach(t, task);

	return t;
}
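
/*
 * Typical kernel-side usage (illustrative sketch only, modeled on
 * sys_thread_create() below; worker() and arg are hypothetical):
 *
 *	thread_t *t = thread_create(worker, arg, TASK, 0, "worker", false);
 *	if (t)
 *		thread_ready(t);
 *
 * Without THREAD_FLAG_NOATTACH in flags, thread_create() attaches the new
 * thread to the task itself; with it, the caller must call thread_attach()
 * before thread_ready(), which is what the syscall path does.
 */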

/** Destroy thread memory structure
 *
 * Detach thread from all queues, CPUs, etc., and destroy it.
 *
 * Assume thread->lock is held!
 */
void thread_destroy(thread_t *t)
{
	ASSERT(t->state == Exiting || t->state == Lingering);
	ASSERT(t->task);
	ASSERT(t->cpu);

	spinlock_lock(&t->cpu->lock);
	if (t->cpu->fpu_owner == t)
		t->cpu->fpu_owner = NULL;
	spinlock_unlock(&t->cpu->lock);

	spinlock_unlock(&t->lock);

	spinlock_lock(&threads_lock);
	avltree_delete(&threads_tree, &t->threads_tree_node);
	spinlock_unlock(&threads_lock);

	/*
	 * Detach from the containing task.
	 */
	spinlock_lock(&t->task->lock);
	list_remove(&t->th_link);
	spinlock_unlock(&t->task->lock);

	/*
	 * t is guaranteed to be the very last thread of its task.
	 * It is safe to destroy the task.
	 */
	if (atomic_predec(&t->task->refcount) == 0)
		task_destroy(t->task);

	slab_free(thread_slab, t);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * threads_tree.
 *
 * @param t	Thread to be attached to the task.
 * @param task	Task to which the thread is to be attached.
 */
void thread_attach(thread_t *t, task_t *task)
{
	ipl_t ipl;

	/*
	 * Attach to the current task.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&task->lock);
	atomic_inc(&task->refcount);
	atomic_inc(&task->lifecount);
	list_append(&t->th_link, &task->th_head);
	spinlock_unlock(&task->lock);

	/*
	 * Register this thread in the system-wide list.
	 */
	spinlock_lock(&threads_lock);
	avltree_insert(&threads_tree, &t->threads_tree_node);
	spinlock_unlock(&threads_lock);

	interrupts_restore(ipl);
}
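
/*
 * Reference-counting sketch (informational only): thread_attach() increments
 * both task->refcount and task->lifecount. The lifecount is dropped in
 * thread_exit(), where the last exiting thread performs IPC and futex cleanup
 * for userspace threads, while the refcount is dropped in thread_destroy(),
 * which calls task_destroy() once the count reaches zero.
 */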

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state. All pending
 * timeouts are executed.
 */
void thread_exit(void)
{
	ipl_t ipl;

	if (atomic_predec(&TASK->lifecount) == 0) {
		/*
		 * We are the last thread in the task that still has not exited.
		 * With the exception of the moment the task was created, new
		 * threads can only be created by threads of the same task.
		 * We are safe to perform cleanup.
		 */
		if (THREAD->flags & THREAD_FLAG_USPACE) {
			ipc_cleanup();
			futex_cleanup();
			klog_printf("Cleanup of task %llu completed.",
			    TASK->taskid);
		}
	}

restart:
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) {
		/* busy waiting for timeouts in progress */
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}

	THREAD->state = Exiting;
	spinlock_unlock(&THREAD->lock);
	scheduler();

	/* Not reached */
	while (1)
		;
}
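
/*
 * Why the restart loop above (informational only): a timeout armed on behalf
 * of this thread may still be pending when thread_exit() runs. The loop
 * releases THREAD->lock, re-enables interrupts and retries until
 * timeout_pending clears, so the switch to the Exiting state does not race
 * with an in-flight timeout handler.
 */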

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
	thread_usleep(sec * 1000000);
}

/** Wait for another thread to exit.
 *
 * @param t Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 */
int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
{
	ipl_t ipl;
	int rc;

	if (t == THREAD)
		return EINVAL;

	/*
	 * Since thread join can only be called once on an undetached thread,
	 * the thread pointer is guaranteed to be still valid.
	 */

	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	ASSERT(!t->detached);
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);

	rc = waitq_sleep_timeout(&t->join_wq, usec, flags);

	return rc;
}
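
/*
 * Join/detach sketch (illustrative only): an undetached thread is expected to
 * be joined and then detached so that thread_destroy() eventually frees it.
 * A joiner might do:
 *
 *	rc = thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 *	thread_detach(t);
 *
 * SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE are assumed here to be the usual
 * constants from synch.h; any usec/flags combination accepted by
 * waitq_sleep_timeout() can be passed through instead.
 */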

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already in the Lingering
 * state, deallocate its resources.
 *
 * @param t Thread to be detached.
 */
void thread_detach(thread_t *t)
{
	ipl_t ipl;

	/*
	 * Since the thread is expected not to be already detached,
	 * the pointer to it must still be valid.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	ASSERT(!t->detached);
	if (t->state == Lingering) {
		thread_destroy(t);	/* unlocks &t->lock */
		interrupts_restore(ipl);
		return;
	} else {
		t->detached = true;
	}
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);
}
542
 
107 decky 543
/** Thread usleep
544
 *
545
 * Suspend execution of the current thread.
546
 *
547
 * @param usec Number of microseconds to sleep.
548
 *
549
 */	
1780 jermar 550
void thread_usleep(uint32_t usec)
1 jermar 551
{
552
	waitq_t wq;
553
 
554
	waitq_initialize(&wq);
555
 
1502 jermar 556
	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
1 jermar 557
}
558
 
107 decky 559
/** Register thread out-of-context invocation
560
 *
561
 * Register a function and its argument to be executed
562
 * on next context switch to the current thread.
563
 *
564
 * @param call_me      Out-of-context function.
565
 * @param call_me_with Out-of-context function argument.
566
 *
567
 */
1 jermar 568
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
569
{
413 jermar 570
	ipl_t ipl;
1 jermar 571
 
413 jermar 572
	ipl = interrupts_disable();
15 jermar 573
	spinlock_lock(&THREAD->lock);
574
	THREAD->call_me = call_me;
575
	THREAD->call_me_with = call_me_with;
576
	spinlock_unlock(&THREAD->lock);
413 jermar 577
	interrupts_restore(ipl);
1 jermar 578
}
777 palkovsky 579
 
2504 jermar 580
static bool thread_walker(avltree_node_t *node, void *arg)
2502 jermar 581
{
582
	thread_t *t;
583
 
584
	t = avltree_get_instance(node, thread_t, threads_tree_node);
585
 
586
	uint64_t cycles;
587
	char suffix;
588
	order(t->cycles, &cycles, &suffix);
589
 
590
	printf("%-6llu %-10s %#10zx %-8s %#10zx %-3ld %#10zx %#10zx %9llu%c ",
591
	    t->tid, t->name, t, thread_states[t->state], t->task,
592
	    t->task->context, t->thread_code, t->kstack, cycles, suffix);
593
 
594
	if (t->cpu)
595
		printf("%-4zd", t->cpu->id);
596
	else
597
		printf("none");
598
 
599
	if (t->state == Sleeping)
600
		printf(" %#10zx", t->sleep_queue);
601
 
602
	printf("\n");
2504 jermar 603
 
604
	return true;
2502 jermar 605
}
606
 
777 palkovsky 607
/** Print list of threads debug info */
608
void thread_print_list(void)
609
{
610
	ipl_t ipl;
611
 
612
	/* Messing with thread structures, avoid deadlock */
613
	ipl = interrupts_disable();
614
	spinlock_lock(&threads_lock);
2030 decky 615
 
2087 jermar 616
	printf("tid    name       address    state    task       ctx code    "
2487 jermar 617
	    "   stack      cycles     cpu  waitqueue\n");
2087 jermar 618
	printf("------ ---------- ---------- -------- ---------- --- --------"
2487 jermar 619
	    "-- ---------- ---------- ---- ---------\n");
777 palkovsky 620
 
2504 jermar 621
	avltree_walk(&threads_tree, thread_walker, NULL);
1158 jermar 622
 
777 palkovsky 623
	spinlock_unlock(&threads_lock);
1060 palkovsky 624
	interrupts_restore(ipl);
777 palkovsky 625
}
1066 jermar 626
 
1158 jermar 627
/** Check whether thread exists.
628
 *
629
 * Note that threads_lock must be already held and
630
 * interrupts must be already disabled.
631
 *
632
 * @param t Pointer to thread.
633
 *
634
 * @return True if thread t is known to the system, false otherwise.
635
 */
636
bool thread_exists(thread_t *t)
637
{
2502 jermar 638
	avltree_node_t *node;
639
 
640
	node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));
1158 jermar 641
 
2502 jermar 642
	return node != NULL;
1158 jermar 643
}
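
/*
 * Note on the lookup above (informational only): thread_create() sets
 * t->threads_tree_node.key to the thread's own address, so the AVL tree is
 * effectively keyed by thread pointer. thread_exists() can therefore answer
 * "is this pointer a live, attached thread?" with a single avltree_search().
 */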

/** Update accounting of current thread.
 *
 * Note that THREAD->lock must be already held and
 * interrupts must be already disabled.
 *
 */
void thread_update_accounting(void)
{
	uint64_t time = get_cycle();
	THREAD->cycles += time - THREAD->last_cycle;
	THREAD->last_cycle = time;
}
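
/*
 * Accounting sketch (informational only): last_cycle marks the point from
 * which the current stretch of CPU time is measured; it is set in cushion()
 * when the thread first runs and is presumably refreshed by the scheduler on
 * each switch-in. thread_update_accounting() folds the elapsed cycles into
 * THREAD->cycles; on exit, cushion() transfers the total to TASK->cycles
 * unless the thread was created as uncounted.
 */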

/** Process syscall to create new thread.
 *
 */
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
    thread_id_t *uspace_thread_id)
{
	thread_t *t;
	char namebuf[THREAD_NAME_BUFLEN];
	uspace_arg_t *kernel_uarg;
	int rc;

	rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
	if (rc != 0)
		return (unative_t) rc;

	/*
	 * In case of failure, kernel_uarg will be deallocated in this function.
	 * In case of success, kernel_uarg will be freed in uinit().
	 */
	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);

	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
	if (rc != 0) {
		free(kernel_uarg);
		return (unative_t) rc;
	}

	t = thread_create(uinit, kernel_uarg, TASK,
	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
	if (t) {
		if (uspace_thread_id != NULL) {
			int rc;

			rc = copy_to_uspace(uspace_thread_id, &t->tid,
			    sizeof(t->tid));
			if (rc != 0) {
				/*
				 * We have encountered a failure, but the thread
				 * has already been created. We need to undo its
				 * creation now.
				 */

				/*
				 * The new thread structure is initialized, but
				 * is still not visible to the system.
				 * We can safely deallocate it.
				 */
				slab_free(thread_slab, t);
				free(kernel_uarg);

				return (unative_t) rc;
			}
		}
		thread_attach(t, TASK);
		thread_ready(t);

		return 0;
	} else
		free(kernel_uarg);

	return (unative_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
unative_t sys_thread_exit(int uspace_status)
{
	thread_exit();
	/* Unreachable */
	return 0;
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of an 8-byte buffer in which to
 * store the current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
{
	/*
	 * No need to acquire lock on THREAD because tid
	 * remains constant for the lifespan of the thread.
	 */
	return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
	    sizeof(THREAD->tid));
}

/** @}
 */