350,12 → 350,10 |
/* |
* Attach to the containing task. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&task->lock); |
if (!task->accept_new_threads) { |
spinlock_unlock(&task->lock); |
slab_free(thread_slab, t); |
interrupts_restore(ipl); |
return NULL; |
} |
list_append(&t->th_link, &task->th_head); |
366,6 → 364,7 |
/* |
* Register this thread in the system-wide list. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&threads_lock); |
btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL); |
spinlock_unlock(&threads_lock); |
375,7 → 374,7 |
return t; |
} |
|
/** Terminate thread. |
/** Mark thread as exiting |
* |
* End current thread execution and switch it to the exiting |
* state. All pending timeouts are executed. |
438,12 → 437,17 |
|
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
|
ASSERT(!t->detached); |
|
(void) waitq_sleep_prepare(&t->join_wq); |
spinlock_unlock(&t->lock); |
|
rc = waitq_sleep_timeout_unsafe(&t->join_wq, usec, flags); |
|
waitq_sleep_finish(&t->join_wq, rc, ipl); |
interrupts_restore(ipl); |
|
rc = waitq_sleep_timeout(&t->join_wq, usec, flags); |
|
return rc; |
} |
|
462,6 → 466,7 |
* Since the thread is expected to not be already detached, |
* pointer to it must be still valid. |
*/ |
|
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
ASSERT(!t->detached); |