Subversion Repositories HelenOS


/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>
#include <console/klog.h>


/** Thread states */
char *thread_states[] = {
    "Invalid",
    "Running",
    "Sleeping",
    "Ready",
    "Entering",
    "Exiting",
    "Lingering"
};

/** Lock protecting the threads_btree B+tree.
 *
 * For locking rules, see declaration thereof.
 */
SPINLOCK_INITIALIZE(threads_lock);

/** B+tree of all threads.
 *
 * When a thread is found in the threads_btree B+tree, it is guaranteed to
 * exist as long as the threads_lock is held.
 */
btree_t threads_btree;

SPINLOCK_INITIALIZE(tidlock);
thread_id_t last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * Entered with interrupts disabled and THREAD->lock held.
 *
 */
static void cushion(void)
{
    void (*f)(void *) = THREAD->thread_code;
    void *arg = THREAD->thread_arg;
    THREAD->last_cycle = get_cycle();

    /* This is where each thread wakes up after its creation */
    spinlock_unlock(&THREAD->lock);
    interrupts_enable();

    f(arg);

    /* Accumulate accounting to the task */
    ipl_t ipl = interrupts_disable();

    spinlock_lock(&THREAD->lock);
    if (!THREAD->uncounted) {
        thread_update_accounting();
        uint64_t cycles = THREAD->cycles;
        THREAD->cycles = 0;
        spinlock_unlock(&THREAD->lock);

        spinlock_lock(&TASK->lock);
        TASK->cycles += cycles;
        spinlock_unlock(&TASK->lock);
    } else
        spinlock_unlock(&THREAD->lock);

    interrupts_restore(ipl);

    thread_exit();
    /* not reached */
}

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
    thread_t *t = (thread_t *) obj;

    spinlock_initialize(&t->lock, "thread_t_lock");
    link_initialize(&t->rq_link);
    link_initialize(&t->wq_link);
    link_initialize(&t->th_link);

    /* call the architecture-specific part of the constructor */
    thr_constructor_arch(t);

#ifdef ARCH_HAS_FPU
#ifdef CONFIG_FPU_LAZY
    t->saved_fpu_context = NULL;
#else
    t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
    if (!t->saved_fpu_context)
        return -1;
#endif
#endif

    t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
    if (!t->kstack) {
#ifdef ARCH_HAS_FPU
        if (t->saved_fpu_context)
            slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
        return -1;
    }

    return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
    thread_t *t = (thread_t *) obj;

    /* call the architecture-specific part of the destructor */
    thr_destructor_arch(t);

    frame_free(KA2PA(t->kstack));
#ifdef ARCH_HAS_FPU
    if (t->saved_fpu_context)
        slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
    return 1; /* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
    THREAD = NULL;
    atomic_set(&nrdy, 0);
    thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
        thr_constructor, thr_destructor, 0);

#ifdef ARCH_HAS_FPU
    fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
        FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

    btree_create(&threads_btree);
}

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
    cpu_t *cpu;
    runq_t *r;
    ipl_t ipl;
    int i, avg;

    ipl = interrupts_disable();

    spinlock_lock(&t->lock);

    ASSERT(t->state != Ready);

    i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

    cpu = CPU;
    if (t->flags & THREAD_FLAG_WIRED) {
        ASSERT(t->cpu != NULL);
        cpu = t->cpu;
    }
    t->state = Ready;
    spinlock_unlock(&t->lock);

    /*
     * Append t to respective ready queue on respective processor.
     */
    r = &cpu->rq[i];
    spinlock_lock(&r->lock);
    list_append(&t->rq_link, &r->rq_head);
    r->n++;
    spinlock_unlock(&r->lock);

    atomic_inc(&nrdy);
    avg = atomic_get(&nrdy) / config.cpu_active;    /* note: currently unused */
    atomic_inc(&cpu->nrdy);

    interrupts_restore(ipl);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func      Thread's implementing function.
 * @param arg       Thread's implementing function argument.
 * @param task      Task to which the thread belongs.
 * @param flags     Thread flags.
 * @param name      Symbolic name.
 * @param uncounted Thread's accounting doesn't affect accumulated task
 *          accounting.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
    int flags, char *name, bool uncounted)
{
    thread_t *t;
    ipl_t ipl;

    t = (thread_t *) slab_alloc(thread_slab, 0);
    if (!t)
        return NULL;

    /* Not needed, but good for debugging */
    memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
        0);

    ipl = interrupts_disable();
    spinlock_lock(&tidlock);
    t->tid = ++last_tid;
    spinlock_unlock(&tidlock);
    interrupts_restore(ipl);

    context_save(&t->saved_context);
    context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
        THREAD_STACK_SIZE);

    the_initialize((the_t *) t->kstack);

    ipl = interrupts_disable();
    t->saved_context.ipl = interrupts_read();
    interrupts_restore(ipl);

    memcpy(t->name, name, THREAD_NAME_BUFLEN);

    t->thread_code = func;
    t->thread_arg = arg;
    t->ticks = -1;
    t->cycles = 0;
    t->uncounted = uncounted;
    t->priority = -1;       /* start in rq[0] */
    t->cpu = NULL;
    t->flags = flags;
    t->state = Entering;
    t->call_me = NULL;
    t->call_me_with = NULL;

    timeout_initialize(&t->sleep_timeout);
    t->sleep_interruptible = false;
    t->sleep_queue = NULL;
    t->timeout_pending = 0;

    t->in_copy_from_uspace = false;
    t->in_copy_to_uspace = false;

    t->interrupted = false;
    t->detached = false;
    waitq_initialize(&t->join_wq);

    t->rwlock_holder_type = RWLOCK_NONE;

    t->task = task;

    t->fpu_context_exists = 0;
    t->fpu_context_engaged = 0;

    /* might depend on previous initialization */
    thread_create_arch(t);

    if (!(flags & THREAD_FLAG_NOATTACH))
        thread_attach(t, task);

    return t;
}
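
/*
 * Illustrative usage sketch (not part of the original file): the typical
 * pattern for starting a kernel thread with the interface above. The
 * demo_entry() function and the "demo" name are hypothetical; the
 * create-then-ready sequence mirrors sys_thread_create() below.
 */
#if 0
static void demo_entry(void *arg)
{
    /* Thread body; returning from here reaches thread_exit() via cushion(). */
}

static void demo_spawn(void)
{
    /* Create an attached kernel thread and put it on a run queue. */
    thread_t *t = thread_create(demo_entry, NULL, TASK, 0, "demo", false);
    if (t)
        thread_ready(t);
}
#endif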

/** Destroy thread memory structure
 *
 * Detach thread from all queues, cpus etc. and destroy it.
 *
 * Assume thread->lock is held!!
 */
void thread_destroy(thread_t *t)
{
    ASSERT(t->state == Exiting || t->state == Lingering);
    ASSERT(t->task);
    ASSERT(t->cpu);

    spinlock_lock(&t->cpu->lock);
    if (t->cpu->fpu_owner == t)
        t->cpu->fpu_owner = NULL;
    spinlock_unlock(&t->cpu->lock);

    spinlock_unlock(&t->lock);

    spinlock_lock(&threads_lock);
    btree_remove(&threads_btree, (btree_key_t) ((uintptr_t) t), NULL);
    spinlock_unlock(&threads_lock);

    /*
     * Detach from the containing task.
     */
    spinlock_lock(&t->task->lock);
    list_remove(&t->th_link);
    spinlock_unlock(&t->task->lock);

    /*
     * t is guaranteed to be the very last thread of its task.
     * It is safe to destroy the task.
     */
    if (atomic_predec(&t->task->refcount) == 0)
        task_destroy(t->task);

    /*
     * If the thread had a userspace context, free up its kernel_uarg
     * structure.
     */
    if (t->flags & THREAD_FLAG_USPACE) {
        ASSERT(t->thread_arg);
        free(t->thread_arg);
    }

    slab_free(thread_slab, t);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the current task and make it visible in the
 * threads_btree.
 *
 * @param t Thread to be attached to the task.
 * @param task  Task to which the thread is to be attached.
 */
void thread_attach(thread_t *t, task_t *task)
{
    ipl_t ipl;

    /*
     * Attach to the current task.
     */
    ipl = interrupts_disable();
    spinlock_lock(&task->lock);
    atomic_inc(&task->refcount);
    atomic_inc(&task->lifecount);
    list_append(&t->th_link, &task->th_head);
    spinlock_unlock(&task->lock);

    /*
     * Register this thread in the system-wide list.
     */
    spinlock_lock(&threads_lock);
    btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t,
        NULL);
    spinlock_unlock(&threads_lock);

    interrupts_restore(ipl);
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state. All pending
 * timeouts are executed.
 */
void thread_exit(void)
{
    ipl_t ipl;

    if (atomic_predec(&TASK->lifecount) == 0) {
        /*
         * We are the last thread in the task that still has not exited.
         * With the exception of the moment the task was created, new
         * threads can only be created by threads of the same task.
         * We are safe to perform cleanup.
         */
        if (THREAD->flags & THREAD_FLAG_USPACE) {
            ipc_cleanup();
            futex_cleanup();
            klog_printf("Cleanup of task %llu completed.",
                TASK->taskid);
        }
    }

restart:
    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    if (THREAD->timeout_pending) {
        /* busy waiting for timeouts in progress */
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
        goto restart;
    }

    THREAD->state = Exiting;
    spinlock_unlock(&THREAD->lock);
    scheduler();

    /* Not reached */
    while (1)
        ;
}


/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
    thread_usleep(sec * 1000000);
}

/** Wait for another thread to exit.
 *
 * @param t Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 */
int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
{
    ipl_t ipl;
    int rc;

    if (t == THREAD)
        return EINVAL;

    /*
     * Since a thread join can only be called once on an undetached thread,
     * the thread pointer is guaranteed to still be valid.
     */

    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    ASSERT(!t->detached);
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);

    rc = waitq_sleep_timeout(&t->join_wq, usec, flags);

    return rc;
}

/** Detach thread.
 *
 * Mark the thread as detached; if the thread is already in the Lingering
 * state, deallocate its resources.
 *
 * @param t Thread to be detached.
 */
void thread_detach(thread_t *t)
{
    ipl_t ipl;

    /*
     * Since the thread is expected not to be already detached,
     * the pointer to it must still be valid.
     */
    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    ASSERT(!t->detached);
    if (t->state == Lingering) {
        thread_destroy(t);  /* unlocks &t->lock */
        interrupts_restore(ipl);
        return;
    } else {
        t->detached = true;
    }
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);
}
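
/*
 * Illustrative usage sketch (not part of the original file): every thread
 * should eventually be joined and detached, or detached outright, so that
 * its resources can be reclaimed via thread_destroy(). Using
 * SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE for an indefinite join is an
 * assumption based on the synch.h conventions; demo_reap() is hypothetical.
 */
#if 0
static void demo_reap(thread_t *t)
{
    /* Block until t exits ... */
    (void) thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    /* ... then let thread_detach() free the lingering thread. */
    thread_detach(t);
}
#endif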

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
    waitq_t wq;

    waitq_initialize(&wq);

    (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}

/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me      Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    THREAD->call_me = call_me;
    THREAD->call_me_with = call_me_with;
    spinlock_unlock(&THREAD->lock);
    interrupts_restore(ipl);
}

/** Print list of threads debug info */
void thread_print_list(void)
{
    link_t *cur;
    ipl_t ipl;

    /* Messing with thread structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);

    printf("tid    name       address    state    task       ctx code    "
        "   stack      cycles     cpu  kstack     waitqueue\n");
    printf("------ ---------- ---------- -------- ---------- --- --------"
        "-- ---------- ---------- ---- ---------- ----------\n");

    for (cur = threads_btree.leaf_head.next;
        cur != &threads_btree.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            thread_t *t;

            t = (thread_t *) node->value[i];

            uint64_t cycles;
            char suffix;
            order(t->cycles, &cycles, &suffix);

            printf("%-6llu %-10s %#10zx %-8s %#10zx %-3ld %#10zx "
                "%#10zx %9llu%c ", t->tid, t->name, t,
                thread_states[t->state], t->task, t->task->context,
                t->thread_code, t->kstack, cycles, suffix);

            if (t->cpu)
                printf("%-4zd", t->cpu->id);
            else
                printf("none");

            if (t->state == Sleeping)
                printf(" %#10zx %#10zx", t->kstack,
                    t->sleep_queue);

            printf("\n");
        }
    }

    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must already be held and
 * interrupts must already be disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */
bool thread_exists(thread_t *t)
{
    btree_node_t *leaf;

    return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t),
        &leaf) != NULL;
}
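
/*
 * Illustrative usage sketch (not part of the original file): the caller-side
 * locking that thread_exists() requires. While threads_lock is held, a
 * positive answer also guarantees that *t stays valid (see the threads_btree
 * comment above). demo_validate() is a hypothetical helper.
 */
#if 0
static bool demo_validate(thread_t *t)
{
    bool found;
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&threads_lock);
    found = thread_exists(t);
    /* ... safe to dereference t here while threads_lock is held ... */
    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
    return found;
}
#endif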


/** Update accounting of current thread.
 *
 * Note that THREAD->lock must already be held and
 * interrupts must already be disabled.
 *
 */
void thread_update_accounting(void)
{
    uint64_t time = get_cycle();
    THREAD->cycles += time - THREAD->last_cycle;
    THREAD->last_cycle = time;
}
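
/*
 * Illustrative usage sketch (not part of the original file): reading an
 * up-to-date cycle count for the current thread, mirroring the pattern
 * used in cushion() above. demo_read_cycles() is a hypothetical helper.
 */
#if 0
static uint64_t demo_read_cycles(void)
{
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    thread_update_accounting();     /* fold in cycles since last update */
    uint64_t cycles = THREAD->cycles;
    spinlock_unlock(&THREAD->lock);
    interrupts_restore(ipl);
    return cycles;
}
#endif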

/** Process syscall to create new thread.
 *
 */
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
    thread_id_t *uspace_thread_id)
{
    thread_t *t;
    char namebuf[THREAD_NAME_BUFLEN];
    uspace_arg_t *kernel_uarg;
    int rc;

    rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
    if (rc != 0)
        return (unative_t) rc;

    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
    if (rc != 0) {
        free(kernel_uarg);
        return (unative_t) rc;
    }

    t = thread_create(uinit, kernel_uarg, TASK,
        THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
    if (t) {
        if (uspace_thread_id != NULL) {
            int rc;

            rc = copy_to_uspace(uspace_thread_id, &t->tid,
                sizeof(t->tid));
            if (rc != 0) {
                /*
                 * We have encountered a failure, but the thread
                 * has already been created. We need to undo its
                 * creation now.
                 */

                /*
                 * The new thread structure is initialized, but
                 * is still not visible to the system.
                 * We can safely deallocate it.
                 */
                slab_free(thread_slab, t);
                free(kernel_uarg);

                return (unative_t) rc;
            }
        }
        thread_attach(t, TASK);
        thread_ready(t);

        return 0;
    } else
        free(kernel_uarg);

    return (unative_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
unative_t sys_thread_exit(int uspace_status)
{
    thread_exit();
    /* Unreachable */
    return 0;
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of 8-byte buffer where to store
 * current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
{
    /*
     * No need to acquire lock on THREAD because tid
     * remains constant for the lifespan of the thread.
     */
    return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
        sizeof(THREAD->tid));
}

/** @}
 */