Subversion Repositories HelenOS


/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/avl.h>
#include <adt/list.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>
#include <console/klog.h>
#include <tdebug/tdebug.h>


/** Thread states */
char *thread_states[] = {
    "Invalid",
    "Running",
    "Sleeping",
    "Ready",
    "Entering",
    "Exiting",
    "Lingering"
};

/** Lock protecting the threads_tree AVL tree.
 *
 * For locking rules, see the declaration thereof.
 */
SPINLOCK_INITIALIZE(threads_lock);

/** AVL tree of all threads.
 *
 * When a thread is found in the threads_tree AVL tree, it is guaranteed to
 * exist as long as the threads_lock is held.
 */
avltree_t threads_tree;

SPINLOCK_INITIALIZE(tidlock);
thread_id_t last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * It is entered with interrupts disabled, i.e. interrupts_disable() is
 * assumed to have been called beforehand.
 *
 */
static void cushion(void)
{
    void (*f)(void *) = THREAD->thread_code;
    void *arg = THREAD->thread_arg;
    THREAD->last_cycle = get_cycle();

    /* This is where each thread wakes up after its creation */
    spinlock_unlock(&THREAD->lock);
    interrupts_enable();

    f(arg);

    /* Accumulate accounting to the task */
    ipl_t ipl = interrupts_disable();

    spinlock_lock(&THREAD->lock);
    if (!THREAD->uncounted) {
        thread_update_accounting();
        uint64_t cycles = THREAD->cycles;
        THREAD->cycles = 0;
        spinlock_unlock(&THREAD->lock);

        spinlock_lock(&TASK->lock);
        TASK->cycles += cycles;
        spinlock_unlock(&TASK->lock);
    } else
        spinlock_unlock(&THREAD->lock);

    interrupts_restore(ipl);

    thread_exit();
    /* not reached */
}

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
    thread_t *t = (thread_t *) obj;

    spinlock_initialize(&t->lock, "thread_t_lock");
    link_initialize(&t->rq_link);
    link_initialize(&t->wq_link);
    link_initialize(&t->th_link);

    /* call the architecture-specific part of the constructor */
    thr_constructor_arch(t);

#ifdef ARCH_HAS_FPU
#ifdef CONFIG_FPU_LAZY
    t->saved_fpu_context = NULL;
#else
    t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
    if (!t->saved_fpu_context)
        return -1;
#endif
#endif

    t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
    if (!t->kstack) {
#ifdef ARCH_HAS_FPU
        if (t->saved_fpu_context)
            slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
        return -1;
    }

    return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
    thread_t *t = (thread_t *) obj;

    /* call the architecture-specific part of the destructor */
    thr_destructor_arch(t);

    frame_free(KA2PA(t->kstack));
#ifdef ARCH_HAS_FPU
    if (t->saved_fpu_context)
        slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
    return 1; /* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
    THREAD = NULL;
    atomic_set(&nrdy, 0);
    thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
        thr_constructor, thr_destructor, 0);

#ifdef ARCH_HAS_FPU
    fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
        FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

    avltree_create(&threads_tree);
}

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
    cpu_t *cpu;
    runq_t *r;
    ipl_t ipl;
    int i, avg;

    ipl = interrupts_disable();

    spinlock_lock(&t->lock);

    ASSERT(!(t->state == Ready));

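    /*
     * Select the run queue: bump the thread's priority index by one,
     * but never past the last queue (RQ_COUNT - 1).
     */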
    i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

    cpu = CPU;
    if (t->flags & THREAD_FLAG_WIRED) {
        ASSERT(t->cpu != NULL);
        cpu = t->cpu;
    }
    t->state = Ready;
    spinlock_unlock(&t->lock);

    /*
     * Append t to respective ready queue on respective processor.
     */
    r = &cpu->rq[i];
    spinlock_lock(&r->lock);
    list_append(&t->rq_link, &r->rq_head);
    r->n++;
    spinlock_unlock(&r->lock);

    atomic_inc(&nrdy);
    avg = atomic_get(&nrdy) / config.cpu_active;
    atomic_inc(&cpu->nrdy);

    interrupts_restore(ipl);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func      Thread's implementing function.
 * @param arg       Thread's implementing function argument.
 * @param task      Task to which the thread belongs.
 * @param flags     Thread flags.
 * @param name      Symbolic name.
 * @param uncounted Thread's accounting doesn't affect accumulated task
 *          accounting.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
    int flags, char *name, bool uncounted)
{
    thread_t *t;
    ipl_t ipl;

    t = (thread_t *) slab_alloc(thread_slab, 0);
    if (!t)
        return NULL;

    /* Not needed, but good for debugging */
    memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
        0);

    ipl = interrupts_disable();
    spinlock_lock(&tidlock);
    t->tid = ++last_tid;
    spinlock_unlock(&tidlock);
    interrupts_restore(ipl);

    context_save(&t->saved_context);
    context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
        THREAD_STACK_SIZE);

    the_initialize((the_t *) t->kstack);

    ipl = interrupts_disable();
    t->saved_context.ipl = interrupts_read();
    interrupts_restore(ipl);

    memcpy(t->name, name, THREAD_NAME_BUFLEN);

    t->thread_code = func;
    t->thread_arg = arg;
    t->ticks = -1;
    t->cycles = 0;
    t->uncounted = uncounted;
    t->priority = -1;       /* start in rq[0] */
    t->cpu = NULL;
    t->flags = flags;
    t->state = Entering;
    t->call_me = NULL;
    t->call_me_with = NULL;

    timeout_initialize(&t->sleep_timeout);
    t->sleep_interruptible = false;
    t->sleep_queue = NULL;
    t->timeout_pending = 0;

    t->in_copy_from_uspace = false;
    t->in_copy_to_uspace = false;

    t->interrupted = false;
    t->detached = false;
    waitq_initialize(&t->join_wq);

    t->rwlock_holder_type = RWLOCK_NONE;

    t->task = task;

    t->fpu_context_exists = 0;
    t->fpu_context_engaged = 0;

    avltree_node_initialize(&t->threads_tree_node);
    t->threads_tree_node.key = (uintptr_t) t;
//  t->threads_tree_node.key = (avltree_key_t) t->tid;

    /* might depend on previous initialization */
    thread_create_arch(t);

    /* init tdebug stuff */
    tdebug_thread_init(t);

    if (!(flags & THREAD_FLAG_NOATTACH))
        thread_attach(t, task);

    return t;
}
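
/*
 * Illustrative sketch (not part of the original code): the usual pattern is
 * to create a thread with thread_create() and then make it runnable with
 * thread_ready(). The names example_worker and example_arg are hypothetical
 * placeholders.
 *
 *     thread_t *t = thread_create(example_worker, example_arg, TASK, 0,
 *         "worker", false);
 *     if (t)
 *         thread_ready(t);
 */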

/** Destroy thread memory structure
 *
 * Detach the thread from all queues, CPUs etc. and destroy it.
 *
 * Assume t->lock is held!
 */
void thread_destroy(thread_t *t)
{
    ASSERT(t->state == Exiting || t->state == Lingering);
    ASSERT(t->task);
    ASSERT(t->cpu);

    spinlock_lock(&t->cpu->lock);
    if (t->cpu->fpu_owner == t)
        t->cpu->fpu_owner = NULL;
    spinlock_unlock(&t->cpu->lock);

    spinlock_unlock(&t->lock);

    spinlock_lock(&threads_lock);
    avltree_delete(&threads_tree, &t->threads_tree_node);
    spinlock_unlock(&threads_lock);

    /*
     * Detach from the containing task.
     */
    spinlock_lock(&t->task->lock);
    list_remove(&t->th_link);
    spinlock_unlock(&t->task->lock);

    /*
     * t is guaranteed to be the very last thread of its task.
     * It is safe to destroy the task.
     */
    if (atomic_predec(&t->task->refcount) == 0)
        task_destroy(t->task);

    slab_free(thread_slab, t);
}

/** Make the thread visible to the system.
 *
 * Attach the thread structure to the specified task and make it visible in
 * the threads_tree.
 *
 * @param t    Thread to be attached to the task.
 * @param task Task to which the thread is to be attached.
 */
void thread_attach(thread_t *t, task_t *task)
{
    ipl_t ipl;

    /*
     * Attach to the specified task.
     */
    ipl = interrupts_disable();
    spinlock_lock(&task->lock);
    atomic_inc(&task->refcount);
    atomic_inc(&task->lifecount);
    list_append(&t->th_link, &task->th_head);
    spinlock_unlock(&task->lock);

    /*
     * Register this thread in the system-wide list.
     */
    spinlock_lock(&threads_lock);
    avltree_insert(&threads_tree, &t->threads_tree_node);
    spinlock_unlock(&threads_lock);

    interrupts_restore(ipl);
}

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting state. All pending
 * timeouts are executed.
 */
void thread_exit(void)
{
    ipl_t ipl;

    if (atomic_predec(&TASK->lifecount) == 0) {
        /*
         * We are the last thread in the task that still has not exited.
         * With the exception of the moment the task was created, new
         * threads can only be created by threads of the same task.
         * We are safe to perform cleanup.
         */
        if (THREAD->flags & THREAD_FLAG_USPACE) {
            ipc_cleanup();
            futex_cleanup();
            tdebug_cleanup();
            klog_printf("Cleanup of task %llu completed.",
                TASK->taskid);
        }
    }

restart:
    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    if (THREAD->timeout_pending) {
        /* busy waiting for timeouts in progress */
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
        goto restart;
    }

    THREAD->state = Exiting;
    spinlock_unlock(&THREAD->lock);
    scheduler();

    /* Not reached */
    while (1)
        ;
}


/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
    thread_usleep(sec * 1000000);
}

/** Wait for another thread to exit.
 *
 * @param t Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 */
int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
{
    ipl_t ipl;
    int rc;

    if (t == THREAD)
        return EINVAL;

    /*
     * Since thread join can only be called once on an undetached thread,
     * the thread pointer is guaranteed to be still valid.
     */

    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    ASSERT(!t->detached);
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);

    rc = waitq_sleep_timeout(&t->join_wq, usec, flags);

    return rc;
}

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already in the Lingering
 * state, deallocate its resources.
 *
 * @param t Thread to be detached.
 */
void thread_detach(thread_t *t)
{
    ipl_t ipl;

    /*
     * Since the thread is expected not to be detached yet,
     * the pointer to it must still be valid.
     */
    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    ASSERT(!t->detached);
    if (t->state == Lingering) {
        thread_destroy(t);  /* unlocks &t->lock */
        interrupts_restore(ipl);
        return;
    } else {
        t->detached = true;
    }
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);
}
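
/*
 * Illustrative sketch (not part of the original code) of the join/detach
 * protocol implemented by thread_join_timeout() and thread_detach() above:
 * the creator of an undetached thread either joins it and then detaches it
 * so that its resources get freed, or detaches it right away. Here 't' is
 * assumed to be a thread created earlier by the caller, and the usual
 * synch.h constants are assumed.
 *
 *     (void) thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 *     thread_detach(t);
 */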

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
    waitq_t wq;

    waitq_initialize(&wq);

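    /*
     * Sleep on a private wait queue that nobody will ever wake up;
     * the call below simply returns once the timeout expires.
     */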
    (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}

/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me      Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    THREAD->call_me = call_me;
    THREAD->call_me_with = call_me_with;
    spinlock_unlock(&THREAD->lock);
    interrupts_restore(ipl);
}

static bool thread_walker(avltree_node_t *node, void *arg)
{
    thread_t *t;

    t = avltree_get_instance(node, thread_t, threads_tree_node);

    uint64_t cycles;
    char suffix;
    order(t->cycles, &cycles, &suffix);

    if (sizeof(void *) == 4)
        printf("%-6llu %-10s %#10zx %-8s %#10zx %-3ld %#10zx %#10zx %9llu%c ",
            t->tid, t->name, t, thread_states[t->state], t->task,
            t->task->context, t->thread_code, t->kstack, cycles, suffix);
    else
        printf("%-6llu %-10s %#18zx %-8s %#18zx %-3ld %#18zx %#18zx %9llu%c ",
            t->tid, t->name, t, thread_states[t->state], t->task,
            t->task->context, t->thread_code, t->kstack, cycles, suffix);

    if (t->cpu)
        printf("%-4zd", t->cpu->id);
    else
        printf("none");

    if (t->state == Sleeping) {
        if (sizeof(uintptr_t) == 4)
            printf(" %#10zx", t->sleep_queue);
        else
            printf(" %#18zx", t->sleep_queue);
    }

    printf("\n");

    return true;
}

/** Print a list of threads with debugging information */
void thread_print_list(void)
{
    ipl_t ipl;

    /* Messing with thread structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);

    if (sizeof(uintptr_t) == 4) {
        printf("tid    name       address    state    task       "
            "ctx code       stack      cycles     cpu  "
            "waitqueue\n");
        printf("------ ---------- ---------- -------- ---------- "
            "--- ---------- ---------- ---------- ---- "
            "----------\n");
    } else {
        printf("tid    name       address            state    task               "
            "ctx code               stack              cycles     cpu  "
            "waitqueue\n");
        printf("------ ---------- ------------------ -------- ------------------ "
            "--- ------------------ ------------------ ---------- ---- "
            "------------------\n");
    }

    avltree_walk(&threads_tree, thread_walker, NULL);

    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */
bool thread_exists(thread_t *t)
{
    avltree_node_t *node;

    node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));

    return node != NULL;
}


/** Update accounting of current thread.
 *
 * Note that THREAD->lock must be already held and
 * interrupts must be already disabled.
 *
 */
void thread_update_accounting(void)
{
    uint64_t time = get_cycle();
    THREAD->cycles += time - THREAD->last_cycle;
    THREAD->last_cycle = time;
}

/** Process syscall to create new thread.
 *
 */
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
    thread_id_t *uspace_thread_id)
{
    thread_t *t;
    char namebuf[THREAD_NAME_BUFLEN];
    uspace_arg_t *kernel_uarg;
    int rc;

    rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
    if (rc != 0)
        return (unative_t) rc;

    /*
     * In case of failure, kernel_uarg will be deallocated in this function.
     * In case of success, kernel_uarg will be freed in uinit().
     */
    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);

    rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
    if (rc != 0) {
        free(kernel_uarg);
        return (unative_t) rc;
    }

    t = thread_create(uinit, kernel_uarg, TASK,
        THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
    if (t) {
        if (uspace_thread_id != NULL) {
            int rc;

            rc = copy_to_uspace(uspace_thread_id, &t->tid,
                sizeof(t->tid));
            if (rc != 0) {
                /*
                 * We have encountered a failure, but the thread
                 * has already been created. We need to undo its
                 * creation now.
                 */

                /*
                 * The new thread structure is initialized, but
                 * is still not visible to the system.
                 * We can safely deallocate it.
                 */
                slab_free(thread_slab, t);
                free(kernel_uarg);

                return (unative_t) rc;
            }
        }
        thread_attach(t, TASK);
        thread_ready(t);

        return 0;
    } else
        free(kernel_uarg);

    return (unative_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
unative_t sys_thread_exit(int uspace_status)
{
    thread_exit();
    /* Unreachable */
    return 0;
}

struct fbiw {
    thread_id_t tid;
    thread_t *t;
};

static bool find_by_id_walker(avltree_node_t *node, void *arg)
{
    thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);
    struct fbiw *s = (struct fbiw *) arg;

    if (t->tid == s->tid) {
        /* found it! */
        s->t = t;
        return false;
    }

    return true;    /* continue */
}

/** Find thread structure corresponding to thread ID.
 *
 * The threads_lock must be already held by the caller of this function
 * and interrupts must be disabled.
 *
 * @param id Thread ID.
 *
 * @return Thread structure address or NULL if there is no such thread ID.
 */
thread_t *thread_find_by_id(thread_id_t id)
{
    struct fbiw s;

    s.t = NULL;
    s.tid = id;

    avltree_walk(&threads_tree, find_by_id_walker, &s);

    return s.t;
/*
    // YES, it would work this nicely if threads_tree used the tid as
    // its key instead of a pointer to the thread
    avltree_node_t *node;

    node = avltree_search(&threads_tree, (avltree_key_t) id);

    if (node)
        return avltree_get_instance(node, thread_t, threads_tree_node);
    return NULL;*/
}

/** Syscall for getting TID.
 *
 * @param uspace_thread_id Userspace address of an 8-byte buffer in which
 * to store the current thread ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
{
    /*
     * No need to acquire lock on THREAD because tid
     * remains constant for the lifespan of the thread.
     */
    return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
        sizeof(THREAD->tid));
}

/** @}
 */