/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Task management.
 */

#include <main/uinit.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <panic.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <ipc/ipc.h>
#include <security/cap.h>
#include <memstr.h>
#include <print.h>
#include <lib/elf.h>
#include <errno.h>
#include <func.h>
#include <syscall/copy.h>
#include <console/klog.h>

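/*
 * Default number of pages for the userspace stack of a program started by
 * task_run_program(); the #ifndef guard allows the value to be overridden
 * elsewhere.
 */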
#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
#endif

/** Spinlock protecting the tasks_btree B+tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** B+tree of active tasks.
 *
 * A task is guaranteed to exist after it has been found in tasks_btree as
 * long as:
 * @li tasks_lock is held,
 * @li the task's lock is held, provided it was acquired before tasks_lock
 *     was released, or
 * @li the task's refcount is greater than 0.
 *
 */
btree_t tasks_btree;

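/* Counter used to assign unique task IDs; the first task created receives ID 1. */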
static task_id_t task_counter = 0;

static void ktaskclnp(void *arg);
static void ktaskgc(void *arg);

/** Initialize tasks
 *
 * Initialize kernel support for tasks.
 *
 */
void task_init(void)
{
    TASK = NULL;
    btree_create(&tasks_btree);
}


/** Create new task
 *
 * Create new task with no threads.
 *
 * @param as Task's address space.
 * @param name Symbolic name.
 *
 * @return New task's structure.
 *
 */
task_t *task_create(as_t *as, char *name)
{
    ipl_t ipl;
    task_t *ta;
    int i;

    ta = (task_t *) malloc(sizeof(task_t), 0);

    task_create_arch(ta);

    spinlock_initialize(&ta->lock, "task_ta_lock");
    list_initialize(&ta->th_head);
    ta->as = as;
    ta->name = name;
    ta->main_thread = NULL;
    ta->refcount = 0;
    ta->context = CONTEXT;

    ta->capabilities = 0;
    ta->accept_new_threads = true;
    ta->cycles = 0;

    ipc_answerbox_init(&ta->answerbox);
    for (i = 0; i < IPC_MAX_PHONES; i++)
        ipc_phone_init(&ta->phones[i]);
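    /*
     * If the global phone ipc_phone_0 has already been set up and the
     * context check passes, automatically connect this task's phone 0 to it.
     */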
    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
        ta->context)))
        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
    atomic_set(&ta->active_calls, 0);

    mutex_initialize(&ta->futexes_lock);
    btree_create(&ta->futexes);

    ipl = interrupts_disable();

    /*
     * Increment address space reference count.
     * TODO: Reconsider the locking scheme.
     */
    mutex_lock(&as->lock);
    as->refcount++;
    mutex_unlock(&as->lock);

    spinlock_lock(&tasks_lock);

    ta->taskid = ++task_counter;
    btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);

    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);

    return ta;
}

/** Destroy task.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
    task_destroy_arch(t);
    btree_destroy(&t->futexes);

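    /*
     * Drop the reference to the task's address space and destroy the
     * address space if this was the last reference.
     */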
    mutex_lock_active(&t->as->lock);
    if (--t->as->refcount == 0) {
        mutex_unlock(&t->as->lock);
        as_destroy(t->as);
        /*
         * t->as is destroyed.
         */
    } else
        mutex_unlock(&t->as->lock);

    free(t);
    TASK = NULL;
}

/** Create new task with 1 thread and run it
 *
 * @param program_addr Address of program executable image.
 * @param name Program name.
 *
 * @return Task of the running program or NULL on error.
 */
task_t * task_run_program(void *program_addr, char *name)
{
    as_t *as;
    as_area_t *a;
    int rc;
    thread_t *t1, *t2;
    task_t *task;
    uspace_arg_t *kernel_uarg;

    as = as_create(0);
    ASSERT(as);

    rc = elf_load((elf_header_t *) program_addr, as);
    if (rc != EE_OK) {
        as_destroy(as);
        return NULL;
    }

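    /*
     * Prepare the userspace arguments: uinit will enter the loaded program
     * at its ELF entry point with the stack placed at USTACK_ADDRESS.
     */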
    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    kernel_uarg->uspace_entry =
        (void *) ((elf_header_t *) program_addr)->e_entry;
    kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
    kernel_uarg->uspace_thread_function = NULL;
    kernel_uarg->uspace_thread_arg = NULL;
    kernel_uarg->uspace_uarg = NULL;

    task = task_create(as, name);
    ASSERT(task);

    /*
     * Create the stack address space area.
     */
    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
        LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
        AS_AREA_ATTR_NONE, &anon_backend, NULL);

    /*
     * Create the main thread.
     */
    t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
        "uinit", false);
    ASSERT(t1);

    /*
     * Create killer thread for the new task.
     */
    t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc", true);
    ASSERT(t2);
    thread_ready(t2);

    thread_ready(t1);

    return task;
}

/** Syscall for reading task ID from userspace.
 *
 * @param uspace_task_id Userspace address of an 8-byte buffer in which to
 * store the current task ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
    /*
     * No need to acquire lock on TASK because taskid
     * remains constant for the lifespan of the task.
     */
    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
        sizeof(TASK->taskid));
}

/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must already be held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param id Task ID.
 *
 * @return Task structure address or NULL if there is no such task ID.
 */
task_t *task_find_by_id(task_id_t id)
{
    btree_node_t *leaf;

    return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
}

/** Get accounting data of given task.
 *
 * Note that the lock of task 't' must already be held and
 * interrupts must already be disabled.
 *
 * @param t Pointer to the task.
 *
 * @return Number of cycles used by the task and all its counted threads.
 */
uint64_t task_get_accounting(task_t *t)
{
    /* Accumulated value of task */
    uint64_t ret = t->cycles;

    /* Current values of threads */
    link_t *cur;
    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
        thread_t *thr = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&thr->lock);
        /* Process only counted threads */
        if (!thr->uncounted) {
            if (thr == THREAD) {
                /* Update accounting of current thread */
                thread_update_accounting();
            }
            ret += thr->cycles;
        }
        spinlock_unlock(&thr->lock);
    }

    return ret;
}

/** Kill task.
 *
 * @param id ID of the task to be killed.
 *
 * @return 0 on success or an error code from errno.h.
 */
int task_kill(task_id_t id)
{
    ipl_t ipl;
    task_t *ta;
    thread_t *t;
    link_t *cur;

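    /* The very first task (task ID 1) must not be killed. */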
    if (id == 1)
        return EPERM;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    if (!(ta = task_find_by_id(id))) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

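    /*
     * Take a temporary reference so that the task is guaranteed to exist
     * after tasks_lock is released (see the tasks_btree documentation).
     */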
    spinlock_lock(&ta->lock);
    ta->refcount++;
    spinlock_unlock(&ta->lock);

    btree_remove(&tasks_btree, ta->taskid, NULL);
    spinlock_unlock(&tasks_lock);

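    /*
     * Create a cleanup thread that will join the task's remaining threads
     * and release the task's resources.
     */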
    t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp", true);

    spinlock_lock(&ta->lock);
    ta->accept_new_threads = false;
    ta->refcount--;

    /*
     * Interrupt all threads except ktaskclnp.
     */
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
        thread_t *thr;
        bool sleeping = false;

        thr = list_get_instance(cur, thread_t, th_link);
        if (thr == t)
            continue;

        spinlock_lock(&thr->lock);
        thr->interrupted = true;
        if (thr->state == Sleeping)
            sleeping = true;
        spinlock_unlock(&thr->lock);

        if (sleeping)
            thread_interrupt_sleep(thr);
    }

    spinlock_unlock(&ta->lock);
    interrupts_restore(ipl);

    if (t)
        thread_ready(t);

    return 0;
}

/** Print task list */
void task_print_list(void)
{
    link_t *cur;
    ipl_t ipl;

    /* Messing with thread structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    printf("taskid name       ctx address    as         cycles     threads "
        "calls  callee\n");
    printf("------ ---------- --- ---------- ---------- ---------- ------- "
        "------ ------>\n");

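    /* Walk all leaf nodes of tasks_btree and print one line per task. */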
    for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
        cur = cur->next) {
        btree_node_t *node;
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            task_t *t;
            int j;

            t = (task_t *) node->value[i];

            spinlock_lock(&t->lock);

            uint64_t cycles;
            char suffix;
            order(task_get_accounting(t), &cycles, &suffix);

            printf("%-6lld %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
                "%6zd", t->taskid, t->name, t->context, t, t->as,
                cycles, suffix, t->refcount,
                atomic_get(&t->active_calls));
            for (j = 0; j < IPC_MAX_PHONES; j++) {
                if (t->phones[j].callee)
                    printf(" %zd:%#zx", j,
                        t->phones[j].callee);
            }
            printf("\n");

            spinlock_unlock(&t->lock);
        }
    }

    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);
}

/** Kernel thread used to clean up the task after it is killed. */
void ktaskclnp(void *arg)
{
    ipl_t ipl;
    thread_t *t = NULL, *main_thread;
    link_t *cur;
    bool again;

    thread_detach(THREAD);

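    /*
     * Repeatedly join and detach the task's remaining threads (the main
     * thread is joined and detached by ktaskgc) until only this cleanup
     * thread is left, then release the task's IPC and futex resources.
     */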
loop:
    ipl = interrupts_disable();
    spinlock_lock(&TASK->lock);

    main_thread = TASK->main_thread;

    /*
     * Find a thread to join.
     */
    again = false;
    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
        t = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&t->lock);
        if (t == THREAD) {
            spinlock_unlock(&t->lock);
            continue;
        } else if (t == main_thread) {
            spinlock_unlock(&t->lock);
            continue;
        } else if (t->join_type != None) {
            spinlock_unlock(&t->lock);
            again = true;
            continue;
        } else {
            t->join_type = TaskClnp;
            spinlock_unlock(&t->lock);
            again = false;
            break;
        }
    }

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);

    if (again) {
        /*
         * Other cleanup (e.g. ktaskgc) is in progress.
         */
        scheduler();
        goto loop;
    }

    if (t != THREAD) {
        ASSERT(t != main_thread);   /* uinit is joined and detached
                         * in ktaskgc */
        thread_join(t);
        thread_detach(t);
        goto loop;          /* go for another thread */
    }

    /*
     * Now there are no other threads in this task
     * and no new threads can be created.
     */

    ipc_cleanup();
    futex_cleanup();
    klog_printf("Cleanup of task %lld completed.", TASK->taskid);
}

/** Kernel thread used to kill the userspace task when its main thread exits.
 *
 * This thread waits until the main userspace thread (i.e. uinit) exits.
 * When this happens, the task is killed. In the meantime, exited threads
 * are garbage collected.
 *
 * @param arg Pointer to the thread structure of the task's main thread.
 */
void ktaskgc(void *arg)
{
    thread_t *t = (thread_t *) arg;
loop:
    /*
     * Userspace threads cannot detach themselves,
     * therefore the thread pointer is guaranteed to be valid.
     */
    if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
        ESYNCH_TIMEOUT) {   /* sleep uninterruptibly here! */
        ipl_t ipl;
        link_t *cur;
        thread_t *thr = NULL;

        /*
         * The join timed out. Try to do some garbage collection of
         * Undead threads.
         */
more_gc:
        ipl = interrupts_disable();
        spinlock_lock(&TASK->lock);

        for (cur = TASK->th_head.next; cur != &TASK->th_head;
            cur = cur->next) {
            thr = list_get_instance(cur, thread_t, th_link);
            spinlock_lock(&thr->lock);
            if (thr != t && thr->state == Undead &&
                thr->join_type == None) {
                thr->join_type = TaskGC;
                spinlock_unlock(&thr->lock);
                break;
            }
            spinlock_unlock(&thr->lock);
            thr = NULL;
        }
        spinlock_unlock(&TASK->lock);
        interrupts_restore(ipl);

        if (thr) {
            thread_join(thr);
            thread_detach(thr);
            scheduler();
            goto more_gc;
        }

        goto loop;
    }
    thread_detach(t);
    task_kill(TASK->taskid);
}

/** @}
 */