/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief   Task management.
 */

#include <main/uinit.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <panic.h>
#include <adt/avl.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <ipc/ipc.h>
#include <security/cap.h>
#include <memstr.h>
#include <print.h>
#include <lib/elf.h>
#include <errno.h>
#include <func.h>
#include <syscall/copy.h>

#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
#endif

/** Spinlock protecting the tasks_tree AVL tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** AVL tree of active tasks.
 *
 * A task found in the tasks_tree is guaranteed to exist as long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held, provided it was acquired before tasks_lock
 *     was released, or
 * @li the task's refcount is greater than 0.
 *
 */
avltree_t tasks_tree;

static task_id_t task_counter = 0;

/** Initialize tasks
 *
 * Initialize kernel support for tasks.
 *
 */
void task_init(void)
{
    TASK = NULL;
    avltree_create(&tasks_tree);
}

/** Walker that remembers a single task different from TASK.
 *
 * @param node AVL tree node holding the candidate task.
 * @param arg Pointer to a task_t pointer where the found task is stored.
 *
 * @return False (stop the walk) when a task other than TASK is found,
 *     true (continue the walk) otherwise.
 */
static bool task_done_walker(avltree_node_t *node, void *arg)
{
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
    task_t **tp = (task_t **) arg;

    if (t != TASK) {
        *tp = t;
        return false;   /* stop walking */
    }

    return true;    /* continue the walk */
}

/** Kill all tasks except the current task.
 *
 */
void task_done(void)
{
    task_t *t;
    do { /* Repeat while there are tasks other than TASK */

        /* Messing with task structures, avoid deadlock */
        ipl_t ipl = interrupts_disable();
        spinlock_lock(&tasks_lock);

        t = NULL;
        avltree_walk(&tasks_tree, task_done_walker, &t);

        if (t != NULL) {
            task_id_t id = t->taskid;

            spinlock_unlock(&tasks_lock);
            interrupts_restore(ipl);

#ifdef CONFIG_DEBUG
            printf("Killing task %llu\n", id);
#endif
            task_kill(id);
            thread_usleep(10000);
        } else {
            spinlock_unlock(&tasks_lock);
            interrupts_restore(ipl);
        }

    } while (t != NULL);
}

/** Create a new task
 *
 * Create a new task with no threads.
 *
 * @param as Task's address space.
 * @param name Symbolic name.
 *
 * @return New task's structure.
 *
 */
task_t *task_create(as_t *as, char *name)
{
    ipl_t ipl;
    task_t *ta;
    int i;

    ta = (task_t *) malloc(sizeof(task_t), 0);

    task_create_arch(ta);

    spinlock_initialize(&ta->lock, "task_ta_lock");
    list_initialize(&ta->th_head);
    ta->as = as;
    ta->name = name;
    atomic_set(&ta->refcount, 0);
    atomic_set(&ta->lifecount, 0);
    ta->context = CONTEXT;

    ta->capabilities = 0;
    ta->cycles = 0;

    /* Init debugging stuff */
    ta->dt_state = UDEBUG_TS_INACTIVE;
    ta->debug_begin_call = NULL;
    ta->not_stoppable_count = 0;

    ipc_answerbox_init(&ta->kernel_box, ta);
    ta->kb_thread = NULL;

    ipc_answerbox_init(&ta->answerbox, ta);
    for (i = 0; i < IPC_MAX_PHONES; i++)
        ipc_phone_init(&ta->phones[i]);
    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
        ta->context)))
        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
    atomic_set(&ta->active_calls, 0);

    mutex_initialize(&ta->futexes_lock);
    btree_create(&ta->futexes);

    ipl = interrupts_disable();

    /*
     * Increment address space reference count.
     */
    atomic_inc(&as->refcount);

    spinlock_lock(&tasks_lock);
    ta->taskid = ++task_counter;
    avltree_node_initialize(&ta->tasks_tree_node);
    ta->tasks_tree_node.key = ta->taskid;
    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);

    return ta;
}

/** Destroy task.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
    /*
     * Remove the task from the tasks_tree AVL tree.
     */
    spinlock_lock(&tasks_lock);
    avltree_delete(&tasks_tree, &t->tasks_tree_node);
    spinlock_unlock(&tasks_lock);

    /*
     * Perform architecture specific task destruction.
     */
    task_destroy_arch(t);

    /*
     * Free up dynamically allocated state.
     */
    btree_destroy(&t->futexes);

    /*
     * Drop our reference to the address space.
     */
    if (atomic_predec(&t->as->refcount) == 0)
        as_destroy(t->as);

    free(t);
    TASK = NULL;
}

/** Create a new task with one thread and run it
 *
 * @param program_addr Address of the program's executable image.
 * @param name Program name.
 *
 * @return Task of the running program or NULL on error.
 */
task_t *task_run_program(void *program_addr, char *name)
{
    as_t *as;
    as_area_t *a;
    unsigned int rc;
    thread_t *t;
    task_t *task;
    uspace_arg_t *kernel_uarg;

    as = as_create(0);
    ASSERT(as);

    rc = elf_load((elf_header_t *) program_addr, as);
    if (rc != EE_OK) {
        as_destroy(as);
        return NULL;
    }

    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    kernel_uarg->uspace_entry =
        (void *) ((elf_header_t *) program_addr)->e_entry;
    kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
    kernel_uarg->uspace_thread_function = NULL;
    kernel_uarg->uspace_thread_arg = NULL;
    kernel_uarg->uspace_uarg = NULL;

    task = task_create(as, name);
    ASSERT(task);

    /*
     * Create the stack as_area.
     */
    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
        LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
        AS_AREA_ATTR_NONE, &anon_backend, NULL);

    /*
     * Create the main thread.
     */
    t = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
        "uinit", false);
    ASSERT(t);

    thread_ready(t);

    return task;
}
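
/*
 * Usage sketch (hypothetical caller): starting a userspace program from an
 * ELF image already present in memory. The symbol image_addr is a
 * placeholder for wherever the boot process placed the image.
 *
 *     task_t *t = task_run_program(image_addr, "init");
 *     if (t == NULL)
 *         printf("Failed to spawn the program.\n");
 */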

/** Syscall for obtaining the current task ID.
 *
 * @param uspace_task_id Userspace address of an 8-byte buffer where the
 * current task ID will be stored.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
    /*
     * No need to acquire lock on TASK because taskid
     * remains constant for the lifespan of the task.
     */
    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
        sizeof(TASK->taskid));
}

/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must already be held by the caller of this function and
 * interrupts must be disabled.
 *
 * @param id Task ID.
 *
 * @return Task structure address or NULL if there is no such task ID.
 */
task_t *task_find_by_id(task_id_t id)
{
    avltree_node_t *node;

    node = avltree_search(&tasks_tree, (avltree_key_t) id);

    if (node)
        return avltree_get_instance(node, task_t, tasks_tree_node);
    return NULL;
}
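
/*
 * Usage sketch: task_find_by_id() may only be called with interrupts
 * disabled and tasks_lock held, following the same pattern as task_kill()
 * below:
 *
 *     ipl_t ipl = interrupts_disable();
 *     spinlock_lock(&tasks_lock);
 *     task_t *t = task_find_by_id(id);
 *     if (t) {
 *         ... use t while tasks_lock is still held ...
 *     }
 *     spinlock_unlock(&tasks_lock);
 *     interrupts_restore(ipl);
 */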

/** Get accounting data of given task.
 *
 * Note that the task lock of 't' must already be held and interrupts must
 * be disabled.
 *
 * @param t Pointer to the task.
 *
 * @return Number of cycles accumulated by the task and its counted threads.
 */
uint64_t task_get_accounting(task_t *t)
{
    /* Accumulated value of task */
    uint64_t ret = t->cycles;

    /* Current values of threads */
    link_t *cur;
    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
        thread_t *thr = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&thr->lock);
        /* Process only counted threads */
        if (!thr->uncounted) {
            if (thr == THREAD) {
                /* Update accounting of current thread */
                thread_update_accounting();
            }
            ret += thr->cycles;
        }
        spinlock_unlock(&thr->lock);
    }

    return ret;
}
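
/*
 * Usage sketch: the caller is expected to disable interrupts and hold the
 * task's lock across the call, as task_print_walker() below does:
 *
 *     spinlock_lock(&t->lock);
 *     uint64_t cycles = task_get_accounting(t);
 *     spinlock_unlock(&t->lock);
 */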

/** Kill task.
 *
 * This function is idempotent.
 * It signals all the task's threads to exit.
 *
 * @param id ID of the task to be killed.
 *
 * @return 0 on success or an error code from errno.h.
 */
int task_kill(task_id_t id)
{
    ipl_t ipl;
    task_t *ta;
    link_t *cur;

    if (id == 1)
        return EPERM;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);
    if (!(ta = task_find_by_id(id))) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }
    spinlock_unlock(&tasks_lock);

    /*
     * Interrupt all threads of the task.
     */
    spinlock_lock(&ta->lock);
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
        thread_t *thr;
        bool sleeping = false;

        thr = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&thr->lock);
        thr->interrupted = true;
        if (thr->state == Sleeping)
            sleeping = true;
        spinlock_unlock(&thr->lock);

        if (sleeping)
            waitq_interrupt_sleep(thr);
    }
    spinlock_unlock(&ta->lock);
    interrupts_restore(ipl);

    return 0;
}

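/** Print a single line of the task list.
 *
 * AVL tree walker used by task_print_list(). It always returns true so that
 * the walk visits every task in the tasks_tree.
 */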
static bool task_print_walker(avltree_node_t *node, void *arg)
{
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
    int j;

    spinlock_lock(&t->lock);

    uint64_t cycles;
    char suffix;
    order(task_get_accounting(t), &cycles, &suffix);

    if (sizeof(void *) == 4)
        printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd %6zd",
            t->taskid, t->name, t->context, t, t->as, cycles, suffix,
            t->refcount, atomic_get(&t->active_calls));
    else
        printf("%-6llu %-10s %-3ld %#18zx %#18zx %9llu%c %7zd %6zd",
            t->taskid, t->name, t->context, t, t->as, cycles, suffix,
            t->refcount, atomic_get(&t->active_calls));
    for (j = 0; j < IPC_MAX_PHONES; j++) {
        if (t->phones[j].callee)
            printf(" %zd:%#zx", j, t->phones[j].callee);
    }
    printf("\n");

    spinlock_unlock(&t->lock);
    return true;
}

/** Print task list */
void task_print_list(void)
{
    ipl_t ipl;

    /* Messing with task structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    if (sizeof(void *) == 4) {
        printf("taskid name       ctx address    as         "
            "cycles     threads calls  callee\n");
        printf("------ ---------- --- ---------- ---------- "
            "---------- ------- ------ ------>\n");
    } else {
        printf("taskid name       ctx address            as                 "
            "cycles     threads calls  callee\n");
        printf("------ ---------- --- ------------------ ------------------ "
            "---------- ------- ------ ------>\n");
    }

    avltree_walk(&tasks_tree, task_print_walker, NULL);

    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);
}

/** @}
 */