Subversion Repositories HelenOS


/*
 * Copyright (c) 2008 Jiri Svoboda
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup generic
 * @{
 */

/**
 * @file
 * @brief   Udebug operations.
 */

#include <console/klog.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch.h>
#include <errno.h>
#include <syscall/copy.h>
#include <ipc/ipc.h>
#include <udebug/udebug.h>
#include <udebug/udebug_ops.h>
/**
 * Prepare a thread for a debugging operation.
 *
 * Simply put, return thread t with t->debug_lock held,
 * but only if it passes all the checks below.
 *
 * Specifically, verifies that thread t exists, is a userspace thread
 * and belongs to the current task (TASK). Also verifies that the thread
 * does (or does not) have GO, according to having_go (typically false).
 * It also locks t->debug_lock, making sure that t->debug_active is true,
 * i.e. that the thread is in a valid debugging session.
 *
 * Returns EOK if all went well, or an error code otherwise.
 * Interrupts must already be disabled when calling this function.
 *
 * Note: This function sports complicated locking.
 */
static int _thread_op_begin(thread_t *t, bool having_go)
{
    int rc;
    task_id_t taskid;

    taskid = TASK->taskid;

    /* Must lock threads_lock to ensure continued existence of the thread */
    spinlock_lock(&threads_lock);

    if (!thread_exists(t)) {
        spinlock_unlock(&threads_lock);
        return ENOENT;
    }

    spinlock_lock(&t->debug_lock);
    spinlock_lock(&t->lock);

    /* Now verify that it belongs to the current task */
    if (t->task != TASK) {
        /* No such thread belonging to the calling task */
        rc = ENOENT;
        goto error_exit;
    }

    /* Verify that 't' is a userspace thread */
    if ((t->flags & THREAD_FLAG_USPACE) == 0) {
        /* It's not, deny its existence */
        rc = ENOENT;
        goto error_exit;
    }

    if ((t->debug_active != true) || ((!t->debug_stop) != having_go)) {
        /* Not in debugging session or undesired GO state */
        rc = EINVAL;
        goto error_exit;
    }

    spinlock_unlock(&threads_lock);
    spinlock_unlock(&t->lock);

    /* Only t->debug_lock left */

    return EOK; /* All went well */


    /* Executed when a check on the thread fails */
error_exit:
    spinlock_unlock(&t->lock);
    spinlock_unlock(&t->debug_lock);
    spinlock_unlock(&threads_lock);

    /* No locks left here */
    return rc;  /* Some error occurred */
}

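/** Release the debug lock acquired by a matching _thread_op_begin() call. */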
static void _thread_op_end(thread_t *t)
{
    spinlock_unlock(&t->debug_lock);
}

/**
 * Start debugging the current task.
 *
 * \return 0 (ok, but not done yet), 1 (done) or negative error code.
 */
int udebug_begin(call_t *call)
{
    ipl_t ipl;
    int reply;

    thread_t *t;
    link_t *cur;

    klog_printf("udebug_begin()");

    ipl = interrupts_disable();
    klog_printf("debugging task %llu", TASK->taskid);

    spinlock_lock(&TASK->lock);

    if (TASK->dt_state != UDEBUG_TS_INACTIVE) {
        spinlock_unlock(&TASK->lock);
        interrupts_restore(ipl);
        klog_printf("udebug_begin(): busy error");

        return EBUSY;
    }

    TASK->dt_state = UDEBUG_TS_BEGINNING;
    TASK->debug_begin_call = call;
    TASK->debugger = call->sender;

    if (TASK->not_stoppable_count == 0) {
        TASK->dt_state = UDEBUG_TS_ACTIVE;
        TASK->debug_begin_call = NULL;
        reply = 1; /* immediate reply */
    } else {
        reply = 0; /* no reply */
    }

    /* Set debug_active on all of the task's userspace threads */

    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
        t = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&t->debug_lock);
        if ((t->flags & THREAD_FLAG_USPACE) != 0)
            t->debug_active = true;
        spinlock_unlock(&t->debug_lock);
    }

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);

    klog_printf("udebug_begin() done (%s)",
        reply ? "reply" : "stoppability wait");

    return reply;
}

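/**
 * End debugging of the current task and clean up its debugging state.
 *
 * \return 0 on success, EINVAL on failure.
 */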
int udebug_end(void)
{
    ipl_t ipl;
    int rc;

    klog_printf("udebug_end()");

    ipl = interrupts_disable();
    spinlock_lock(&TASK->lock);

    rc = udebug_task_cleanup(TASK);

    klog_printf("task %llu", TASK->taskid);

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);

    if (rc < 0) return EINVAL;

    return 0;
}

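/**
 * Resume a stopped thread t (give it GO).
 *
 * The call @a call is stored as the thread's GO call and is answered
 * later, when a debug event (such as STOP) occurs.
 *
 * \return 0 on success or an error code from _thread_op_begin().
 */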
int udebug_go(thread_t *t, call_t *call)
{
    ipl_t ipl;
    int rc;

    klog_printf("udebug_go()");

    ipl = interrupts_disable();

    /* On success, this will lock t->debug_lock */
    rc = _thread_op_begin(t, false);
    if (rc != EOK) {
        interrupts_restore(ipl);
        return rc;
    }

    t->debug_go_call = call;
    t->debug_stop = false;
    t->cur_event = 0;   /* none */

    /*
     * Neither t's lock nor threads_lock may be held during wakeup
     */
    waitq_wakeup(&t->go_wq, WAKEUP_FIRST);

    _thread_op_end(t);
    interrupts_restore(ipl);

    return 0;
}

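/**
 * Stop thread t, i.e. take GO away from it.
 *
 * If the thread is not currently stoppable, the answer is deferred until
 * it becomes stoppable; otherwise its GO call is answered right away
 * with a STOP event.
 *
 * \return 0 on success or an error code from _thread_op_begin().
 */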
int udebug_stop(thread_t *t, call_t *call)
{
    ipl_t ipl;
    int rc;

    klog_printf("udebug_stop()");

    ipl = interrupts_disable();

    /*
     * On success, this will lock t->debug_lock. Note that this makes sure
     * the thread is not stopped.
     */
    rc = _thread_op_begin(t, true);
    if (rc != EOK) {
        interrupts_restore(ipl);
        return rc;
    }

    /* Take GO away from the thread */
    t->debug_stop = true;

    if (!t->debug_stoppable) {
        /* Answer will be sent when the thread becomes stoppable */
        _thread_op_end(t);
        interrupts_restore(ipl);
        return 0;
    }

    /*
     * Answer GO call
     */
    klog_printf("udebug_stop - answering go call");

    /* Make sure nobody takes this call away from us */
    call = t->debug_go_call;
    t->debug_go_call = NULL;

    IPC_SET_RETVAL(call->data, 0);
    IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP);
    klog_printf("udebug_stop/ipc_answer");

    THREAD->cur_event = UDEBUG_EVENT_STOP;
    _thread_op_end(t);

    spinlock_lock(&TASK->lock);
    ipc_answer(&TASK->answerbox, call);
    spinlock_unlock(&TASK->lock);

    interrupts_restore(ipl);
    klog_printf("udebug_stop/done");
    return 0;
}

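/**
 * Read the list of the current task's userspace thread IDs.
 *
 * Allocates a buffer, fills it with the IDs (thread structure pointers are
 * used as ID hashes) and returns it in @a buffer; the number of bytes
 * actually written is stored in @a n.
 *
 * \return 0 on success, ENOMEM or EINVAL on failure.
 */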
int udebug_thread_read(void **buffer, size_t buf_size, size_t *n)
{
    thread_t *t;
    link_t *cur;
    unative_t tid;
    unsigned copied_ids;
    ipl_t ipl;
    unative_t *id_buffer;
    int flags;
    size_t max_ids;

    klog_printf("udebug_thread_read()");

    /* Allocate a buffer to hold thread IDs */
    id_buffer = malloc(buf_size, 0);
    if (!id_buffer) return ENOMEM;

    ipl = interrupts_disable();
    spinlock_lock(&TASK->lock);

    /* Verify task state */
    if (TASK->dt_state != UDEBUG_TS_ACTIVE) {
        spinlock_unlock(&TASK->lock);
        interrupts_restore(ipl);
        free(id_buffer);

        return EINVAL;
    }

    /* Copy down the thread IDs */

    max_ids = buf_size / sizeof(unative_t);
    copied_ids = 0;

    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
        /* Do not write past end of buffer */
        if (copied_ids >= max_ids) break;

        t = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&t->lock);
        flags = t->flags;
        spinlock_unlock(&t->lock);

        /* Not interested in kernel threads */
        if ((flags & THREAD_FLAG_USPACE) != 0) {
            /* Using thread struct pointer as identification hash */
            tid = (unative_t) t;
            id_buffer[copied_ids++] = tid;
        }
    }

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);

    *buffer = id_buffer;
    *n = copied_ids * sizeof(unative_t);

    return 0;
}

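/**
 * Read the arguments of the system call that thread t is currently executing.
 *
 * Only valid when the thread is stopped on a SYSCALL debug event. On success,
 * @a buffer points to a newly allocated array of six unative_t arguments.
 *
 * \return 0 on success, ENOMEM, EINVAL or an error code from
 * _thread_op_begin().
 */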
int udebug_args_read(thread_t *t, void **buffer)
{
    int rc;
    ipl_t ipl;
    unative_t *arg_buffer;

    klog_printf("udebug_args_read()");

    /* Prepare a buffer to hold the arguments */
    arg_buffer = malloc(6 * sizeof(unative_t), 0);
    if (!arg_buffer) return ENOMEM;

    ipl = interrupts_disable();

    /* On success, this will lock t->debug_lock */
    rc = _thread_op_begin(t, false);
    if (rc != EOK) {
        interrupts_restore(ipl);
        free(arg_buffer);
        return rc;
    }

    /* Additionally we need to verify that we are inside a syscall */
    if (t->cur_event != UDEBUG_EVENT_SYSCALL) {
        _thread_op_end(t);
        interrupts_restore(ipl);
        free(arg_buffer);

        return EINVAL;
    }

    /* Copy to a local buffer before releasing the lock */
    memcpy(arg_buffer, t->syscall_args, 6 * sizeof(unative_t));

    _thread_op_end(t);
    interrupts_restore(ipl);

    *buffer = arg_buffer;
    return 0;
}

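/**
 * Read the userspace register state (istate_t) of thread t.
 *
 * On success, @a buffer points to a newly allocated copy of the state and
 * @a n is set to its size.
 *
 * \return 0 on success, ENOMEM, EBUSY (istate not available) or an error
 * code from _thread_op_begin().
 */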
int udebug_regs_read(thread_t *t, void **buffer, size_t *n)
{
    istate_t *state;
    void *regs_buffer;
    int rc;
    ipl_t ipl;

    klog_printf("udebug_regs_read()");

    /* Prepare a buffer to hold the registers */
    regs_buffer = malloc(sizeof(istate_t), 0);
    if (!regs_buffer) return ENOMEM;

    ipl = interrupts_disable();

    /* On success, this will lock t->debug_lock */
    rc = _thread_op_begin(t, false);
    if (rc != EOK) {
        interrupts_restore(ipl);
        free(regs_buffer);
        return rc;
    }

    state = t->uspace_state;
    if (state == NULL) {
        _thread_op_end(t);
        interrupts_restore(ipl);
        free(regs_buffer);
        klog_printf("udebug_regs_read() - istate not available");
        return EBUSY;
    }

    /* Copy to the allocated buffer */
    memcpy(regs_buffer, state, sizeof(istate_t));

    _thread_op_end(t);
    interrupts_restore(ipl);

    *buffer = regs_buffer;
    *n = sizeof(istate_t);

    return 0;
}

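/**
 * Overwrite the userspace register state (istate_t) of thread t.
 *
 * \return 0 on success, EBUSY (istate not available) or an error code from
 * _thread_op_begin().
 */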
int udebug_regs_write(thread_t *t, void *buffer)
{
    int rc;
    istate_t *state;
    ipl_t ipl;

    klog_printf("udebug_regs_write()");

    /* Try to change the thread's uspace_state */

    ipl = interrupts_disable();

    /* On success, this will lock t->debug_lock */
    rc = _thread_op_begin(t, false);
    if (rc != EOK) {
        interrupts_restore(ipl);
        return rc;
    }

    state = t->uspace_state;
    if (state == NULL) {
        _thread_op_end(t);
        interrupts_restore(ipl);
        klog_printf("udebug_regs_write() - istate not available");

        return EBUSY;
    }

    /* Copy the whole istate structure from the provided buffer */
    memcpy(state, buffer, sizeof(istate_t));

    _thread_op_end(t);
    interrupts_restore(ipl);

    return 0;
}

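/**
 * Read @a n bytes of userspace memory starting at @a uspace_addr into a
 * newly allocated buffer returned in @a buffer.
 *
 * \return 0 on success, ENOMEM or an error code from copy_from_uspace().
 */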
int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer)
{
    void *data_buffer;
    int rc;

    klog_printf("udebug_mem_read()");

    data_buffer = malloc(n, 0);
    if (!data_buffer) return ENOMEM;

    klog_printf("udebug_mem_read: src=%u, size=%u", uspace_addr, n);

    /* NOTE: this is not strictly from a syscall... but that shouldn't
     * be a problem */
    rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n);
    if (rc) {
        free(data_buffer);
        return rc;
    }

    *buffer = data_buffer;
    return 0;
}

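/**
 * Write @a n bytes from @a data to userspace address @a uspace_addr.
 *
 * Only allowed while the task's debugging session is active.
 *
 * \return 0 on success, EBUSY or an error code from copy_to_uspace().
 */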
int udebug_mem_write(unative_t uspace_addr, void *data, size_t n)
{
    int rc;
    udebug_task_state_t dts;

    klog_printf("udebug_mem_write()");

    /* Verify task state */
    spinlock_lock(&TASK->lock);
    dts = TASK->dt_state;
    spinlock_unlock(&TASK->lock);

    if (dts != UDEBUG_TS_ACTIVE)
        return EBUSY;

    klog_printf("dst=%u, size=%u", uspace_addr, n);

    /* NOTE: this is not strictly from a syscall... but that shouldn't
     * be a problem */
    rc = copy_to_uspace((void *)uspace_addr, data, n);
    if (rc) return rc;

    return 0;
}

/** @}
 */