Subversion Repositories HelenOS

/** @addtogroup generic
 * @{
 */

/**
 * @file
 * @brief   Udebug IPC message handling.
 */

#include <console/klog.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <errno.h>
#include <ipc/ipc.h>
#include <memstr.h>
#include <mm/slab.h>
#include <syscall/copy.h>
#include <udebug/udebug.h>
#include <udebug/udebug_ipc.h>

/**
 * Get and lock a phone's callee task.
 *
 * Returns a pointer to the task to which the phone is connected,
 * with the task's lock held. Returns NULL if the phone is not
 * connected or the task no longer exists.
 *
 * Interrupts must be already disabled.
 *
 * (TODO: make sure the udebug-cleanup of the task hasn't
 * started yet)
 */
static task_t *get_lock_callee_task(phone_t *phone)
{
    answerbox_t *box;
    task_t *ta;
    task_id_t taskid;

    spinlock_lock(&phone->lock);
    if (phone->state != IPC_PHONE_CONNECTED) {
        spinlock_unlock(&phone->lock);
        return NULL;
    }

    box = phone->callee;

    spinlock_lock(&box->lock);
    ta = box->task;
    taskid = ta->taskid;
    spinlock_unlock(&box->lock);
    spinlock_unlock(&phone->lock);

    /* Locking decoupled using taskid */

    spinlock_lock(&tasks_lock);
    ta = task_find_by_id(taskid);
    if (ta == NULL) {
        spinlock_unlock(&tasks_lock);
        return NULL;
    }

    spinlock_lock(&ta->lock);
    spinlock_unlock(&tasks_lock);

    return ta;
}
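
/*
 * Usage note: the callers below invoke get_lock_callee_task() with
 * interrupts disabled, check for the NULL return and are responsible
 * for unlocking ta->lock themselves once they are done with the task.
 */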

/**
 * Verify that thread t is valid for debugging ops.
 *
 * Verifies that t belongs to task ta and that debugging operations
 * may be used on it.
 *
 * The threads_lock and t->debug_lock must already be held and
 * interrupts must be disabled.
 */
static int verify_thread(thread_t *t, task_t *ta)
{
    /* Verify that 't' exists and belongs to task 'ta' */
    if (!thread_exists(t) || (t->task != ta)) {
        return ENOENT;
    }

    /* Verify that 't' is a userspace thread */
    if ((t->flags & THREAD_FLAG_USPACE) == 0) {
        /* It's not, deny its existence */
        return ENOENT;
    }

    if ((t->debug_active != true) || (t->debug_stop != true)) {
        /* Not in debugging session or already has GO */
        return EBUSY;
    }

    return EOK;
}
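
/*
 * The request handlers below follow a common pattern when touching a
 * thread: take threads_lock, check thread_exists(), take t->debug_lock,
 * call verify_thread(), and only then drop threads_lock; debug_active ==
 * true together with t->debug_lock keeps the thread from disappearing.
 */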

static int udebug_rp_begin(call_t *call, phone_t *phone)
{
    task_t *ta;
    ipl_t ipl;
    int rc;

    thread_t *t;
    link_t *cur;

    klog_printf("debug_begin()");

    ipl = interrupts_disable();
    ta = get_lock_callee_task(phone);
    if (ta == NULL) {
        /* The phone is not connected or the task is gone */
        interrupts_restore(ipl);
        return ENOENT;
    }
    klog_printf("debugging task %llu", ta->taskid);

    if (ta->dt_state != UDEBUG_TS_INACTIVE) {
        spinlock_unlock(&ta->lock);
        interrupts_restore(ipl);
        klog_printf("debug_begin(): busy error");
        return EBUSY;
    }

    ta->dt_state = UDEBUG_TS_BEGINNING;
    ta->debug_begin_call = call;

    if (ta->not_stoppable_count == 0) {
        ta->dt_state = UDEBUG_TS_ACTIVE;
        ta->debug_begin_call = NULL;
        rc = 1; /* actually we need backsend with 0 retval */
    } else {
        rc = 0; /* no backsend */
    }

    /* Set debug_active on all of the task's userspace threads */

    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
        t = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&t->debug_lock);
        if ((t->flags & THREAD_FLAG_USPACE) != 0)
            t->debug_active = true;
        spinlock_unlock(&t->debug_lock);
    }

    spinlock_unlock(&ta->lock);
    interrupts_restore(ipl);

    klog_printf("debug_begin() done (%s)",
        rc ? "backsend" : "stoppability wait");

    return rc;
}
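
/*
 * If some of the task's threads are inside non-stoppable sections, the
 * BEGIN call is parked in ta->debug_begin_call and presumably answered
 * elsewhere once the task becomes stoppable; otherwise the caller is told
 * to answer it right away (the "backsend" case).
 */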

static int udebug_rp_end(call_t *call, phone_t *phone)
{
    task_t *ta;
    ipl_t ipl;

    thread_t *t;
    link_t *cur;
    int flags;

    klog_printf("udebug_rp_end()");

    ipl = interrupts_disable();
    ta = get_lock_callee_task(phone);
    if (ta == NULL) {
        interrupts_restore(ipl);
        return ENOENT;
    }
    klog_printf("task %llu", ta->taskid);

    if (ta->dt_state != UDEBUG_TS_BEGINNING &&
        ta->dt_state != UDEBUG_TS_ACTIVE) {
        spinlock_unlock(&ta->lock);
        interrupts_restore(ipl);
        klog_printf("udebug_rp_end(): task not being debugged");
        return EINVAL;
    }

    /* Finish debugging of all userspace threads */
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
        t = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&t->lock);

        flags = t->flags;

        spinlock_lock(&t->debug_lock);
        spinlock_unlock(&t->lock);

        /* Only process userspace threads */
        if ((flags & THREAD_FLAG_USPACE) != 0) {
            /* Prevent any further debug activity in thread */
            t->debug_active = false;

            /* Still has go? */
            if (t->debug_stop == false) {
                /*
                 * Yes, so clear go. As debug_active == false,
                 * this doesn't affect anything.
                 */
                t->debug_stop = true;

                /* Answer GO call */
                klog_printf("answer GO call with EVENT_FINISHED");
                IPC_SET_RETVAL(t->debug_go_call->data, 0);
                IPC_SET_ARG1(t->debug_go_call->data, UDEBUG_EVENT_FINISHED);
                ipc_answer(&ta->answerbox, t->debug_go_call);
            } else {
                /*
                 * Debug_stop is already at its initial value.
                 * Yet this means the thread needs waking up.
                 */

                /*
                 * t's lock must not be held when calling
                 * waitq_wakeup.
                 */
                waitq_wakeup(&t->go_wq, WAKEUP_FIRST);
            }
        }
        spinlock_unlock(&t->debug_lock);
    }

    ta->dt_state = UDEBUG_TS_INACTIVE;

    spinlock_unlock(&ta->lock);
    interrupts_restore(ipl);

    IPC_SET_RETVAL(call->data, 0);

    klog_printf("udebug_rp_end() done\n");

    return 1;
}


static int udebug_rp_go(call_t *call, phone_t *phone)
{
    thread_t *t;
    task_t *ta;
    ipl_t ipl;
    int rc;

    klog_printf("debug_go()");

    ipl = interrupts_disable();

    ta = get_lock_callee_task(phone);
    if (ta == NULL) {
        interrupts_restore(ipl);
        return ENOENT;
    }
    spinlock_unlock(&ta->lock);
    // TODO: don't lock ta

    t = (thread_t *) IPC_GET_ARG2(call->data);

    spinlock_lock(&threads_lock);
    if (!thread_exists(t)) {
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    spinlock_lock(&t->debug_lock);

    /* Verify that thread t may be operated on */
    rc = verify_thread(t, ta);
    if (rc != EOK) {
        spinlock_unlock(&t->debug_lock);
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
        return rc;
    }

    /*
     * Since t->debug_active == true and t->debug_lock is held,
     * we can safely release threads_lock and t will continue
     * to exist (and will stay in debug_active state).
     */
    spinlock_unlock(&threads_lock);

    t->debug_go_call = call;
    t->debug_stop = false;

    /*
     * Neither t's lock nor threads_lock may be held during wakeup.
     */
    waitq_wakeup(&t->go_wq, WAKEUP_FIRST);

    spinlock_unlock(&t->debug_lock);
    interrupts_restore(ipl);

    return 0; /* no backsend */
}
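
/*
 * The GO call itself is not answered here. It is stored in
 * t->debug_go_call and the woken thread keeps running until an event
 * occurs; the stored call is then answered from elsewhere in udebug
 * (see the EVENT_FINISHED answer in udebug_rp_end() above for one
 * such place).
 */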

static int udebug_rp_args_read(call_t *call, phone_t *phone)
{
    thread_t *t;
    task_t *ta;
    void *uspace_buffer;
    int rc;
    ipl_t ipl;
    unative_t buffer[6];

    klog_printf("debug_args_read()");

    ipl = interrupts_disable();
    ta = get_lock_callee_task(phone);
    if (ta == NULL) {
        interrupts_restore(ipl);
        return ENOENT;
    }
    klog_printf("task %llu", ta->taskid);
    spinlock_unlock(&ta->lock);

    t = (thread_t *) IPC_GET_ARG2(call->data);

    spinlock_lock(&threads_lock);

    if (!thread_exists(t)) {
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    spinlock_lock(&t->debug_lock);

    /* Verify that thread t may be operated on */
    rc = verify_thread(t, ta);
    if (rc != EOK) {
        spinlock_unlock(&t->debug_lock);
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
        return rc;
    }

    /*
     * We can now safely release threads_lock as debug_active == true
     * and t->debug_lock is held.
     */
    spinlock_unlock(&threads_lock);

    //FIXME: additionally we need to verify that we are inside a syscall

    /* Copy to a local buffer before releasing the lock */
    memcpy(buffer, t->syscall_args, 6 * sizeof(unative_t));

    spinlock_unlock(&t->debug_lock);
    interrupts_restore(ipl);

    /* Now copy to userspace */

    uspace_buffer = (void *)IPC_GET_ARG3(call->data);

    rc = copy_to_uspace(uspace_buffer, buffer, 6 * sizeof(unative_t));
    if (rc != 0) {
        klog_printf("debug_args_read() - copy failed");
        return rc;
    }

    klog_printf("debug_args_read() done");
    return 1; /* actually need backsend with retval 0 */
}
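
/*
 * Argument layout assumed by the per-thread requests (as read from the
 * code, not from a spec): ARG1 holds the debug method, ARG2 the thread
 * "id" (in this revision the kernel thread_t pointer), ARG3 a userspace
 * buffer and ARG4 a size where one is needed.
 */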

static int udebug_rp_regs_read(call_t *call, phone_t *phone)
{
    thread_t *t;
    task_t *ta;
    void *uspace_buffer;
    unative_t to_copy;
    int rc;
    istate_t *state;
    istate_t state_copy;
    ipl_t ipl;

    klog_printf("debug_regs_read()");

    ipl = interrupts_disable();

    ta = get_lock_callee_task(phone);
    if (ta == NULL) {
        interrupts_restore(ipl);
        return ENOENT;
    }
    spinlock_unlock(&ta->lock);
    //FIXME: don't lock ta

    spinlock_lock(&threads_lock);

    t = (thread_t *) IPC_GET_ARG2(call->data);

    if (!thread_exists(t)) {
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    spinlock_lock(&t->debug_lock);

    /* Verify that thread t may be operated on */
    rc = verify_thread(t, ta);
    if (rc != EOK) {
        spinlock_unlock(&t->debug_lock);
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
        return rc;
    }

    /*
     * We can now safely release threads_lock as debug_active == true
     * and t->debug_lock is held.
     */
    spinlock_unlock(&threads_lock);

    state = t->uspace_state;
    if (state == NULL) {
        spinlock_unlock(&t->debug_lock);
        interrupts_restore(ipl);
        klog_printf("debug_regs_read() - istate not available");
        return EBUSY;
    }

    /* Copy to a local buffer so that we can release the lock */
    memcpy(&state_copy, state, sizeof(state_copy));
    spinlock_unlock(&t->debug_lock);
    interrupts_restore(ipl);

    uspace_buffer = (void *)IPC_GET_ARG3(call->data);
    to_copy = IPC_GET_ARG4(call->data);
    if (to_copy > sizeof(istate_t))
        to_copy = sizeof(istate_t);

    rc = copy_to_uspace(uspace_buffer, &state_copy, to_copy);
    if (rc != 0) {
        klog_printf("debug_regs_read() - copy failed");
        return rc;
    }

    IPC_SET_ARG1(call->data, to_copy);
    IPC_SET_ARG2(call->data, sizeof(istate_t));

    klog_printf("debug_regs_read() done");
    return 1; /* actually need backsend with retval 0 */
}
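
/*
 * Both udebug_rp_regs_read() above and udebug_rp_regs_write() below
 * answer with ARG1 = number of bytes actually transferred and ARG2 =
 * sizeof(istate_t), presumably so that the debugger can detect a
 * mismatch between its idea of istate_t and the kernel's.
 */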

static int udebug_rp_regs_write(call_t *call, phone_t *phone)
{
    thread_t *t;
    task_t *ta;
    void *uspace_data;
    unative_t to_copy;
    int rc;
    istate_t *state;
    istate_t data_copy;
    ipl_t ipl;

    klog_printf("debug_regs_write()");

    /* First copy to a local buffer */

    uspace_data = (void *)IPC_GET_ARG3(call->data);
    to_copy = IPC_GET_ARG4(call->data);
    if (to_copy > sizeof(istate_t))
        to_copy = sizeof(istate_t);

    rc = copy_from_uspace(&data_copy, uspace_data, to_copy);
    if (rc != 0) {
        klog_printf("debug_regs_write() - copy failed");
        return rc;
    }

    /* Now try to change the thread's uspace_state */

    ipl = interrupts_disable();

    ta = get_lock_callee_task(phone);
    if (ta == NULL) {
        interrupts_restore(ipl);
        return ENOENT;
    }
    spinlock_unlock(&ta->lock);
    //FIXME: don't lock ta

    spinlock_lock(&threads_lock);

    t = (thread_t *) IPC_GET_ARG2(call->data);

    if (!thread_exists(t)) {
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    spinlock_lock(&t->debug_lock);

    /* Verify that thread t may be operated on */
    rc = verify_thread(t, ta);
    if (rc != EOK) {
        spinlock_unlock(&t->debug_lock);
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
        return rc;
    }

    /*
     * We can now safely release threads_lock as debug_active == true
     * and t->debug_lock is held.
     */
    spinlock_unlock(&threads_lock);

    state = t->uspace_state;
    if (state == NULL) {
        spinlock_unlock(&t->debug_lock);
        interrupts_restore(ipl);
        klog_printf("debug_regs_write() - istate not available");
        return EBUSY;
    }

    memcpy(t->uspace_state, &data_copy, to_copy);

    spinlock_unlock(&t->debug_lock);
    interrupts_restore(ipl);

    /* Set answer values */

    IPC_SET_ARG1(call->data, to_copy);
    IPC_SET_ARG2(call->data, sizeof(istate_t));

    klog_printf("debug_regs_write() done");
    return 1; /* actually need backsend with retval 0 */
}
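
/*
 * Note that only the first to_copy bytes of the saved istate are
 * updated, so a full register write requires the caller to pass
 * ARG4 >= sizeof(istate_t).
 */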

static int udebug_rp_thread_read(call_t *call, phone_t *phone)
{
    thread_t *t;
    link_t *cur;
    task_t *ta;
    unative_t *uspace_buffer;
    unative_t to_copy;
    int rc;
    unsigned total_bytes;
    unsigned buf_size;
    unative_t tid;
    unsigned num_threads, copied_ids;
    ipl_t ipl;
    unative_t *buffer;
    int flags;

    klog_printf("debug_thread_read()");

    ipl = interrupts_disable();
    ta = get_lock_callee_task(phone);
    if (ta == NULL) {
        interrupts_restore(ipl);
        return ENOENT;
    }

    /* Verify task state */
    if (ta->dt_state != UDEBUG_TS_ACTIVE) {
        spinlock_unlock(&ta->lock);
        interrupts_restore(ipl);
        return EBUSY;
    }

    /* Count the threads first */

    num_threads = 0;
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
        /* Count all threads, to be on the safe side */
        ++num_threads;
    }

    /* Allocate a buffer and copy down the threads' ids */
    buffer = malloc(num_threads * sizeof(unative_t), 0); // ???

    copied_ids = 0;
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
        t = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&t->lock);
        flags = t->flags;
        spinlock_unlock(&t->lock);

        /* Not interested in kernel threads */
        if ((flags & THREAD_FLAG_USPACE) != 0) {
            /* Using thread struct pointer for identification */
            tid = (unative_t) t;
            buffer[copied_ids++] = tid;
        }
    }

    spinlock_unlock(&ta->lock);
    interrupts_restore(ipl);

    /* Now copy to userspace */

    uspace_buffer = (void *)IPC_GET_ARG2(call->data);
    buf_size = IPC_GET_ARG3(call->data);

    total_bytes = copied_ids * sizeof(unative_t);

    if (buf_size > total_bytes)
        to_copy = total_bytes;
    else
        to_copy = buf_size;

    rc = copy_to_uspace(uspace_buffer, buffer, to_copy);
    free(buffer);

    if (rc != 0) {
        klog_printf("debug_thread_read() - copy failed");
        return rc;
    }

    IPC_SET_ARG1(call->data, to_copy);
    IPC_SET_ARG2(call->data, total_bytes);

    klog_printf("debug_thread_read() done");
    return 1; /* actually need backsend with retval 0 */
}
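
/*
 * THREAD_READ uses a different layout: ARG2 is the destination buffer
 * and ARG3 its size. The answer carries the number of bytes copied
 * (ARG1) and the total size needed (ARG2), so a short buffer can be
 * detected and the request retried with a larger one.
 */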

static int udebug_rp_mem_write(call_t *call, phone_t *phone)
{
    void *uspace_data;
    unative_t to_copy;
    int rc;
    void *buffer;

    klog_printf("udebug_rp_mem_write()");

    uspace_data = (void *)IPC_GET_ARG2(call->data);
    to_copy = IPC_GET_ARG4(call->data);

    buffer = malloc(to_copy, 0); // ???

    rc = copy_from_uspace(buffer, uspace_data, to_copy);
    if (rc != 0) {
        klog_printf(" - copy failed");
        free(buffer);
        return rc;
    }

    call->buffer = buffer;

    klog_printf(" - done");
    return 1; /* actually need backsend with retval 0 */
}
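
/*
 * For MEM_WRITE the data is only captured into call->buffer here, in the
 * sender's context; the actual write into the debugged task is done by
 * udebug_receive_mem_write() below, which runs in the target task's
 * context, where copy_to_uspace() operates on the right address space.
 */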


int udebug_request_preprocess(call_t *call, phone_t *phone)
{
    int rc;

    switch (IPC_GET_ARG1(call->data)) {
    case UDEBUG_M_BEGIN:
        rc = udebug_rp_begin(call, phone);
        return rc;
    case UDEBUG_M_END:
        rc = udebug_rp_end(call, phone);
        return rc;
    case UDEBUG_M_GO:
        rc = udebug_rp_go(call, phone);
        return rc;
    case UDEBUG_M_ARGS_READ:
        rc = udebug_rp_args_read(call, phone);
        return rc;
    case UDEBUG_M_REGS_READ:
        rc = udebug_rp_regs_read(call, phone);
        return rc;
    case UDEBUG_M_REGS_WRITE:
        rc = udebug_rp_regs_write(call, phone);
        return rc;
    case UDEBUG_M_THREAD_READ:
        rc = udebug_rp_thread_read(call, phone);
        return rc;
    case UDEBUG_M_MEM_WRITE:
        rc = udebug_rp_mem_write(call, phone);
        return rc;
    default:
        break;
    }

    return 0;
}
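
/*
 * Return value convention of the udebug_rp_* handlers, as far as it can
 * be read from the code above: 0 means the call is passed on with no
 * immediate answer ("no backsend"), 1 means the caller should answer it
 * right away with retval 0 ("backsend"), and an error code reports
 * failure to the requester.
 */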

static void udebug_receive_mem_read(call_t *call)
{
    unative_t uspace_dst;
    void *uspace_ptr;
    unsigned size;
    void *buffer;
    int rc;

    klog_printf("debug_mem_read()");
    uspace_dst = IPC_GET_ARG2(call->data);
    uspace_ptr = (void *)IPC_GET_ARG3(call->data);
    size = IPC_GET_ARG4(call->data);

    buffer = malloc(size, 0); // ???
    klog_printf("debug_mem_read: src=%p, size=%u", uspace_ptr, size);

    /* NOTE: this is not strictly from a syscall... but that shouldn't
     * be a problem */
    rc = copy_from_uspace(buffer, uspace_ptr, size);
    if (rc) {
        IPC_SET_RETVAL(call->data, rc);
        free(buffer);
        ipc_answer(&TASK->kernel_box, call);
        return;
    }

    klog_printf("first word: %u", *((unative_t *)buffer));

    IPC_SET_RETVAL(call->data, 0);
    /* Hack: ARG1=dest, ARG2=size as in IPC_M_DATA_READ so that
       same code in process_answer() can be used
       (no way to distinguish method in answer) */
    IPC_SET_ARG1(call->data, uspace_dst);
    IPC_SET_ARG2(call->data, size);
    call->buffer = buffer;

    ipc_answer(&TASK->kernel_box, call);
}

static void udebug_receive_mem_write(call_t *call)
{
    void *uspace_dst;
    unsigned size;
    void *buffer;
    int rc;
    udebug_task_state_t dts;

    klog_printf("udebug_receive_mem_write()");

    /* Verify task state */
    spinlock_lock(&TASK->lock);
    dts = TASK->dt_state;
    spinlock_unlock(&TASK->lock);

    if (dts != UDEBUG_TS_ACTIVE) {
        IPC_SET_RETVAL(call->data, EBUSY);
        ipc_answer(&TASK->kernel_box, call);
        return;
    }

    uspace_dst = (void *)IPC_GET_ARG3(call->data);
    size = IPC_GET_ARG4(call->data);

    buffer = call->buffer;
    klog_printf("dst=%p, size=%u", uspace_dst, size);

    /* NOTE: this is not strictly from a syscall... but that shouldn't
     * be a problem */
    rc = copy_to_uspace(uspace_dst, buffer, size);
    if (rc) {
        IPC_SET_RETVAL(call->data, rc);
        ipc_answer(&TASK->kernel_box, call);
        return;
    }

    IPC_SET_RETVAL(call->data, 0);

    free(call->buffer);
    call->buffer = NULL;

    ipc_answer(&TASK->kernel_box, call);
}


/**
 * Handle a debug call received on the kernel answerbox.
 *
 * This is called by the kbox servicing thread.
 */
void udebug_call_receive(call_t *call)
{
    int debug_method;

    debug_method = IPC_GET_ARG1(call->data);

    switch (debug_method) {
    case UDEBUG_M_MEM_READ:
        udebug_receive_mem_read(call);
        break;
    case UDEBUG_M_MEM_WRITE:
        udebug_receive_mem_write(call);
        break;
    }
}
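
/*
 * Methods without a case above fall through silently; in particular a GO
 * call is already recorded in t->debug_go_call by udebug_rp_go() and is
 * answered only when an event occurs (see udebug_rp_end() for one such
 * place).
 */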

/** @}
 */