/** @addtogroup generic
* @{
*/
/**
 * @file
 * @brief Udebug operations.
 */
#include <console/klog.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch.h>
#include <errno.h>
#include <syscall/copy.h>
#include <ipc/ipc.h>
#include <udebug/udebug.h>
#include <udebug/udebug_ops.h>
/**
 * Prepare a thread for a debugging operation.
 *
 * Simply put, return thread t with t->debug_lock held,
 * but only if it verifies all conditions.
 *
 * Specifically, verifies that thread t exists, is a userspace thread,
 * and belongs to the current task (TASK). It also locks t->debug_lock,
 * making sure that t->debug_active is true - that the thread is
 * in a valid debugging session - and that t->debug_stop is true -
 * that the thread is stopped (does not have GO).
 *
 * Returns EOK if all went well, or an error code otherwise.
 * Interrupts must be already disabled when calling this function.
 *
 * Note: This function sports complicated locking. On success only
 * t->debug_lock is left held (released via _thread_op_end()); on
 * failure no locks are held.
 */
static int _thread_op_begin(thread_t *t)
{
	int rc;

	/* Must lock threads_lock to ensure continued existence of the thread */
	spinlock_lock(&threads_lock);

	if (!thread_exists(t)) {
		spinlock_unlock(&threads_lock);
		return ENOENT;
	}

	/* Lock order: threads_lock, then t->debug_lock, then t->lock. */
	spinlock_lock(&t->debug_lock);
	spinlock_lock(&t->lock);

	/* Now verify that it's the current task */
	if (t->task != TASK) {
		/* No such thread belonging to callee */
		rc = ENOENT;
		goto error_exit;
	}

	/* Verify that 't' is a userspace thread */
	if ((t->flags & THREAD_FLAG_USPACE) == 0) {
		/* It's not, deny its existence */
		rc = ENOENT;
		goto error_exit;
	}

	if ((t->debug_active != true) || (t->debug_stop != true)) {
		/* Not in debugging session or already has GO */
		rc = ENOENT;
		goto error_exit;
	}

	spinlock_unlock(&threads_lock);
	spinlock_unlock(&t->lock);

	/* Only t->debug_lock left */
	return EOK; /* All went well */

	/* Executed when a check on the thread fails */
error_exit:
	spinlock_unlock(&t->lock);
	spinlock_unlock(&t->debug_lock);
	spinlock_unlock(&threads_lock);

	/* No locks left here */
	return rc; /* Some error occurred */
}
/** Release the lock left held by a successful _thread_op_begin(). */
static void _thread_op_end(thread_t *t)
{
	spinlock_unlock(&t->debug_lock);
}
/**
 * Start a debugging session on the current task.
 *
 * The caller of @a call becomes the task's debugger. If all of the
 * task's threads are already stoppable the session becomes active
 * immediately; otherwise the begin-call is stored and answered later,
 * once stoppability is reached.
 *
 * \return 0 (ok, but not done yet), 1 (done) or negative error code.
 */
int udebug_begin(call_t *call)
{
	ipl_t ipl;
	int reply;

	thread_t *thread;
	link_t *item;

	klog_printf("udebug_begin()");

	ipl = interrupts_disable();
	klog_printf("debugging task %llu", TASK->taskid);

	spinlock_lock(&TASK->lock);

	/* Only one debugging session per task at a time. */
	if (TASK->dt_state != UDEBUG_TS_INACTIVE) {
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);
		klog_printf("udebug_begin(): busy error");
		return EBUSY;
	}

	TASK->dt_state = UDEBUG_TS_BEGINNING;
	TASK->debug_begin_call = call;
	TASK->debugger = call->sender;

	if (TASK->not_stoppable_count == 0) {
		/* Already fully stoppable - activate right away. */
		TASK->dt_state = UDEBUG_TS_ACTIVE;
		TASK->debug_begin_call = NULL;
		reply = 1; /* immediate reply */
	} else {
		/* Must wait for the remaining threads to become stoppable. */
		reply = 0; /* no reply */
	}

	/* Set debug_active on all of the task's userspace threads */

	for (item = TASK->th_head.next; item != &TASK->th_head;
	    item = item->next) {
		thread = list_get_instance(item, thread_t, th_link);

		spinlock_lock(&thread->debug_lock);
		if ((thread->flags & THREAD_FLAG_USPACE) != 0)
			thread->debug_active = true;
		spinlock_unlock(&thread->debug_lock);
	}

	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	klog_printf("udebug_begin() done (%s)",
	    reply ? "reply" : "stoppability wait");

	return reply;
}
/** End the debugging session of the current task.
 *
 * Runs udebug_task_cleanup() on TASK under its lock.
 *
 * @return 0 on success, EINVAL if cleanup reported an error.
 */
int udebug_end(void)
{
	int rc;
	ipl_t ipl;

	klog_printf("udebug_end()");

	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);

	rc = udebug_task_cleanup(TASK);
	klog_printf("task %llu", TASK->taskid);

	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	return (rc < 0) ? EINVAL : 0;
}
/** Give thread @a t the GO message, resuming its execution.
 *
 * Stores @a call as the pending GO call, clears the stop flag and the
 * current-event marker, then wakes the thread sleeping on its go_wq.
 *
 * @param t	Thread to resume (must be stopped in a debug session).
 * @param call	The GO call to be answered when the thread stops again.
 *
 * @return 0 on success or an error code from _thread_op_begin().
 */
int udebug_go(thread_t *t, call_t *call)
{
	ipl_t ipl;
	int ret;

	klog_printf("udebug_go()");

	ipl = interrupts_disable();

	/* On success, this will lock t->debug_lock */
	ret = _thread_op_begin(t);
	if (ret != EOK) {
		interrupts_restore(ipl);
		return ret;
	}

	t->debug_go_call = call;
	t->debug_stop = false;
	t->cur_event = 0; /* none */

	/*
	 * Neither t's lock nor threads_lock may be held during wakeup
	 */
	waitq_wakeup(&t->go_wq, WAKEUP_FIRST);

	_thread_op_end(t);
	interrupts_restore(ipl);

	return 0;
}
/** Read the list of the current task's userspace thread IDs.
 *
 * Allocates a buffer holding one unative_t per userspace thread of
 * TASK (the kernel thread structure address serves as the ID) and
 * passes ownership of the buffer to the caller.
 *
 * @param buffer	Output: the allocated ID buffer.
 * @param n		Output: number of valid bytes in the buffer.
 *
 * @return 0 on success, EINVAL if the task is not being debugged.
 */
int udebug_thread_read(void **buffer, size_t *n)
{
	thread_t *thread;
	link_t *item;
	unative_t tid;
	unsigned thread_count, stored;
	ipl_t ipl;
	unative_t *id_buffer;
	int flags;

	klog_printf("udebug_thread_read()");

	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);

	/* Verify task state */
	if (TASK->dt_state != UDEBUG_TS_ACTIVE) {
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);
		return EINVAL;
	}

	/* Count the threads first */

	thread_count = 0;
	for (item = TASK->th_head.next; item != &TASK->th_head;
	    item = item->next) {
		/* Count all threads, to be on the safe side */
		++thread_count;
	}

	/* Allocate a buffer and copy down the threads' ids */
	id_buffer = malloc(thread_count * sizeof(unative_t), 0);

	stored = 0;
	for (item = TASK->th_head.next; item != &TASK->th_head;
	    item = item->next) {
		thread = list_get_instance(item, thread_t, th_link);

		spinlock_lock(&thread->lock);
		flags = thread->flags;
		spinlock_unlock(&thread->lock);

		/* Not interested in kernel threads */
		if ((flags & THREAD_FLAG_USPACE) != 0) {
			/* Using thread struct pointer for identification */
			tid = (unative_t) thread;
			id_buffer[stored++] = tid;
		}
	}

	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	*buffer = id_buffer;
	*n = stored * sizeof(unative_t);

	return 0;
}
/** Read the arguments of the system call thread @a t is stopped in.
 *
 * Valid only while the thread's current event is UDEBUG_EVENT_SYSCALL.
 * Allocates a six-word buffer, copies t->syscall_args into it and
 * passes ownership to the caller.
 *
 * @param t	Thread whose syscall arguments to read.
 * @param buffer	Output: the allocated argument buffer.
 *
 * @return 0 on success, EINVAL if not stopped in a syscall event,
 *	   or an error code from _thread_op_begin().
 */
int udebug_args_read(thread_t *t, void **buffer)
{
	ipl_t ipl;
	int ret;
	unative_t *arg_buffer;

	klog_printf("udebug_args_read()");

	ipl = interrupts_disable();

	/* On success, this will lock t->debug_lock */
	ret = _thread_op_begin(t);
	if (ret != EOK) {
		interrupts_restore(ipl);
		return ret;
	}

	/* Additionally we need to verify that we are inside a syscall */
	if (t->cur_event != UDEBUG_EVENT_SYSCALL) {
		_thread_op_end(t);
		interrupts_restore(ipl);
		return EINVAL;
	}

	/* Copy to a local buffer before releasing the lock */
	arg_buffer = malloc(6 * sizeof(unative_t), 0);
	memcpy(arg_buffer, t->syscall_args, 6 * sizeof(unative_t));

	_thread_op_end(t);
	interrupts_restore(ipl);

	*buffer = arg_buffer;
	return 0;
}
/** Read the userspace register state (istate) of thread @a t.
 *
 * Allocates a buffer of sizeof(istate_t), copies the thread's saved
 * userspace state into it and passes ownership to the caller.
 *
 * @param t	Thread whose registers to read.
 * @param buffer	Output: the allocated istate copy.
 * @param n	Output: size of the copy in bytes.
 *
 * @return 0 on success, EBUSY if the istate is not available,
 *	   or an error code from _thread_op_begin().
 */
int udebug_regs_read(thread_t *t, void **buffer, size_t *n)
{
	istate_t *state;
	void *regs_buffer;
	int ret;
	ipl_t ipl;

	klog_printf("udebug_regs_read()");

	ipl = interrupts_disable();

	/* On success, this will lock t->debug_lock */
	ret = _thread_op_begin(t);
	if (ret != EOK) {
		interrupts_restore(ipl);
		return ret;
	}

	state = t->uspace_state;
	if (state == NULL) {
		_thread_op_end(t);
		interrupts_restore(ipl);
		klog_printf("udebug_regs_read() - istate not available");
		return EBUSY;
	}

	/* Copy to an allocated buffer */
	regs_buffer = malloc(sizeof(istate_t), 0);
	memcpy(regs_buffer, state, sizeof(istate_t));

	_thread_op_end(t);
	interrupts_restore(ipl);

	*buffer = regs_buffer;
	*n = sizeof(istate_t);

	return 0;
}
/** Write new userspace register state (istate) for thread @a t.
 *
 * Overwrites the thread's saved userspace register context with the
 * contents of @a buffer, which must hold sizeof(istate_t) bytes.
 *
 * @param t	Thread whose registers to write.
 * @param buffer	Source buffer with the new istate contents.
 *
 * @return 0 on success, EBUSY if the istate is not available,
 *	   or an error code from _thread_op_begin().
 */
int udebug_regs_write(thread_t *t, void *buffer)
{
	int rc;
	istate_t *state;
	ipl_t ipl;

	klog_printf("udebug_regs_write()");

	/* Try to change the thread's uspace_state */

	ipl = interrupts_disable();

	/* On success, this will lock t->debug_lock */
	rc = _thread_op_begin(t);
	if (rc != EOK) {
		interrupts_restore(ipl);
		return rc;
	}

	state = t->uspace_state;
	if (state == NULL) {
		_thread_op_end(t);
		interrupts_restore(ipl);
		klog_printf("udebug_regs_write() - istate not available");
		return EBUSY;
	}

	/*
	 * BUG FIX: the original used sizeof(t->uspace_state), which is
	 * the size of the *pointer*, not of the istate_t structure it
	 * points to - only a few bytes of register state were written.
	 * Copy the entire structure instead.
	 */
	memcpy(t->uspace_state, buffer, sizeof(istate_t));

	_thread_op_end(t);
	interrupts_restore(ipl);

	return 0;
}
/** Read a block of the current task's userspace memory.
 *
 * Allocates a buffer of @a n bytes, copies in the data from
 * @a uspace_addr and passes ownership of the buffer to the caller.
 *
 * @param uspace_addr	Userspace source address.
 * @param n		Number of bytes to read.
 * @param buffer	Output: the allocated data buffer.
 *
 * @return 0 on success or the error code from copy_from_uspace().
 */
int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer)
{
	void *data_buffer;
	int rc;

	klog_printf("udebug_mem_read()");

	data_buffer = malloc(n, 0);

	klog_printf("udebug_mem_read: src=%u, size=%u", uspace_addr, n);

	/* NOTE: this is not strictly from a syscall... but that shouldn't
	 * be a problem */
	rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n);
	if (rc != 0) {
		/* BUG FIX: the original leaked data_buffer on failure. */
		free(data_buffer);
		return rc;
	}

	*buffer = data_buffer;
	return 0;
}
/** Write a block of data into the current task's userspace memory.
 *
 * Permitted only while the task is in an active debugging session.
 *
 * @param uspace_addr	Userspace destination address.
 * @param data		Source data to write.
 * @param n		Number of bytes to write.
 *
 * @return 0 on success, EBUSY if the task is not being debugged,
 *	   or the error code from copy_to_uspace().
 */
int udebug_mem_write(unative_t uspace_addr, void *data, size_t n)
{
	udebug_task_state_t dts;

	klog_printf("udebug_mem_write()");

	/* Verify task state */
	spinlock_lock(&TASK->lock);
	dts = TASK->dt_state;
	spinlock_unlock(&TASK->lock);

	if (dts != UDEBUG_TS_ACTIVE)
		return EBUSY;

	klog_printf("dst=%u, size=%u", uspace_addr, n);

	/* NOTE: this is not strictly from a syscall... but that shouldn't
	 * be a problem */
	return copy_to_uspace((void *)uspace_addr, data, n);
}
/** @}
*/