54,74 → 54,102 |
* Specifically, verifies that thread t exists, is a userspace thread, |
 * and belongs to the current task (TASK). Verifies that the thread
 * has (or hasn't) GO according to having_go (typically false).
 * It also locks t->udebug.lock, making sure that t->udebug.debug_active
 * is true - that the thread is in a valid debugging session.
* |
* With this verified and the t->udebug.lock mutex held, it is ensured |
* that the thread cannot leave the debugging session, let alone cease |
* to exist. |
* |
* In this function, holding the TASK->udebug.lock mutex prevents the |
* thread from leaving the debugging session, while relaxing from |
* the t->lock spinlock to the t->udebug.lock mutex. |
* |
 * Returns EOK if all went well, or an error code otherwise.
 * Interrupt state is managed internally; callers need not disable interrupts.
* |
* Note: This function sports complicated locking. |
*/ |
static int _thread_op_begin(thread_t *t, bool having_go)
{
	ipl_t ipl;

	/*
	 * The task mutex prevents any thread of this task from leaving the
	 * debugging session while we are inspecting @a t.
	 */
	mutex_lock(&TASK->udebug.lock);

	/* thread_exists() must be called with threads_lock held */
	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);

	if (!thread_exists(t)) {
		spinlock_unlock(&threads_lock);
		interrupts_restore(ipl);
		mutex_unlock(&TASK->udebug.lock);
		return ENOENT;
	}

	/* t->lock is enough to ensure the thread's existence */
	spinlock_lock(&t->lock);
	spinlock_unlock(&threads_lock);

	/* Verify that 't' is a userspace thread */
	if ((t->flags & THREAD_FLAG_USPACE) == 0) {
		/* It's not, deny its existence */
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		mutex_unlock(&TASK->udebug.lock);
		return ENOENT;
	}

	/* Verify debugging state */
	if (t->udebug.debug_active != true) {
		/* Not in a debugging session */
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		mutex_unlock(&TASK->udebug.lock);
		return ENOENT;
	}

	/*
	 * Since the thread has debug_active == true, TASK->udebug.lock
	 * is enough to ensure its existence and that debug_active remains
	 * true. We may therefore relax from the t->lock spinlock to
	 * mutexes only.
	 */
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);

	/* Only mutex TASK->udebug.lock left */

	/* Now verify that the thread belongs to the current task */
	if (t->task != TASK) {
		/* No such thread belonging to this task */
		mutex_unlock(&TASK->udebug.lock);
		return ENOENT;
	}

	/*
	 * Now we need to grab the thread's debug lock for synchronization
	 * of the threads stoppability/stop state.
	 */
	mutex_lock(&t->udebug.lock);

	/* The big task mutex is no longer needed */
	mutex_unlock(&TASK->udebug.lock);

	if (!t->udebug.stop != having_go) {
		/* Undesired GO state */
		mutex_unlock(&t->udebug.lock);
		return EINVAL;
	}

	/* Only t->udebug.lock left */

	return EOK;	/* All went well */
}
|
|
/** End debugging operation on thread @a t.
 *
 * Releases the t->udebug.lock mutex acquired by a successful
 * _thread_op_begin(). The lock is a mutex; it must not also be
 * released through the spinlock API (double unlock).
 */
static void _thread_op_end(thread_t *t)
{
	mutex_unlock(&t->udebug.lock);
}
|
/** |
129,7 → 157,6 |
*/ |
int udebug_begin(call_t *call) |
{ |
ipl_t ipl; |
int reply; |
|
thread_t *t; |
164,12 → 191,10 |
for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
t = list_get_instance(cur, thread_t, th_link); |
|
ipl = interrupts_disable(); |
spinlock_lock(&t->udebug.lock); |
mutex_lock(&t->udebug.lock); |
if ((t->flags & THREAD_FLAG_USPACE) != 0) |
t->udebug.debug_active = true; |
spinlock_unlock(&t->udebug.lock); |
interrupts_restore(ipl); |
mutex_unlock(&t->udebug.lock); |
} |
|
mutex_unlock(&TASK->udebug.lock); |
221,17 → 246,13 |
|
int udebug_go(thread_t *t, call_t *call) |
{ |
ipl_t ipl; |
int rc; |
|
// klog_printf("udebug_go()"); |
|
ipl = interrupts_disable(); |
|
/* On success, this will lock t->udebug.lock */ |
rc = _thread_op_begin(t, false); |
if (rc != EOK) { |
interrupts_restore(ipl); |
return rc; |
} |
|
245,7 → 266,6 |
waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST); |
|
_thread_op_end(t); |
interrupts_restore(ipl); |
|
return 0; |
} |
252,14 → 272,11 |
|
int udebug_stop(thread_t *t, call_t *call) |
{ |
ipl_t ipl; |
int rc; |
|
klog_printf("udebug_stop()"); |
mutex_lock(&TASK->udebug.lock); |
|
ipl = interrupts_disable(); |
|
/* |
* On success, this will lock t->udebug.lock. Note that this makes sure |
* the thread is not stopped. |
266,7 → 283,6 |
*/ |
rc = _thread_op_begin(t, true); |
if (rc != EOK) { |
interrupts_restore(ipl); |
return rc; |
} |
|
276,7 → 292,6 |
if (!t->udebug.stoppable) { |
/* Answer will be sent when the thread becomes stoppable */ |
_thread_op_end(t); |
interrupts_restore(ipl); |
return 0; |
} |
|
296,7 → 311,6 |
THREAD->udebug.cur_event = UDEBUG_EVENT_STOP; |
|
_thread_op_end(t); |
interrupts_restore(ipl); |
|
ipc_answer(&TASK->answerbox, call); |
mutex_unlock(&TASK->udebug.lock); |
336,6 → 350,7 |
max_ids = buf_size / sizeof(unative_t); |
copied_ids = 0; |
|
/* FIXME: make sure the thread isn't past debug shutdown... */ |
for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
/* Do not write past end of buffer */ |
if (copied_ids >= max_ids) break; |
368,20 → 383,16 |
int udebug_args_read(thread_t *t, void **buffer) |
{ |
int rc; |
ipl_t ipl; |
unative_t *arg_buffer; |
|
klog_printf("udebug_args_read()"); |
// klog_printf("udebug_args_read()"); |
|
/* Prepare a buffer to hold the arguments */ |
arg_buffer = malloc(6 * sizeof(unative_t), 0); |
|
ipl = interrupts_disable(); |
|
/* On success, this will lock t->udebug.lock */ |
rc = _thread_op_begin(t, false); |
if (rc != EOK) { |
interrupts_restore(ipl); |
return rc; |
} |
|
389,8 → 400,6 |
if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B && |
t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) { |
_thread_op_end(t); |
interrupts_restore(ipl); |
|
return EINVAL; |
} |
|
398,7 → 407,6 |
memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t)); |
|
_thread_op_end(t); |
interrupts_restore(ipl); |
|
*buffer = arg_buffer; |
return 0; |
408,16 → 416,12 |
{ |
istate_t *state; |
int rc; |
ipl_t ipl; |
|
klog_printf("udebug_regs_read()"); |
// klog_printf("udebug_regs_read()"); |
|
ipl = interrupts_disable(); |
|
/* On success, this will lock t->udebug.lock */ |
rc = _thread_op_begin(t, false); |
if (rc != EOK) { |
interrupts_restore(ipl); |
return rc; |
} |
|
424,7 → 428,6 |
state = t->udebug.uspace_state; |
if (state == NULL) { |
_thread_op_end(t); |
interrupts_restore(ipl); |
klog_printf("udebug_regs_read() - istate not available"); |
return EBUSY; |
} |
433,7 → 436,6 |
memcpy(buffer, state, sizeof(istate_t)); |
|
_thread_op_end(t); |
interrupts_restore(ipl); |
|
return 0; |
} |
442,19 → 444,15 |
{ |
int rc; |
istate_t *state; |
ipl_t ipl; |
|
klog_printf("udebug_regs_write()"); |
|
/* Try to change the thread's uspace_state */ |
|
ipl = interrupts_disable(); |
|
/* On success, this will lock t->udebug.lock */ |
rc = _thread_op_begin(t, false); |
if (rc != EOK) { |
klog_printf("error locking thread"); |
interrupts_restore(ipl); |
return rc; |
} |
|
461,9 → 459,7 |
state = t->udebug.uspace_state; |
if (state == NULL) { |
_thread_op_end(t); |
interrupts_restore(ipl); |
klog_printf("udebug_regs_write() - istate not available"); |
|
return EBUSY; |
} |
|
470,7 → 466,6 |
memcpy(t->udebug.uspace_state, buffer, sizeof(istate_t)); |
|
_thread_op_end(t); |
interrupts_restore(ipl); |
|
return 0; |
} |