/branches/dynload/kernel/generic/include/byteorder.h |
---|
51,6 → 51,14 |
#define uint32_t_be2host(n) (n) |
#define uint64_t_be2host(n) (n) |
#define host2uint16_t_le(n) uint16_t_byteorder_swap(n) |
#define host2uint32_t_le(n) uint32_t_byteorder_swap(n) |
#define host2uint64_t_le(n) uint64_t_byteorder_swap(n) |
#define host2uint16_t_be(n) (n) |
#define host2uint32_t_be(n) (n) |
#define host2uint64_t_be(n) (n) |
#else |
#define uint16_t_le2host(n) (n) |
61,6 → 69,14 |
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n) |
#define host2uint16_t_le(n) (n) |
#define host2uint32_t_le(n) (n) |
#define host2uint64_t_le(n) (n) |
#define host2uint16_t_be(n) uint16_t_byteorder_swap(n) |
#define host2uint32_t_be(n) uint32_t_byteorder_swap(n) |
#define host2uint64_t_be(n) uint64_t_byteorder_swap(n) |
#endif |
static inline uint64_t uint64_t_byteorder_swap(uint64_t n) |
/branches/dynload/kernel/generic/include/proc/task.h |
---|
53,6 → 53,7 |
#include <mm/tlb.h> |
#include <proc/scheduler.h> |
#include <udebug/udebug.h> |
#include <ipc/kbox.h> |
#define TASK_NAME_BUFLEN 20 |
98,19 → 99,13 |
atomic_t active_calls; |
#ifdef CONFIG_UDEBUG |
/** Debugging stuff */ |
/** Debugging stuff. */ |
udebug_task_t udebug; |
/** Kernel answerbox */ |
answerbox_t kernel_box; |
/** Thread used to service kernel answerbox */ |
struct thread *kb_thread; |
/** Kbox thread creation vs. begin of cleanup mutual exclusion */ |
mutex_t kb_cleanup_lock; |
/** True if cleanup of kbox has already started */ |
bool kb_finished; |
/** Kernel answerbox. */ |
kbox_t kb; |
#endif |
/** Architecture specific task data. */ |
task_arch_t arch; |
/branches/dynload/kernel/generic/include/udebug/udebug.h |
---|
147,9 → 147,7 |
/** BEGIN operation in progress (waiting for threads to stop) */ |
UDEBUG_TS_BEGINNING, |
/** Debugger fully connected */ |
UDEBUG_TS_ACTIVE, |
/** Task is shutting down, no more debug activities allowed */ |
UDEBUG_TS_SHUTDOWN |
UDEBUG_TS_ACTIVE |
} udebug_task_state_t; |
/** Debugging part of task_t structure. |
169,25 → 167,19 |
/** Debugging part of thread_t structure. |
*/ |
typedef struct { |
/** |
* Prevent deadlock with udebug_before_thread_runs() in interrupt |
* handler, without actually disabling interrupts. |
* ==0 means "unlocked", >0 means "locked" |
*/ |
atomic_t int_lock; |
/** Synchronize debug ops on this thread / access to this structure */ |
/** Synchronize debug ops on this thread / access to this structure. */ |
mutex_t lock; |
waitq_t go_wq; |
call_t *go_call; |
unative_t syscall_args[6]; |
istate_t *uspace_state; |
/** What type of event are we stopped in or 0 if none */ |
udebug_event_t cur_event; |
bool stop; |
bool stoppable; |
bool debug_active; /**< In a debugging session */ |
/** What type of event are we stopped in or 0 if none. */ |
udebug_event_t cur_event; |
bool go; /**< thread is GO */ |
bool stoppable; /**< thread is stoppable */ |
bool debug_active; /**< thread is in a debugging session */ |
} udebug_thread_t; |
struct task; |
200,7 → 192,7 |
unative_t a4, unative_t a5, unative_t a6, unative_t id, unative_t rc, |
bool end_variant); |
void udebug_thread_b_event(struct thread *t); |
void udebug_thread_b_event_attach(struct thread *t, struct task *ta); |
void udebug_thread_e_event(void); |
void udebug_stoppable_begin(void); |
/branches/dynload/kernel/generic/include/ddi/irq.h |
---|
83,6 → 83,9 |
struct irq; |
typedef void (* irq_handler_t)(struct irq *irq, void *arg, ...); |
/** Type for function used to clear the interrupt. */ |
typedef void (* cir_t)(void *arg, inr_t inr); |
/** IPC notification config structure. |
* |
* Primarily, this structure is encapsulated in the irq_t structure. |
144,6 → 147,11 |
/** Argument for the handler. */ |
void *arg; |
/** Clear interrupt routine. */ |
cir_t cir; |
/** First argument to the clear interrupt routine. */ |
void *cir_arg; |
/** Notification configuration structure. */ |
ipc_notif_cfg_t notif_cfg; |
} irq_t; |
/branches/dynload/kernel/generic/include/adt/bitmap.h |
---|
49,6 → 49,14 |
extern void bitmap_clear_range(bitmap_t *bitmap, index_t start, count_t bits); |
extern void bitmap_copy(bitmap_t *dst, bitmap_t *src, count_t bits); |
/** Return the value of a single bit in the bitmap.
 *
 * @param bitmap	Bitmap to examine.
 * @param bit		Index of the bit to read.
 *
 * @return		1 if the bit is set, 0 if it is clear or if @a bit
 *			lies beyond the end of the bitmap.
 */
static inline int bitmap_get(bitmap_t *bitmap, index_t bit)
{
	if (bit >= bitmap->bits)
		return 0;

	/* Select the containing byte, shift the bit down and mask it out. */
	return ((bitmap->map[bit / 8] >> (bit & 7)) & 1);
}
#endif |
/** @} |
/branches/dynload/kernel/generic/include/ipc/kbox.h |
---|
37,6 → 37,18 |
#include <typedefs.h> |
/** Kernel answerbox structure.
 *
 * Bundles a task's kernel answerbox with the thread that services it
 * and the state used to synchronize kbox thread creation against the
 * start of task cleanup.
 */
typedef struct kbox {
	/** The answerbox itself. */
	answerbox_t box;
	/** Thread used to service the answerbox. */
	struct thread *thread;
	/** Kbox thread creation vs. begin of cleanup mutual exclusion. */
	mutex_t cleanup_lock;
	/** True if cleanup of kbox has already started. */
	bool finished;
} kbox_t;
extern int ipc_connect_kbox(task_id_t); |
extern void ipc_kbox_cleanup(void); |
/branches/dynload/kernel/generic/src/synch/futex.c |
---|
115,6 → 115,7 |
uintptr_t paddr; |
pte_t *t; |
ipl_t ipl; |
int rc; |
ipl = interrupts_disable(); |
134,9 → 135,17 |
interrupts_restore(ipl); |
futex = futex_find(paddr); |
return (unative_t) waitq_sleep_timeout(&futex->wq, usec, flags | |
#ifdef CONFIG_UDEBUG |
udebug_stoppable_begin(); |
#endif |
rc = waitq_sleep_timeout(&futex->wq, usec, flags | |
SYNCH_FLAGS_INTERRUPTIBLE); |
#ifdef CONFIG_UDEBUG |
udebug_stoppable_end(); |
#endif |
return (unative_t) rc; |
} |
/** Wakeup one thread waiting in futex wait queue. |
/branches/dynload/kernel/generic/src/interrupt/interrupt.c |
---|
86,8 → 86,17 |
void exc_dispatch(int n, istate_t *istate) |
{ |
ASSERT(n < IVT_ITEMS); |
#ifdef CONFIG_UDEBUG |
if (THREAD) THREAD->udebug.uspace_state = istate; |
#endif |
exc_table[n].f(n + IVT_FIRST, istate); |
#ifdef CONFIG_UDEBUG |
if (THREAD) THREAD->udebug.uspace_state = NULL; |
#endif |
/* This is a safe place to exit exiting thread */ |
if (THREAD && THREAD->interrupted && istate_from_uspace(istate)) |
thread_exit(); |
/branches/dynload/kernel/generic/src/time/clock.c |
---|
190,6 → 190,14 |
if (!ticks && !PREEMPTION_DISABLED) { |
scheduler(); |
#ifdef CONFIG_UDEBUG |
/* |
* Give udebug chance to stop the thread |
* before it begins executing. |
*/ |
if (istate_from_uspace(THREAD->udebug.uspace_state)) |
udebug_before_thread_runs(); |
#endif |
} |
} |
/branches/dynload/kernel/generic/src/ddi/irq.c |
---|
145,6 → 145,8 |
irq->claim = NULL; |
irq->handler = NULL; |
irq->arg = NULL; |
irq->cir = NULL; |
irq->cir_arg = NULL; |
irq->notif_cfg.notify = false; |
irq->notif_cfg.answerbox = NULL; |
irq->notif_cfg.code = NULL; |
/branches/dynload/kernel/generic/src/proc/task.c |
---|
164,10 → 164,10 |
udebug_task_init(&ta->udebug); |
/* Init kbox stuff */ |
ipc_answerbox_init(&ta->kernel_box, ta); |
ta->kb_thread = NULL; |
mutex_initialize(&ta->kb_cleanup_lock, MUTEX_PASSIVE); |
ta->kb_finished = false; |
ipc_answerbox_init(&ta->kb.box, ta); |
ta->kb.thread = NULL; |
mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE); |
ta->kb.finished = false; |
#endif |
ipc_answerbox_init(&ta->answerbox, ta); |
/branches/dynload/kernel/generic/src/proc/thread.c |
---|
763,14 → 763,20 |
return (unative_t) rc; |
} |
} |
#ifdef CONFIG_UDEBUG |
/* |
* Generate udebug THREAD_B event and attach the thread. |
* This must be done atomically (with the debug locks held), |
* otherwise we would either miss some thread or receive |
* THREAD_B events for threads that already existed |
* and could be detected with THREAD_READ before. |
*/ |
udebug_thread_b_event_attach(t, TASK); |
#else |
thread_attach(t, TASK); |
#endif |
thread_ready(t); |
#ifdef CONFIG_UDEBUG |
/* Generate udebug THREAD_B event */ |
udebug_thread_b_event(t); |
#endif |
return 0; |
} else |
free(kernel_uarg); |
/branches/dynload/kernel/generic/src/mm/tlb.c |
---|
134,9 → 134,7 |
void tlb_shootdown_ipi_send(void) |
{ |
#ifndef ia64 |
ipi_broadcast(VECTOR_TLB_SHOOTDOWN_IPI); |
#endif |
} |
/** Receive TLB shootdown message. */ |
/branches/dynload/kernel/generic/src/syscall/syscall.c |
---|
112,11 → 112,7 |
#ifdef CONFIG_UDEBUG |
udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, 0, false); |
#endif |
if (id < SYSCALL_END) { |
#ifdef CONFIG_UDEBUG |
udebug_stoppable_begin(); |
#endif |
rc = syscall_table[id](a1, a2, a3, a4, a5, a6); |
} else { |
printf("Task %" PRIu64": Unknown syscall %#" PRIxn, TASK->taskid, id); |
129,9 → 125,14 |
#ifdef CONFIG_UDEBUG |
udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, rc, true); |
/* |
* Stopping point needed for tasks that only invoke non-blocking |
* system calls. |
*/ |
udebug_stoppable_begin(); |
udebug_stoppable_end(); |
#endif |
#endif |
return rc; |
} |
/branches/dynload/kernel/generic/src/ipc/kbox.c |
---|
48,14 → 48,20 |
ipl_t ipl; |
bool have_kb_thread; |
/* Only hold kb_cleanup_lock while setting kb_finished - this is enough */ |
mutex_lock(&TASK->kb_cleanup_lock); |
TASK->kb_finished = true; |
mutex_unlock(&TASK->kb_cleanup_lock); |
/* |
* Only hold kb.cleanup_lock while setting kb.finished - |
* this is enough. |
*/ |
mutex_lock(&TASK->kb.cleanup_lock); |
TASK->kb.finished = true; |
mutex_unlock(&TASK->kb.cleanup_lock); |
have_kb_thread = (TASK->kb_thread != NULL); |
have_kb_thread = (TASK->kb.thread != NULL); |
/* From now on nobody will try to connect phones or attach kbox threads */ |
/* |
* From now on nobody will try to connect phones or attach |
* kbox threads |
*/ |
/* |
* Disconnect all phones connected to our kbox. Passing true for |
63,7 → 69,7 |
* disconnected phone. This ensures the kbox thread is going to |
* wake up and terminate. |
*/ |
ipc_answerbox_slam_phones(&TASK->kernel_box, have_kb_thread); |
ipc_answerbox_slam_phones(&TASK->kb.box, have_kb_thread); |
/* |
* If the task was being debugged, clean up debugging session. |
77,18 → 83,18 |
interrupts_restore(ipl); |
if (have_kb_thread) { |
LOG("join kb_thread..\n"); |
thread_join(TASK->kb_thread); |
thread_detach(TASK->kb_thread); |
LOG("join kb.thread..\n"); |
thread_join(TASK->kb.thread); |
thread_detach(TASK->kb.thread); |
LOG("join done\n"); |
TASK->kb_thread = NULL; |
TASK->kb.thread = NULL; |
} |
/* Answer all messages in 'calls' and 'dispatched_calls' queues */ |
spinlock_lock(&TASK->kernel_box.lock); |
ipc_cleanup_call_list(&TASK->kernel_box.dispatched_calls); |
ipc_cleanup_call_list(&TASK->kernel_box.calls); |
spinlock_unlock(&TASK->kernel_box.lock); |
/* Answer all messages in 'calls' and 'dispatched_calls' queues. */ |
spinlock_lock(&TASK->kb.box.lock); |
ipc_cleanup_call_list(&TASK->kb.box.dispatched_calls); |
ipc_cleanup_call_list(&TASK->kb.box.calls); |
spinlock_unlock(&TASK->kb.box.lock); |
} |
/** Handle hangup message in kbox. |
105,7 → 111,7 |
/* Was it our debugger, who hung up? */ |
if (call->sender == TASK->udebug.debugger) { |
/* Terminate debugging session (if any) */ |
/* Terminate debugging session (if any). */ |
LOG("kbox: terminate debug session\n"); |
ipl = interrupts_disable(); |
spinlock_lock(&TASK->lock); |
118,7 → 124,7 |
LOG("kbox: continue with hangup message\n"); |
IPC_SET_RETVAL(call->data, 0); |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
ipl = interrupts_disable(); |
spinlock_lock(&TASK->lock); |
130,13 → 136,13 |
*/ |
/* Only detach kbox thread unless already terminating. */ |
mutex_lock(&TASK->kb_cleanup_lock); |
if (&TASK->kb_finished == false) { |
mutex_lock(&TASK->kb.cleanup_lock); |
if (TASK->kb.finished == false) { |
/* Detach kbox thread so it gets freed from memory. */ |
thread_detach(TASK->kb_thread); |
TASK->kb_thread = NULL; |
thread_detach(TASK->kb.thread); |
TASK->kb.thread = NULL; |
} |
mutex_unlock(&TASK->kb_cleanup_lock); |
mutex_unlock(&TASK->kb.cleanup_lock); |
LOG("phone list is empty\n"); |
*last = true; |
166,7 → 172,7 |
done = false; |
while (!done) { |
call = ipc_wait_for_call(&TASK->kernel_box, SYNCH_NO_TIMEOUT, |
call = ipc_wait_for_call(&TASK->kb.box, SYNCH_NO_TIMEOUT, |
SYNCH_FLAGS_NONE); |
if (call == NULL) |
201,10 → 207,10 |
/** |
* Connect phone to a task kernel-box specified by id. |
* |
* Note that this is not completely atomic. For optimisation reasons, |
* The task might start cleaning up kbox after the phone has been connected |
* and before a kbox thread has been created. This must be taken into account |
* in the cleanup code. |
* Note that this is not completely atomic. For optimisation reasons, the task |
* might start cleaning up kbox after the phone has been connected and before |
* a kbox thread has been created. This must be taken into account in the |
* cleanup code. |
* |
* @return Phone id on success, or negative error code. |
*/ |
230,44 → 236,45 |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
mutex_lock(&ta->kb_cleanup_lock); |
mutex_lock(&ta->kb.cleanup_lock); |
if (atomic_predec(&ta->refcount) == 0) { |
mutex_unlock(&ta->kb_cleanup_lock); |
mutex_unlock(&ta->kb.cleanup_lock); |
task_destroy(ta); |
return ENOENT; |
} |
if (ta->kb_finished != false) { |
mutex_unlock(&ta->kb_cleanup_lock); |
if (ta->kb.finished != false) { |
mutex_unlock(&ta->kb.cleanup_lock); |
return EINVAL; |
} |
newphid = phone_alloc(); |
if (newphid < 0) { |
mutex_unlock(&ta->kb_cleanup_lock); |
mutex_unlock(&ta->kb.cleanup_lock); |
return ELIMIT; |
} |
/* Connect the newly allocated phone to the kbox */ |
ipc_phone_connect(&TASK->phones[newphid], &ta->kernel_box); |
ipc_phone_connect(&TASK->phones[newphid], &ta->kb.box); |
if (ta->kb_thread != NULL) { |
mutex_unlock(&ta->kb_cleanup_lock); |
if (ta->kb.thread != NULL) { |
mutex_unlock(&ta->kb.cleanup_lock); |
return newphid; |
} |
/* Create a kbox thread */ |
kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0, "kbox", false); |
kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0, |
"kbox", false); |
if (!kb_thread) { |
mutex_unlock(&ta->kb_cleanup_lock); |
mutex_unlock(&ta->kb.cleanup_lock); |
return ENOMEM; |
} |
ta->kb_thread = kb_thread; |
ta->kb.thread = kb_thread; |
thread_ready(kb_thread); |
mutex_unlock(&ta->kb_cleanup_lock); |
mutex_unlock(&ta->kb.cleanup_lock); |
return newphid; |
} |
/branches/dynload/kernel/generic/src/ipc/sysipc.c |
---|
455,10 → 455,17 |
IPC_SET_ARG5(call.data, 0); |
if (!(res = request_preprocess(&call, phone))) { |
#ifdef CONFIG_UDEBUG |
udebug_stoppable_begin(); |
#endif |
rc = ipc_call_sync(phone, &call); |
#ifdef CONFIG_UDEBUG |
udebug_stoppable_end(); |
#endif |
if (rc != EOK) |
return rc; |
process_answer(&call); |
} else { |
IPC_SET_RETVAL(call.data, res); |
} |
495,7 → 502,13 |
GET_CHECK_PHONE(phone, phoneid, return ENOENT); |
if (!(res = request_preprocess(&call, phone))) { |
#ifdef CONFIG_UDEBUG |
udebug_stoppable_begin(); |
#endif |
rc = ipc_call_sync(phone, &call); |
#ifdef CONFIG_UDEBUG |
udebug_stoppable_end(); |
#endif |
if (rc != EOK) |
return rc; |
process_answer(&call); |
798,9 → 811,17 |
{ |
call_t *call; |
restart: |
restart: |
#ifdef CONFIG_UDEBUG |
udebug_stoppable_begin(); |
#endif |
call = ipc_wait_for_call(&TASK->answerbox, usec, |
flags | SYNCH_FLAGS_INTERRUPTIBLE); |
#ifdef CONFIG_UDEBUG |
udebug_stoppable_end(); |
#endif |
if (!call) |
return 0; |
/branches/dynload/kernel/generic/src/ipc/irq.c |
---|
100,7 → 100,7 |
*((uint64_t *) code->cmds[i].addr) = |
code->cmds[i].value; |
break; |
#if defined(ia32) || defined(amd64) |
#if defined(ia32) || defined(amd64) || defined(ia64) |
case CMD_PORT_READ_1: |
dstval = inb((long) code->cmds[i].addr); |
break; |
/branches/dynload/kernel/generic/src/udebug/udebug_ipc.c |
---|
73,7 → 73,7 |
rc = udebug_begin(call); |
if (rc < 0) { |
IPC_SET_RETVAL(call->data, rc); |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
return; |
} |
83,7 → 83,7 |
*/ |
if (rc != 0) { |
IPC_SET_RETVAL(call->data, 0); |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
} |
} |
99,7 → 99,7 |
rc = udebug_end(); |
IPC_SET_RETVAL(call->data, rc); |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
} |
/** Process a SET_EVMASK call. |
116,7 → 116,7 |
rc = udebug_set_evmask(mask); |
IPC_SET_RETVAL(call->data, rc); |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
} |
135,7 → 135,7 |
rc = udebug_go(t, call); |
if (rc < 0) { |
IPC_SET_RETVAL(call->data, rc); |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
return; |
} |
} |
154,7 → 154,7 |
rc = udebug_stop(t, call); |
IPC_SET_RETVAL(call->data, rc); |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
} |
/** Process a THREAD_READ call. |
182,7 → 182,7 |
rc = udebug_thread_read(&buffer, buf_size, &n); |
if (rc < 0) { |
IPC_SET_RETVAL(call->data, rc); |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
return; |
} |
209,7 → 209,7 |
IPC_SET_ARG3(call->data, total_bytes); |
call->buffer = buffer; |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
} |
/** Process an ARGS_READ call. |
229,7 → 229,7 |
rc = udebug_args_read(t, &buffer); |
if (rc != EOK) { |
IPC_SET_RETVAL(call->data, rc); |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
return; |
} |
247,7 → 247,7 |
IPC_SET_ARG2(call->data, 6 * sizeof(unative_t)); |
call->buffer = buffer; |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
} |
/** Process an MEM_READ call. |
270,7 → 270,7 |
rc = udebug_mem_read(uspace_src, size, &buffer); |
if (rc < 0) { |
IPC_SET_RETVAL(call->data, rc); |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
return; |
} |
282,7 → 282,7 |
IPC_SET_ARG2(call->data, size); |
call->buffer = buffer; |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
} |
/** Handle a debug call received on the kernel answerbox. |
306,7 → 306,7 |
*/ |
if (TASK->udebug.debugger != call->sender) { |
IPC_SET_RETVAL(call->data, EINVAL); |
ipc_answer(&TASK->kernel_box, call); |
ipc_answer(&TASK->kb.box, call); |
return; |
} |
} |
/branches/dynload/kernel/generic/src/udebug/udebug.c |
---|
35,18 → 35,6 |
* @brief Udebug hooks and data structure management. |
* |
* Udebug is an interface that makes userspace debuggers possible. |
* |
* Functions in this file are executed directly in each thread, which |
* may or may not be the subject of debugging. The udebug_stoppable_begin/end() |
* functions are also executed in the clock interrupt handler. To avoid |
* deadlock, functions in this file are protected from the interrupt |
* by locking the recursive lock THREAD->udebug.int_lock (just an atomic |
* variable). This prevents udebug_stoppable_begin/end() from being |
* executed in the interrupt handler (they are skipped). |
* |
* Functions in udebug_ops.c and udebug_ipc.c execute in different threads, |
* so they needn't be protected from the (preemptible) interrupt-initiated |
* code. |
*/ |
#include <synch/waitq.h> |
55,16 → 43,7 |
#include <errno.h> |
#include <arch.h> |
static inline void udebug_int_lock(void) |
{ |
atomic_inc(&THREAD->udebug.int_lock); |
} |
static inline void udebug_int_unlock(void) |
{ |
atomic_dec(&THREAD->udebug.int_lock); |
} |
/** Initialize udebug part of task structure. |
* |
* Called as part of task structure initialization. |
89,13 → 68,9 |
mutex_initialize(&ut->lock, MUTEX_PASSIVE); |
waitq_initialize(&ut->go_wq); |
/* |
* At the beginning the thread is stoppable, so int_lock be set, too. |
*/ |
atomic_set(&ut->int_lock, 1); |
ut->go_call = NULL; |
ut->stop = true; |
ut->uspace_state = NULL; |
ut->go = false; |
ut->stoppable = true; |
ut->debug_active = false; |
ut->cur_event = 0; /* none */ |
161,11 → 136,8 |
ASSERT(THREAD); |
ASSERT(TASK); |
udebug_int_lock(); |
/* Early check for undebugged tasks */ |
if (!udebug_thread_precheck()) { |
udebug_int_unlock(); |
return; |
} |
198,7 → 170,8 |
* Active debugging session |
*/ |
if (THREAD->udebug.debug_active && THREAD->udebug.stop) { |
if (THREAD->udebug.debug_active == true && |
THREAD->udebug.go == false) { |
/* |
* Thread was requested to stop - answer go call |
*/ |
230,7 → 203,6 |
{ |
/* Early check for undebugged tasks */ |
if (!udebug_thread_precheck()) { |
udebug_int_unlock(); |
return; |
} |
239,7 → 211,7 |
mutex_lock(&THREAD->udebug.lock); |
if (THREAD->udebug.debug_active && |
THREAD->udebug.stop == true) { |
THREAD->udebug.go == false) { |
TASK->udebug.begin_call = NULL; |
mutex_unlock(&THREAD->udebug.lock); |
mutex_unlock(&TASK->udebug.lock); |
247,7 → 219,7 |
udebug_wait_for_go(&THREAD->udebug.go_wq); |
goto restart; |
/* must try again - have to lose stoppability atomically */ |
/* Must try again - have to lose stoppability atomically. */ |
} else { |
++TASK->udebug.not_stoppable_count; |
ASSERT(THREAD->udebug.stoppable == true); |
256,44 → 228,17 |
mutex_unlock(&THREAD->udebug.lock); |
mutex_unlock(&TASK->udebug.lock); |
} |
udebug_int_unlock(); |
} |
/** Upon being scheduled to run, check if the current thread should stop. |
* |
* This function is called from clock(). Preemption is enabled. |
* interrupts are disabled, but since this is called after |
* being scheduled-in, we can enable them, if we're careful enough |
* not to allow arbitrary recursion or deadlock with the thread context. |
* This function is called from clock(). |
*/ |
void udebug_before_thread_runs(void) |
{ |
ipl_t ipl; |
return; |
ASSERT(!PREEMPTION_DISABLED); |
/* |
* Prevent agains re-entering, such as when preempted inside this |
* function. |
*/ |
if (atomic_get(&THREAD->udebug.int_lock) != 0) |
return; |
udebug_int_lock(); |
ipl = interrupts_enable(); |
/* Now we're free to do whatever we need (lock mutexes, sleep, etc.) */ |
/* Check if we're supposed to stop */ |
udebug_stoppable_begin(); |
udebug_stoppable_end(); |
interrupts_restore(ipl); |
udebug_int_unlock(); |
} |
/** Syscall event hook. |
310,11 → 255,8 |
etype = end_variant ? UDEBUG_EVENT_SYSCALL_E : UDEBUG_EVENT_SYSCALL_B; |
udebug_int_lock(); |
/* Early check for undebugged tasks */ |
if (!udebug_thread_precheck()) { |
udebug_int_unlock(); |
return; |
} |
321,9 → 263,9 |
mutex_lock(&TASK->udebug.lock); |
mutex_lock(&THREAD->udebug.lock); |
/* Must only generate events when in debugging session and have go */ |
/* Must only generate events when in debugging session and is go. */ |
if (THREAD->udebug.debug_active != true || |
THREAD->udebug.stop == true || |
THREAD->udebug.go == false || |
(TASK->udebug.evmask & UDEBUG_EVMASK(etype)) == 0) { |
mutex_unlock(&THREAD->udebug.lock); |
mutex_unlock(&TASK->udebug.lock); |
348,11 → 290,11 |
THREAD->udebug.syscall_args[5] = a6; |
/* |
* Make sure udebug.stop is true when going to sleep |
* Make sure udebug.go is false when going to sleep |
* in case we get woken up by DEBUG_END. (At which |
* point it must be back to the initial true value). |
*/ |
THREAD->udebug.stop = true; |
THREAD->udebug.go = false; |
THREAD->udebug.cur_event = etype; |
ipc_answer(&TASK->answerbox, call); |
361,35 → 303,41 |
mutex_unlock(&TASK->udebug.lock); |
udebug_wait_for_go(&THREAD->udebug.go_wq); |
udebug_int_unlock(); |
} |
/** Thread-creation event hook. |
/** Thread-creation event hook combined with attaching the thread. |
* |
* Must be called when a new userspace thread is created in the debugged |
* task. Generates a THREAD_B event. |
* task. Generates a THREAD_B event. Also attaches the thread @a t |
* to the task @a ta. |
* |
* This is necessary to avoid a race condition where the BEGIN and THREAD_READ |
* requests would be handled inbetween attaching the thread and checking it |
* for being in a debugging session to send the THREAD_B event. We could then |
* either miss threads or get some threads both in the thread list |
* and get a THREAD_B event for them. |
* |
* @param t Structure of the thread being created. Not locked, as the |
* thread is not executing yet. |
* @param ta Task to which the thread should be attached. |
*/ |
void udebug_thread_b_event(struct thread *t) |
void udebug_thread_b_event_attach(struct thread *t, struct task *ta) |
{ |
call_t *call; |
udebug_int_lock(); |
mutex_lock(&TASK->udebug.lock); |
mutex_lock(&THREAD->udebug.lock); |
thread_attach(t, ta); |
LOG("udebug_thread_b_event\n"); |
LOG("- check state\n"); |
/* Must only generate events when in debugging session */ |
if (THREAD->udebug.debug_active != true) { |
LOG("- debug_active: %s, udebug.stop: %s\n", |
LOG("- debug_active: %s, udebug.go: %s\n", |
THREAD->udebug.debug_active ? "yes(+)" : "no(-)", |
THREAD->udebug.stop ? "yes(-)" : "no(+)"); |
THREAD->udebug.go ? "yes(-)" : "no(+)"); |
mutex_unlock(&THREAD->udebug.lock); |
mutex_unlock(&TASK->udebug.lock); |
return; |
404,11 → 352,11 |
IPC_SET_ARG2(call->data, (unative_t)t); |
/* |
* Make sure udebug.stop is true when going to sleep |
* Make sure udebug.go is false when going to sleep |
* in case we get woken up by DEBUG_END. (At which |
* point it must be back to the initial true value). |
*/ |
THREAD->udebug.stop = true; |
THREAD->udebug.go = false; |
THREAD->udebug.cur_event = UDEBUG_EVENT_THREAD_B; |
ipc_answer(&TASK->answerbox, call); |
418,8 → 366,6 |
LOG("- sleep\n"); |
udebug_wait_for_go(&THREAD->udebug.go_wq); |
udebug_int_unlock(); |
} |
/** Thread-termination event hook. |
431,8 → 377,6 |
{ |
call_t *call; |
udebug_int_lock(); |
mutex_lock(&TASK->udebug.lock); |
mutex_lock(&THREAD->udebug.lock); |
439,11 → 383,11 |
LOG("udebug_thread_e_event\n"); |
LOG("- check state\n"); |
/* Must only generate events when in debugging session */ |
/* Must only generate events when in debugging session. */ |
if (THREAD->udebug.debug_active != true) { |
/* printf("- debug_active: %s, udebug.stop: %s\n", |
/* printf("- debug_active: %s, udebug.go: %s\n", |
THREAD->udebug.debug_active ? "yes(+)" : "no(-)", |
THREAD->udebug.stop ? "yes(-)" : "no(+)");*/ |
THREAD->udebug.go ? "yes(-)" : "no(+)");*/ |
mutex_unlock(&THREAD->udebug.lock); |
mutex_unlock(&TASK->udebug.lock); |
return; |
456,10 → 400,10 |
IPC_SET_RETVAL(call->data, 0); |
IPC_SET_ARG1(call->data, UDEBUG_EVENT_THREAD_E); |
/* Prevent any further debug activity in thread */ |
/* Prevent any further debug activity in thread. */ |
THREAD->udebug.debug_active = false; |
THREAD->udebug.cur_event = 0; /* none */ |
THREAD->udebug.stop = true; /* set to initial value */ |
THREAD->udebug.go = false; /* set to initial value */ |
ipc_answer(&TASK->answerbox, call); |
466,8 → 410,10 |
mutex_unlock(&THREAD->udebug.lock); |
mutex_unlock(&TASK->udebug.lock); |
/* Leave int_lock enabled */ |
/* This event does not sleep - debugging has finished in this thread */ |
/* |
* This event does not sleep - debugging has finished |
* in this thread. |
*/ |
} |
/** |
490,8 → 436,6 |
LOG("udebug_task_cleanup()\n"); |
LOG("task %" PRIu64 "\n", ta->taskid); |
udebug_int_lock(); |
if (ta->udebug.dt_state != UDEBUG_TS_BEGINNING && |
ta->udebug.dt_state != UDEBUG_TS_ACTIVE) { |
LOG("udebug_task_cleanup(): task not being debugged\n"); |
512,19 → 456,19 |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
/* Only process userspace threads */ |
/* Only process userspace threads. */ |
if ((flags & THREAD_FLAG_USPACE) != 0) { |
/* Prevent any further debug activity in thread */ |
/* Prevent any further debug activity in thread. */ |
t->udebug.debug_active = false; |
t->udebug.cur_event = 0; /* none */ |
/* Still has go? */ |
if (t->udebug.stop == false) { |
/* Is the thread still go? */ |
if (t->udebug.go == true) { |
/* |
* Yes, so clear go. As debug_active == false, |
* this doesn't affect anything. |
*/ |
t->udebug.stop = true; |
t->udebug.go = false; |
/* Answer GO call */ |
LOG("answer GO call with EVENT_FINISHED\n"); |
553,8 → 497,6 |
ta->udebug.dt_state = UDEBUG_TS_INACTIVE; |
ta->udebug.debugger = NULL; |
udebug_int_unlock(); |
return 0; |
} |
/branches/dynload/kernel/generic/src/udebug/udebug_ops.c |
---|
57,7 → 57,7 |
* |
* Specifically, verifies that thread t exists, is a userspace thread, |
* and belongs to the current task (TASK). Verifies, that the thread |
* has (or hasn't) go according to having_go (typically false). |
* is (or is not) go according to being_go (typically false). |
* It also locks t->udebug.lock, making sure that t->udebug.debug_active |
* is true - that the thread is in a valid debugging session. |
* |
70,11 → 70,11 |
* the t->lock spinlock to the t->udebug.lock mutex. |
* |
* @param t Pointer, need not at all be valid. |
* @param having_go Required thread state. |
* @param being_go Required thread state. |
* |
* Returns EOK if all went well, or an error code otherwise. |
*/ |
static int _thread_op_begin(thread_t *t, bool having_go) |
static int _thread_op_begin(thread_t *t, bool being_go) |
{ |
task_id_t taskid; |
ipl_t ipl; |
98,7 → 98,7 |
spinlock_lock(&t->lock); |
spinlock_unlock(&threads_lock); |
/* Verify that 't' is a userspace thread */ |
/* Verify that 't' is a userspace thread. */ |
if ((t->flags & THREAD_FLAG_USPACE) == 0) { |
/* It's not, deny its existence */ |
spinlock_unlock(&t->lock); |
107,7 → 107,7 |
return ENOENT; |
} |
/* Verify debugging state */ |
/* Verify debugging state. */ |
if (t->udebug.debug_active != true) { |
/* Not in debugging session or undesired GO state */ |
spinlock_unlock(&t->lock); |
124,9 → 124,9 |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
/* Only mutex TASK->udebug.lock left */ |
/* Only mutex TASK->udebug.lock left. */ |
/* Now verify that the thread belongs to the current task */ |
/* Now verify that the thread belongs to the current task. */ |
if (t->task != TASK) { |
/* No such thread belonging to this task. */ |
mutex_unlock(&TASK->udebug.lock); |
139,18 → 139,18 |
*/ |
mutex_lock(&t->udebug.lock); |
/* The big task mutex is no longer needed */ |
/* The big task mutex is no longer needed. */ |
mutex_unlock(&TASK->udebug.lock); |
if (!t->udebug.stop != having_go) { |
/* Not in debugging session or undesired GO state */ |
if (t->udebug.go != being_go) { |
/* Not in debugging session or undesired GO state. */ |
mutex_unlock(&t->udebug.lock); |
return EINVAL; |
} |
/* Only t->udebug.lock left */ |
/* Only t->udebug.lock left. */ |
return EOK; /* All went well */ |
return EOK; /* All went well. */ |
} |
/** End debugging operation on a thread. */ |
204,7 → 204,7 |
reply = 0; /* no reply */ |
} |
/* Set udebug.debug_active on all of the task's userspace threads */ |
/* Set udebug.debug_active on all of the task's userspace threads. */ |
for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
t = list_get_instance(cur, thread_t, th_link); |
273,7 → 273,7 |
/** Give thread GO. |
* |
* Upon recieving a go message, the thread is given GO. Having GO |
* Upon receiving a go message, the thread is given GO. Being GO |
* means the thread is allowed to execute userspace code (until |
* a debugging event or STOP occurs, at which point the thread loses GO. |
* |
284,7 → 284,7 |
{ |
int rc; |
/* On success, this will lock t->udebug.lock */ |
/* On success, this will lock t->udebug.lock. */ |
rc = _thread_op_begin(t, false); |
if (rc != EOK) { |
return rc; |
291,11 → 291,11 |
} |
t->udebug.go_call = call; |
t->udebug.stop = false; |
t->udebug.go = true; |
t->udebug.cur_event = 0; /* none */ |
/* |
* Neither t's lock nor threads_lock may be held during wakeup |
* Neither t's lock nor threads_lock may be held during wakeup. |
*/ |
waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST); |
317,7 → 317,6 |
int rc; |
LOG("udebug_stop()\n"); |
mutex_lock(&TASK->udebug.lock); |
/* |
* On success, this will lock t->udebug.lock. Note that this makes sure |
328,21 → 327,21 |
return rc; |
} |
/* Take GO away from the thread */ |
t->udebug.stop = true; |
/* Take GO away from the thread. */ |
t->udebug.go = false; |
if (!t->udebug.stoppable) { |
/* Answer will be sent when the thread becomes stoppable */ |
if (t->udebug.stoppable != true) { |
/* Answer will be sent when the thread becomes stoppable. */ |
_thread_op_end(t); |
return 0; |
} |
/* |
* Answer GO call |
* Answer GO call. |
*/ |
LOG("udebug_stop - answering go call\n"); |
/* Make sure nobody takes this call away from us */ |
/* Make sure nobody takes this call away from us. */ |
call = t->udebug.go_call; |
t->udebug.go_call = NULL; |
354,6 → 353,7 |
_thread_op_end(t); |
mutex_lock(&TASK->udebug.lock); |
ipc_answer(&TASK->answerbox, call); |
mutex_unlock(&TASK->udebug.lock); |
422,7 → 422,7 |
flags = t->flags; |
spinlock_unlock(&t->lock); |
/* Not interested in kernel threads */ |
/* Not interested in kernel threads. */ |
if ((flags & THREAD_FLAG_USPACE) != 0) { |
/* Using thread struct pointer as identification hash */ |
tid = (unative_t) t; |
458,16 → 458,16 |
int rc; |
unative_t *arg_buffer; |
/* Prepare a buffer to hold the arguments */ |
/* Prepare a buffer to hold the arguments. */ |
arg_buffer = malloc(6 * sizeof(unative_t), 0); |
/* On success, this will lock t->udebug.lock */ |
/* On success, this will lock t->udebug.lock. */ |
rc = _thread_op_begin(t, false); |
if (rc != EOK) { |
return rc; |
} |
/* Additionally we need to verify that we are inside a syscall */ |
/* Additionally we need to verify that we are inside a syscall. */ |
if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B && |
t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) { |
_thread_op_end(t); |
474,7 → 474,7 |
return EINVAL; |
} |
/* Copy to a local buffer before releasing the lock */ |
/* Copy to a local buffer before releasing the lock. */ |
memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t)); |
_thread_op_end(t); |