Rev 3474 → Rev 3674
(Lines prefixed with - appear only in Rev 3474; lines prefixed with + only in Rev 3674.)
Line 55:
  * Simply put, return thread t with t->udebug.lock held,
  * but only if it verifies all conditions.
  *
  * Specifically, verifies that thread t exists, is a userspace thread,
  * and belongs to the current task (TASK). Verifies that the thread
- * has (or hasn't) go according to having_go (typically false).
+ * is (or is not) go according to being_go (typically false).
  * It also locks t->udebug.lock, making sure that t->udebug.debug_active
  * is true - that the thread is in a valid debugging session.
  *
  * With this verified and the t->udebug.lock mutex held, it is ensured
  * that the thread cannot leave the debugging session, let alone cease
Line 68:
  * In this function, holding the TASK->udebug.lock mutex prevents the
  * thread from leaving the debugging session, while relaxing from
  * the t->lock spinlock to the t->udebug.lock mutex.
  *
  * @param t Pointer to the thread; need not be valid at all.
- * @param having_go Required thread state.
+ * @param being_go Required thread state.
  *
  * Returns EOK if all went well, or an error code otherwise.
  */
- static int _thread_op_begin(thread_t *t, bool having_go)
+ static int _thread_op_begin(thread_t *t, bool being_go)
 {
     task_id_t taskid;
     ipl_t ipl;

     taskid = TASK->taskid;
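
The comment above compresses a fairly involved locking sequence. As an editorial summary, and under the assumption that TASK->udebug.lock, interrupt disabling and threads_lock are acquired in the part of the function this diff does not show (the error paths below release all three), the order is roughly:

/*
 * Editorial summary of the locking order in _thread_op_begin():
 *
 *   mutex_lock(&TASK->udebug.lock)     - presumably taken in the elided part;
 *                                        the error paths below release it
 *   interrupts off, threads_lock taken - likewise elided; released below
 *   spinlock_lock(&t->lock)            - t can no longer cease to exist
 *   spinlock_unlock(&threads_lock)
 *   check THREAD_FLAG_USPACE and udebug.debug_active under t->lock
 *   spinlock_unlock(&t->lock), interrupts_restore(ipl)
 *   check t->task == TASK, still under TASK->udebug.lock
 *   mutex_lock(&t->udebug.lock)        - relax to the per-thread mutex
 *   mutex_unlock(&TASK->udebug.lock)
 *   check the GO state; on success, return EOK with t->udebug.lock held
 */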
Line 96:

     /* t->lock is enough to ensure the thread's existence */
     spinlock_lock(&t->lock);
     spinlock_unlock(&threads_lock);

-    /* Verify that 't' is a userspace thread */
+    /* Verify that 't' is a userspace thread. */
     if ((t->flags & THREAD_FLAG_USPACE) == 0) {
         /* It's not; deny its existence */
         spinlock_unlock(&t->lock);
         interrupts_restore(ipl);
         mutex_unlock(&TASK->udebug.lock);
         return ENOENT;
     }

-    /* Verify debugging state */
+    /* Verify debugging state. */
     if (t->udebug.debug_active != true) {
         /* Not in debugging session or undesired GO state */
         spinlock_unlock(&t->lock);
         interrupts_restore(ipl);
         mutex_unlock(&TASK->udebug.lock);
Line 122:
      * true.
      */
     spinlock_unlock(&t->lock);
     interrupts_restore(ipl);

-    /* Only mutex TASK->udebug.lock left */
+    /* Only mutex TASK->udebug.lock left. */

-    /* Now verify that the thread belongs to the current task */
+    /* Now verify that the thread belongs to the current task. */
     if (t->task != TASK) {
         /* No such thread belongs to this task. */
         mutex_unlock(&TASK->udebug.lock);
         return ENOENT;
     }
Line 137:
      * Now we need to grab the thread's debug lock for synchronization
      * of the thread's stoppability/stop state.
      */
     mutex_lock(&t->udebug.lock);

-    /* The big task mutex is no longer needed */
+    /* The big task mutex is no longer needed. */
     mutex_unlock(&TASK->udebug.lock);

-    if (!t->udebug.stop != having_go) {
-        /* Not in debugging session or undesired GO state */
+    if (t->udebug.go != being_go) {
+        /* Not in debugging session or undesired GO state. */
         mutex_unlock(&t->udebug.lock);
         return EINVAL;
     }

-    /* Only t->udebug.lock left */
+    /* Only t->udebug.lock left. */

-    return EOK; /* All went well */
+    return EOK; /* All went well. */
 }
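
To make the contract of _thread_op_begin()/_thread_op_end() concrete, here is a minimal editorial sketch of the calling pattern they are built for; udebug_go() and udebug_args_read() further down follow this shape. The operation name is hypothetical, and it is assumed that _thread_op_end() drops t->udebug.lock.

static int udebug_example_op(thread_t *t)
{
    int rc;

    /* On success, t is verified and t->udebug.lock is held. */
    rc = _thread_op_begin(t, false);
    if (rc != EOK) {
        return rc;
    }

    /* ... inspect or modify t->udebug state here ... */

    /* Assumed to release t->udebug.lock. */
    _thread_op_end(t);

    return EOK;
}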
 /** End debugging operation on a thread. */
 static void _thread_op_end(thread_t *t)
 {
Line 202:
         reply = 1; /* immediate reply */
     } else {
         reply = 0; /* no reply */
     }

-    /* Set udebug.debug_active on all of the task's userspace threads */
+    /* Set udebug.debug_active on all of the task's userspace threads. */

     for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
         t = list_get_instance(cur, thread_t, th_link);

         mutex_lock(&t->udebug.lock);
Line 271:
     return 0;
 }

 /** Give thread GO.
  *
- * Upon receiving a go message, the thread is given GO. Having GO
+ * Upon receiving a go message, the thread is given GO. Being GO
  * means the thread is allowed to execute userspace code (until
  * a debugging event or STOP occurs, at which point the thread loses GO).
  *
  * @param t The thread to operate on (unlocked and need not be valid).
  * @param call The GO call that we are servicing.
  */
 int udebug_go(thread_t *t, call_t *call)
 {
     int rc;

-    /* On success, this will lock t->udebug.lock */
+    /* On success, this will lock t->udebug.lock. */
     rc = _thread_op_begin(t, false);
     if (rc != EOK) {
         return rc;
     }

     t->udebug.go_call = call;
-    t->udebug.stop = false;
+    t->udebug.go = true;
     t->udebug.cur_event = 0; /* none */

     /*
-     * Neither t's lock nor threads_lock may be held during wakeup
+     * Neither t's lock nor threads_lock may be held during wakeup.
      */
     waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST);

     _thread_op_end(t);

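For context, the waitq_wakeup() above is one half of a handshake: elsewhere in udebug (outside this diff) the debugged thread sleeps on its own go_wq until it is given GO. A rough sketch of that consumer side, assuming the generic HelenOS waitq_sleep_timeout() interface rather than whatever call udebug actually uses:

static void example_wait_for_go(thread_t *t)
{
    /* Block until udebug_go() performs waitq_wakeup() on go_wq. */
    waitq_sleep_timeout(&t->udebug.go_wq, SYNCH_NO_TIMEOUT,
        SYNCH_FLAGS_NONE);
}
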
Line 315:
 int udebug_stop(thread_t *t, call_t *call)
 {
     int rc;

     LOG("udebug_stop()\n");
-    mutex_lock(&TASK->udebug.lock);

     /*
      * On success, this will lock t->udebug.lock. Note that this makes sure
      * the thread is not stopped.
      */
     rc = _thread_op_begin(t, true);
     if (rc != EOK) {
         return rc;
     }

-    /* Take GO away from the thread */
-    t->udebug.stop = true;
+    /* Take GO away from the thread. */
+    t->udebug.go = false;

-    if (!t->udebug.stoppable) {
-        /* Answer will be sent when the thread becomes stoppable */
+    if (t->udebug.stoppable != true) {
+        /* Answer will be sent when the thread becomes stoppable. */
         _thread_op_end(t);
         return 0;
     }

     /*
-     * Answer GO call
+     * Answer GO call.
      */
     LOG("udebug_stop - answering go call\n");

-    /* Make sure nobody takes this call away from us */
+    /* Make sure nobody takes this call away from us. */
     call = t->udebug.go_call;
     t->udebug.go_call = NULL;

     IPC_SET_RETVAL(call->data, 0);
     IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP);
Line 352 (old) / Line 351 (new):

     THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;

     _thread_op_end(t);

+    mutex_lock(&TASK->udebug.lock);
     ipc_answer(&TASK->answerbox, call);
     mutex_unlock(&TASK->udebug.lock);

     LOG("udebug_stop/done\n");
     return 0;
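
The early return above defers the reply until the thread becomes stoppable. Purely as an illustration (none of this is taken from the diff), the deferred path presumably claims the stored GO call and answers it much like the immediate path just shown:

static void example_answer_deferred_stop(thread_t *t)
{
    call_t *call;

    mutex_lock(&t->udebug.lock);
    /* Claim the call so nobody else can answer it. */
    call = t->udebug.go_call;
    t->udebug.go_call = NULL;
    mutex_unlock(&t->udebug.lock);

    IPC_SET_RETVAL(call->data, 0);
    IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP);

    mutex_lock(&TASK->udebug.lock);
    ipc_answer(&TASK->answerbox, call);
    mutex_unlock(&TASK->udebug.lock);
}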
Line 420:

     spinlock_lock(&t->lock);
     flags = t->flags;
     spinlock_unlock(&t->lock);

-    /* Not interested in kernel threads */
+    /* Not interested in kernel threads. */
     if ((flags & THREAD_FLAG_USPACE) != 0) {
         /* Using thread struct pointer as identification hash */
         tid = (unative_t) t;
         id_buffer[copied_ids++] = tid;
     }
Line 456:
 int udebug_args_read(thread_t *t, void **buffer)
 {
     int rc;
     unative_t *arg_buffer;

-    /* Prepare a buffer to hold the arguments */
+    /* Prepare a buffer to hold the arguments. */
     arg_buffer = malloc(6 * sizeof(unative_t), 0);

-    /* On success, this will lock t->udebug.lock */
+    /* On success, this will lock t->udebug.lock. */
     rc = _thread_op_begin(t, false);
     if (rc != EOK) {
         return rc;
     }

-    /* Additionally we need to verify that we are inside a syscall */
+    /* Additionally we need to verify that we are inside a syscall. */
     if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B &&
         t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) {
         _thread_op_end(t);
         return EINVAL;
     }

-    /* Copy to a local buffer before releasing the lock */
+    /* Copy to a local buffer before releasing the lock. */
     memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t));

     _thread_op_end(t);

     *buffer = arg_buffer;
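
To spell out the ownership rule implied above, here is a hypothetical caller: udebug_args_read() hands back a malloc()ed array of six unative_t values through *buffer, and the caller is expected to free it. (Note that on the _thread_op_begin() failure path above the buffer does not appear to be freed.) printf() stands in for whatever output facility the caller actually has.

static int example_print_args(thread_t *t)
{
    void *buffer;
    unative_t *args;
    int rc, i;

    rc = udebug_args_read(t, &buffer);
    if (rc != EOK) {
        return rc;
    }

    args = (unative_t *) buffer;
    for (i = 0; i < 6; i++) {
        printf("arg[%d] = %#lx\n", i, (unsigned long) args[i]);
    }

    free(buffer);
    return EOK;
}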