Rev 2894 | Go to most recent revision | Details | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
2887 | svoboda | 1 | /** @addtogroup generic |
2 | * @{ |
||
3 | */ |
||
4 | |||
5 | /** |
||
6 | * @file |
||
7 | * @brief Tdebug. |
||
8 | */ |
||
9 | |||
10 | #include <console/klog.h> |
||
11 | #include <proc/task.h> |
||
12 | #include <proc/thread.h> |
||
13 | #include <arch.h> |
||
14 | #include <errno.h> |
||
15 | #include <syscall/copy.h> |
||
16 | #include <ipc/ipc.h> |
||
17 | #include <udebug/udebug.h> |
||
18 | #include <udebug/udebug_ops.h> |
||
19 | |||
20 | /** |
||
21 | * Prepare a thread for a debugging operation. |
||
22 | * |
||
23 | * Simply put, return thread t with t->debug_lock held, |
||
24 | * but only if it verifies all conditions. |
||
25 | * |
||
26 | * Specifically, verifies that thread t exists, is a userspace thread, |
||
27 | * and belongs to the current task (TASK). It also locks t->debug_lock, |
||
28 | * making sure that t->debug_active is true - that the thread is |
||
29 | * in a valid debugging session. |
||
30 | * |
||
31 | * Returns EOK if all went well, or an error code otherwise. |
||
32 | * Interrupts must be already disabled when calling this function. |
||
33 | * |
||
34 | * Note: This function sports complicated locking. |
||
35 | */ |
||
36 | static int _thread_op_begin(thread_t *t) |
||
37 | { |
||
38 | int rc; |
||
39 | task_id_t taskid; |
||
40 | |||
41 | taskid = TASK->taskid; |
||
42 | |||
43 | /* Must lock threads_lock to ensure continued existence of the thread */ |
||
44 | spinlock_lock(&threads_lock); |
||
45 | |||
46 | if (!thread_exists(t)) { |
||
47 | spinlock_unlock(&threads_lock); |
||
48 | return ENOENT; |
||
49 | } |
||
50 | |||
51 | spinlock_lock(&t->debug_lock); |
||
52 | spinlock_lock(&t->lock); |
||
53 | |||
54 | /* Now verify that it's the current task */ |
||
55 | if (t->task != TASK) { |
||
56 | /* No such thread belonging to callee */ |
||
57 | rc = ENOENT; |
||
58 | goto error_exit; |
||
59 | } |
||
60 | |||
61 | /* Verify that 't' is a userspace thread */ |
||
62 | if ((t->flags & THREAD_FLAG_USPACE) == 0) { |
||
63 | /* It's not, deny its existence */ |
||
64 | rc = ENOENT; |
||
65 | goto error_exit; |
||
66 | } |
||
67 | |||
68 | if ((t->debug_active != true) || (t->debug_stop != true)) { |
||
69 | /* Not in debugging session or already has GO */ |
||
70 | rc = ENOENT; |
||
71 | goto error_exit; |
||
72 | } |
||
73 | |||
74 | spinlock_unlock(&threads_lock); |
||
75 | spinlock_unlock(&t->lock); |
||
76 | |||
77 | /* Only t->debug_lock left */ |
||
78 | |||
79 | return EOK; /* All went well */ |
||
80 | |||
81 | |||
82 | /* Executed when a check on the thread fails */ |
||
83 | error_exit: |
||
84 | spinlock_unlock(&t->lock); |
||
85 | spinlock_unlock(&t->debug_lock); |
||
86 | spinlock_unlock(&threads_lock); |
||
87 | |||
88 | /* No locks left here */ |
||
89 | return rc; /* Some errors occured */ |
||
90 | } |
||
91 | |||
92 | |||
/** End a debugging operation started with _thread_op_begin().
 *
 * Releases t->debug_lock, the only lock left held after a successful
 * _thread_op_begin().
 *
 * @param t Thread previously locked by _thread_op_begin().
 */
static void _thread_op_end(thread_t *t)
{
	spinlock_unlock(&t->debug_lock);
}
||
97 | |||
/** Start a debugging session on the current task.
 *
 * Records call->sender as the task's debugger, switches the task's
 * debug state to BEGINNING (or directly to ACTIVE if no thread is
 * currently non-stoppable) and marks all of the task's userspace
 * threads as debug_active.
 *
 * @param call The DEBUG_BEGIN request; kept in TASK->debug_begin_call
 *             while waiting for stoppability.
 * \return 0 (ok, but not done yet), 1 (done) or negative error code.
 */
int udebug_begin(call_t *call)
{
	ipl_t ipl;
	int reply;

	thread_t *t;
	link_t *cur;

	klog_printf("udebug_begin()");

	ipl = interrupts_disable();
	klog_printf("debugging task %llu", TASK->taskid);

	spinlock_lock(&TASK->lock);

	/* Only one debugging session per task at a time */
	if (TASK->dt_state != UDEBUG_TS_INACTIVE) {
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);
		klog_printf("udebug_begin(): busy error");

		return EBUSY;
	}

	TASK->dt_state = UDEBUG_TS_BEGINNING;
	TASK->debug_begin_call = call;
	TASK->debugger = call->sender;

	/* If no thread is non-stoppable, the session is active right away */
	if (TASK->not_stoppable_count == 0) {
		TASK->dt_state = UDEBUG_TS_ACTIVE;
		TASK->debug_begin_call = NULL;
		reply = 1; /* immediate reply */
	} else {
		reply = 0; /* no reply */
	}

	/* Set debug_active on all of the task's userspace threads */

	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
		t = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&t->debug_lock);
		if ((t->flags & THREAD_FLAG_USPACE) != 0)
			t->debug_active = true;
		spinlock_unlock(&t->debug_lock);
	}

	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	klog_printf("udebug_begin() done (%s)",
	    reply ? "reply" : "stoppability wait");

	return reply;
}
||
155 | |||
156 | int udebug_end(void) |
||
157 | { |
||
158 | ipl_t ipl; |
||
159 | int rc; |
||
160 | |||
161 | klog_printf("udebug_end()"); |
||
162 | |||
163 | ipl = interrupts_disable(); |
||
164 | spinlock_lock(&TASK->lock); |
||
165 | |||
166 | rc = udebug_task_cleanup(TASK); |
||
167 | |||
168 | klog_printf("task %llu", TASK->taskid); |
||
169 | |||
170 | spinlock_unlock(&TASK->lock); |
||
171 | interrupts_restore(ipl); |
||
172 | |||
173 | if (rc < 0) return EINVAL; |
||
174 | |||
175 | return 0; |
||
176 | } |
||
177 | |||
/** Resume execution of a stopped thread (DEBUG_GO request).
 *
 * Stores the call in t->debug_go_call, clears t->debug_stop and
 * t->cur_event, and wakes the thread on its go-wait queue.
 *
 * @param t    Thread to resume; must pass _thread_op_begin()'s checks.
 * @param call The DEBUG_GO call, saved in t->debug_go_call.
 * @return 0 on success, or the error code from _thread_op_begin().
 */
int udebug_go(thread_t *t, call_t *call)
{
	ipl_t ipl;
	int rc;

	klog_printf("udebug_go()");

	ipl = interrupts_disable();

	/* On success, this will lock t->debug_lock */
	rc = _thread_op_begin(t);
	if (rc != EOK) {
		interrupts_restore(ipl);
		return rc;
	}

	t->debug_go_call = call;
	t->debug_stop = false;
	t->cur_event = 0;	/* none */

	/*
	 * Neither t's lock nor threads_lock may be held during wakeup
	 * (only t->debug_lock is held here).
	 */
	waitq_wakeup(&t->go_wq, WAKEUP_FIRST);

	_thread_op_end(t);
	interrupts_restore(ipl);

	return 0;
}
||
208 | |||
209 | |||
/** Read the list of userspace thread IDs of the current task.
 *
 * Allocates a buffer and fills it with one unative_t per userspace
 * thread; the thread structure pointer is used as the thread ID.
 * Ownership of the buffer passes to the caller.
 *
 * @param buffer Output: newly allocated array of thread IDs.
 * @param n      Output: number of valid bytes in the buffer.
 * @return 0 on success, EINVAL if the task is not being debugged.
 */
int udebug_thread_read(void **buffer, size_t *n)
{
	thread_t *t;
	link_t *cur;
	unative_t tid;
	unsigned num_threads, copied_ids;
	ipl_t ipl;
	unative_t *id_buffer;
	int flags;

	klog_printf("udebug_thread_read()");

	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);

	/* Verify task state */
	if (TASK->dt_state != UDEBUG_TS_ACTIVE) {
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);

		return EINVAL;
	}

	/* Count the threads first */

	num_threads = 0;
	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
		/* Count all threads, to be on the safe side */
		++num_threads;
	}

	/* Allocate a buffer and copy down the threads' ids */
	/* NOTE(review): allocating while holding TASK->lock with interrupts
	 * disabled; the original "???" marker suggests this was already
	 * flagged for review — confirm malloc(.., 0) cannot block here. */
	id_buffer = malloc(num_threads * sizeof(unative_t), 0); // ???

	copied_ids = 0;
	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
		t = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&t->lock);
		flags = t->flags;
		spinlock_unlock(&t->lock);

		/* Not interested in kernel threads */
		if ((flags & THREAD_FLAG_USPACE) != 0) {
			/* Using thread struct pointer for identification */
			tid = (unative_t) t;
			id_buffer[copied_ids++] = tid;
		}
	}

	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	*buffer = id_buffer;
	*n = copied_ids * sizeof(unative_t);

	return 0;
}
||
268 | |||
/** Read the syscall arguments of a thread stopped in a syscall event.
 *
 * Copies the six words from t->syscall_args into a newly allocated
 * buffer; ownership of the buffer passes to the caller.
 *
 * @param t      Thread to read from; must be stopped with
 *               cur_event == UDEBUG_EVENT_SYSCALL.
 * @param buffer Output: newly allocated copy of the arguments.
 * @return 0 on success, EINVAL if the thread is not in a syscall
 *         event, or the error code from _thread_op_begin().
 */
int udebug_args_read(thread_t *t, void **buffer)
{
	int rc;
	ipl_t ipl;
	unative_t *arg_buffer;

	klog_printf("udebug_args_read()");

	ipl = interrupts_disable();

	/* On success, this will lock t->debug_lock */
	rc = _thread_op_begin(t);
	if (rc != EOK) {
		interrupts_restore(ipl);
		return rc;
	}

	/* Additionally we need to verify that we are inside a syscall */
	if (t->cur_event != UDEBUG_EVENT_SYSCALL) {
		_thread_op_end(t);
		interrupts_restore(ipl);

		return EINVAL;
	}

	/* Copy to a local buffer before releasing the lock */
	/* NOTE(review): allocation happens with t->debug_lock held and
	 * interrupts disabled — the original "???" marker flags this. */
	arg_buffer = malloc(6 * sizeof(unative_t), 0); // ???
	memcpy(arg_buffer, t->syscall_args, 6 * sizeof(unative_t));

	_thread_op_end(t);
	interrupts_restore(ipl);

	*buffer = arg_buffer;
	return 0;
}
||
304 | |||
/** Read the userspace register state (istate_t) of a stopped thread.
 *
 * Copies t->uspace_state into a newly allocated buffer; ownership of
 * the buffer passes to the caller.
 *
 * @param t      Thread to read from.
 * @param buffer Output: newly allocated copy of the istate_t.
 * @param n      Output: sizeof(istate_t).
 * @return 0 on success, EBUSY if the interrupt state is not available,
 *         or the error code from _thread_op_begin().
 */
int udebug_regs_read(thread_t *t, void **buffer, size_t *n)
{
	istate_t *state;
	void *regs_buffer;
	int rc;
	ipl_t ipl;

	klog_printf("udebug_regs_read()");

	ipl = interrupts_disable();

	/* On success, this will lock t->debug_lock */
	rc = _thread_op_begin(t);
	if (rc != EOK) {
		interrupts_restore(ipl);
		return rc;
	}

	state = t->uspace_state;
	if (state == NULL) {
		_thread_op_end(t);
		interrupts_restore(ipl);
		klog_printf("udebug_regs_read() - istate not available");
		return EBUSY;
	}

	/* Copy to an allocated buffer */
	/* NOTE(review): allocation with t->debug_lock held and interrupts
	 * disabled — the original "???" marker flags this. */
	regs_buffer = malloc(sizeof(istate_t), 0); // ???
	memcpy(regs_buffer, state, sizeof(istate_t));

	_thread_op_end(t);
	interrupts_restore(ipl);

	*buffer = regs_buffer;
	*n = sizeof(istate_t);

	return 0;
}
||
343 | |||
344 | int udebug_regs_write(thread_t *t, void *buffer) |
||
345 | { |
||
346 | int rc; |
||
347 | istate_t *state; |
||
348 | ipl_t ipl; |
||
349 | |||
350 | klog_printf("udebug_regs_write()"); |
||
351 | |||
352 | /* Try to change the thread's uspace_state */ |
||
353 | |||
354 | ipl = interrupts_disable(); |
||
355 | |||
356 | /* On success, this will lock t->debug_lock */ |
||
357 | rc = _thread_op_begin(t); |
||
358 | if (rc != EOK) { |
||
359 | interrupts_restore(ipl); |
||
360 | return rc; |
||
361 | } |
||
362 | |||
363 | state = t->uspace_state; |
||
364 | if (state == NULL) { |
||
365 | _thread_op_end(t); |
||
366 | interrupts_restore(ipl); |
||
367 | klog_printf("udebug_regs_write() - istate not available"); |
||
368 | |||
369 | return EBUSY; |
||
370 | } |
||
371 | |||
372 | memcpy(t->uspace_state, buffer, sizeof(t->uspace_state)); |
||
373 | |||
374 | _thread_op_end(t); |
||
375 | interrupts_restore(ipl); |
||
376 | |||
377 | return 0; |
||
378 | } |
||
379 | |||
380 | |||
381 | int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer) |
||
382 | { |
||
383 | void *data_buffer; |
||
384 | int rc; |
||
385 | |||
386 | klog_printf("udebug_mem_read()"); |
||
387 | |||
388 | data_buffer = malloc(n, 0); // ??? |
||
389 | klog_printf("udebug_mem_read: src=%u, size=%u", uspace_addr, n); |
||
390 | |||
391 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
||
392 | * be a problem */ |
||
393 | rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n); |
||
394 | if (rc) return rc; |
||
395 | |||
396 | *buffer = data_buffer; |
||
397 | return 0; |
||
398 | } |
||
399 | |||
400 | int udebug_mem_write(unative_t uspace_addr, void *data, size_t n) |
||
401 | { |
||
402 | int rc; |
||
403 | udebug_task_state_t dts; |
||
404 | |||
405 | klog_printf("udebug_mem_write()"); |
||
406 | |||
407 | /* Verify task state */ |
||
408 | spinlock_lock(&TASK->lock); |
||
409 | dts = TASK->dt_state; |
||
410 | spinlock_unlock(&TASK->lock); |
||
411 | |||
412 | if (dts != UDEBUG_TS_ACTIVE) |
||
413 | return EBUSY; |
||
414 | |||
415 | klog_printf("dst=%u, size=%u", uspace_addr, n); |
||
416 | |||
417 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
||
418 | * be a problem */ |
||
419 | rc = copy_to_uspace((void *)uspace_addr, data, n); |
||
420 | if (rc) return rc; |
||
421 | |||
422 | return 0; |
||
423 | } |
||
424 | |||
425 | /** @} |
||
426 | */ |