Rev 2913 | Rev 3013 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2913 | Rev 2919 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (c) 2008 Jiri Svoboda |
2 | * Copyright (c) 2008 Jiri Svoboda |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup generic |
29 | /** @addtogroup generic |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | 32 | ||
33 | /** |
33 | /** |
34 | * @file |
34 | * @file |
35 | * @brief Udebug operations. |
35 | * @brief Udebug operations. |
36 | */ |
36 | */ |
37 | 37 | ||
38 | #include <console/klog.h> |
38 | #include <console/klog.h> |
39 | #include <proc/task.h> |
39 | #include <proc/task.h> |
40 | #include <proc/thread.h> |
40 | #include <proc/thread.h> |
41 | #include <arch.h> |
41 | #include <arch.h> |
42 | #include <errno.h> |
42 | #include <errno.h> |
43 | #include <syscall/copy.h> |
43 | #include <syscall/copy.h> |
44 | #include <ipc/ipc.h> |
44 | #include <ipc/ipc.h> |
45 | #include <udebug/udebug.h> |
45 | #include <udebug/udebug.h> |
46 | #include <udebug/udebug_ops.h> |
46 | #include <udebug/udebug_ops.h> |
47 | 47 | ||
48 | /** |
48 | /** |
49 | * Prepare a thread for a debugging operation. |
49 | * Prepare a thread for a debugging operation. |
50 | * |
50 | * |
51 | * Simply put, return thread t with t->debug_lock held, |
51 | * Simply put, return thread t with t->debug_lock held, |
52 | * but only if it verifies all conditions. |
52 | * but only if it verifies all conditions. |
53 | * |
53 | * |
54 | * Specifically, verifies that thread t exists, is a userspace thread, |
54 | * Specifically, verifies that thread t exists, is a userspace thread, |
55 | * and belongs to the current task (TASK). Verifies, that the thread |
55 | * and belongs to the current task (TASK). Verifies, that the thread |
56 | * has (or hasn't) go according to having_go (typically false). |
56 | * has (or hasn't) go according to having_go (typically false). |
57 | * It also locks t->debug_lock, making sure that t->debug_active is true |
57 | * It also locks t->debug_lock, making sure that t->debug_active is true |
58 | * - that the thread is in a valid debugging session. |
58 | * - that the thread is in a valid debugging session. |
59 | * |
59 | * |
60 | * Returns EOK if all went well, or an error code otherwise. |
60 | * Returns EOK if all went well, or an error code otherwise. |
61 | * Interrupts must be already disabled when calling this function. |
61 | * Interrupts must be already disabled when calling this function. |
62 | * |
62 | * |
63 | * Note: This function sports complicated locking. |
63 | * Note: This function sports complicated locking. |
64 | */ |
64 | */ |
65 | static int _thread_op_begin(thread_t *t, bool having_go) |
65 | static int _thread_op_begin(thread_t *t, bool having_go) |
66 | { |
66 | { |
67 | int rc; |
67 | int rc; |
68 | task_id_t taskid; |
68 | task_id_t taskid; |
69 | 69 | ||
70 | taskid = TASK->taskid; |
70 | taskid = TASK->taskid; |
71 | 71 | ||
72 | /* Must lock threads_lock to ensure continued existence of the thread */ |
72 | /* Must lock threads_lock to ensure continued existence of the thread */ |
73 | spinlock_lock(&threads_lock); |
73 | spinlock_lock(&threads_lock); |
74 | 74 | ||
75 | if (!thread_exists(t)) { |
75 | if (!thread_exists(t)) { |
76 | spinlock_unlock(&threads_lock); |
76 | spinlock_unlock(&threads_lock); |
77 | return ENOENT; |
77 | return ENOENT; |
78 | } |
78 | } |
79 | 79 | ||
80 | spinlock_lock(&t->debug_lock); |
80 | spinlock_lock(&t->debug_lock); |
81 | spinlock_lock(&t->lock); |
81 | spinlock_lock(&t->lock); |
82 | 82 | ||
83 | /* Now verify that it's the current task */ |
83 | /* Now verify that it's the current task */ |
84 | if (t->task != TASK) { |
84 | if (t->task != TASK) { |
85 | /* No such thread belonging to callee */ |
85 | /* No such thread belonging to callee */ |
86 | rc = ENOENT; |
86 | rc = ENOENT; |
87 | goto error_exit; |
87 | goto error_exit; |
88 | } |
88 | } |
89 | 89 | ||
90 | /* Verify that 't' is a userspace thread */ |
90 | /* Verify that 't' is a userspace thread */ |
91 | if ((t->flags & THREAD_FLAG_USPACE) == 0) { |
91 | if ((t->flags & THREAD_FLAG_USPACE) == 0) { |
92 | /* It's not, deny its existence */ |
92 | /* It's not, deny its existence */ |
93 | rc = ENOENT; |
93 | rc = ENOENT; |
94 | goto error_exit; |
94 | goto error_exit; |
95 | } |
95 | } |
96 | 96 | ||
97 | if ((t->debug_active != true) || (!t->debug_stop != having_go)) { |
97 | if ((t->debug_active != true) || (!t->debug_stop != having_go)) { |
98 | /* Not in debugging session or undesired GO state */ |
98 | /* Not in debugging session or undesired GO state */ |
99 | rc = EINVAL; |
99 | rc = EINVAL; |
100 | goto error_exit; |
100 | goto error_exit; |
101 | } |
101 | } |
102 | 102 | ||
103 | spinlock_unlock(&threads_lock); |
103 | spinlock_unlock(&threads_lock); |
104 | spinlock_unlock(&t->lock); |
104 | spinlock_unlock(&t->lock); |
105 | 105 | ||
106 | /* Only t->debug_lock left */ |
106 | /* Only t->debug_lock left */ |
107 | 107 | ||
108 | return EOK; /* All went well */ |
108 | return EOK; /* All went well */ |
109 | 109 | ||
110 | 110 | ||
111 | /* Executed when a check on the thread fails */ |
111 | /* Executed when a check on the thread fails */ |
112 | error_exit: |
112 | error_exit: |
113 | spinlock_unlock(&t->lock); |
113 | spinlock_unlock(&t->lock); |
114 | spinlock_unlock(&t->debug_lock); |
114 | spinlock_unlock(&t->debug_lock); |
115 | spinlock_unlock(&threads_lock); |
115 | spinlock_unlock(&threads_lock); |
116 | 116 | ||
117 | /* No locks left here */ |
117 | /* No locks left here */ |
118 | return rc; /* Some errors occured */ |
118 | return rc; /* Some errors occured */ |
119 | } |
119 | } |
120 | 120 | ||
121 | 121 | ||
/** Finish a debugging operation started by _thread_op_begin().
 *
 * Releases the only lock still held on success: t->debug_lock.
 */
static void _thread_op_end(thread_t *t)
{
	spinlock_unlock(&t->debug_lock);
}
126 | 126 | ||
/** Start a debugging session for the current task (TASK).
 *
 * Marks the task as entering a debugging session and flags all of its
 * userspace threads as debug_active. If no thread is currently in a
 * non-stoppable section, the session becomes active immediately and
 * the call is answered right away; otherwise the answer is deferred
 * until the task becomes stoppable.
 *
 * \return 0 (ok, but not done yet), 1 (done) or negative error code.
 */
int udebug_begin(call_t *call)
{
	ipl_t ipl;
	int reply;	/* 1 = answer immediately, 0 = defer the answer */

	thread_t *t;
	link_t *cur;

	klog_printf("udebug_begin()");

	ipl = interrupts_disable();
	klog_printf("debugging task %llu", TASK->taskid);

	spinlock_lock(&TASK->lock);

	/* Only one debugging session per task at a time */
	if (TASK->dt_state != UDEBUG_TS_INACTIVE) {
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);
		klog_printf("udebug_begin(): busy error");

		return EBUSY;
	}

	TASK->dt_state = UDEBUG_TS_BEGINNING;
	TASK->debug_begin_call = call;
	TASK->debugger = call->sender;

	if (TASK->not_stoppable_count == 0) {
		/* Everything is stoppable - session is active at once */
		TASK->dt_state = UDEBUG_TS_ACTIVE;
		TASK->debug_begin_call = NULL;
		reply = 1;	/* immediate reply */
	} else {
		reply = 0;	/* no reply */
	}

	/* Set debug_active on all of the task's userspace threads */

	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
		t = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&t->debug_lock);
		if ((t->flags & THREAD_FLAG_USPACE) != 0)
			t->debug_active = true;
		spinlock_unlock(&t->debug_lock);
	}

	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	klog_printf("udebug_begin() done (%s)",
	    reply ? "reply" : "stoppability wait");

	return reply;
}
184 | 184 | ||
/** End the debugging session of the current task.
 *
 * Delegates the actual teardown to udebug_task_cleanup().
 *
 * \return 0 on success, EINVAL if the cleanup reported an error.
 */
int udebug_end(void)
{
	ipl_t ipl;
	int rc;

	klog_printf("udebug_end()");

	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);

	/* Tear down all per-task debugging state */
	rc = udebug_task_cleanup(TASK);

	klog_printf("task %llu", TASK->taskid);

	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	/* Collapse any negative cleanup error code into EINVAL */
	if (rc < 0) return EINVAL;

	return 0;
}
206 | 206 | ||
207 | int udebug_set_evmask(udebug_evmask_t mask) |
207 | int udebug_set_evmask(udebug_evmask_t mask) |
208 | { |
208 | { |
209 | ipl_t ipl; |
209 | ipl_t ipl; |
210 | 210 | ||
211 | klog_printf("udebug_set_mask()"); |
211 | klog_printf("udebug_set_mask()"); |
212 | 212 | ||
213 | ipl = interrupts_disable(); |
213 | ipl = interrupts_disable(); |
214 | klog_printf("debugging task %llu", TASK->taskid); |
214 | klog_printf("debugging task %llu", TASK->taskid); |
215 | 215 | ||
216 | spinlock_lock(&TASK->lock); |
216 | spinlock_lock(&TASK->lock); |
217 | 217 | ||
218 | if (TASK->dt_state != UDEBUG_TS_ACTIVE) { |
218 | if (TASK->dt_state != UDEBUG_TS_ACTIVE) { |
219 | spinlock_unlock(&TASK->lock); |
219 | spinlock_unlock(&TASK->lock); |
220 | interrupts_restore(ipl); |
220 | interrupts_restore(ipl); |
221 | klog_printf("udebug_set_mask(): not active debuging session"); |
221 | klog_printf("udebug_set_mask(): not active debuging session"); |
222 | 222 | ||
223 | return EINVAL; |
223 | return EINVAL; |
224 | } |
224 | } |
225 | 225 | ||
226 | TASK->debug_evmask = mask; |
226 | TASK->debug_evmask = mask; |
227 | 227 | ||
228 | spinlock_unlock(&TASK->lock); |
228 | spinlock_unlock(&TASK->lock); |
229 | interrupts_restore(ipl); |
229 | interrupts_restore(ipl); |
230 | 230 | ||
231 | return 0; |
231 | return 0; |
232 | } |
232 | } |
233 | 233 | ||
234 | 234 | ||
/** Give the thread GO, resuming its execution.
 *
 * The thread must currently lack GO (i.e. be stopped). Stores the GO
 * call so it can be answered when the next debug event occurs, clears
 * the stop flag and wakes the thread from its go_wq wait queue.
 *
 * @param t	Thread to resume.
 * @param call	GO call to be answered on the next debug event.
 * @return	0 on success, error code from _thread_op_begin() otherwise.
 */
int udebug_go(thread_t *t, call_t *call)
{
	ipl_t ipl;
	int rc;

//	klog_printf("udebug_go()");

	ipl = interrupts_disable();

	/* On success, this will lock t->debug_lock */
	rc = _thread_op_begin(t, false);
	if (rc != EOK) {
		interrupts_restore(ipl);
		return rc;
	}

	t->debug_go_call = call;
	t->debug_stop = false;
	t->cur_event = 0;	/* none */

	/*
	 * Neither t's lock nor threads_lock may be held during wakeup
	 */
	waitq_wakeup(&t->go_wq, WAKEUP_FIRST);

	_thread_op_end(t);
	interrupts_restore(ipl);

	return 0;
}
265 | 265 | ||
266 | int udebug_stop(thread_t *t, call_t *call) |
266 | int udebug_stop(thread_t *t, call_t *call) |
267 | { |
267 | { |
268 | ipl_t ipl; |
268 | ipl_t ipl; |
269 | int rc; |
269 | int rc; |
270 | 270 | ||
271 | klog_printf("udebug_stop()"); |
271 | klog_printf("udebug_stop()"); |
272 | 272 | ||
273 | ipl = interrupts_disable(); |
273 | ipl = interrupts_disable(); |
274 | 274 | ||
275 | /* |
275 | /* |
276 | * On success, this will lock t->debug_lock. Note that this makes sure |
276 | * On success, this will lock t->debug_lock. Note that this makes sure |
277 | * the thread is not stopped. |
277 | * the thread is not stopped. |
278 | */ |
278 | */ |
279 | rc = _thread_op_begin(t, true); |
279 | rc = _thread_op_begin(t, true); |
280 | if (rc != EOK) { |
280 | if (rc != EOK) { |
281 | interrupts_restore(ipl); |
281 | interrupts_restore(ipl); |
282 | return rc; |
282 | return rc; |
283 | } |
283 | } |
284 | 284 | ||
285 | /* Take GO away from the thread */ |
285 | /* Take GO away from the thread */ |
286 | t->debug_stop = true; |
286 | t->debug_stop = true; |
287 | 287 | ||
288 | if (!t->debug_stoppable) { |
288 | if (!t->debug_stoppable) { |
289 | /* Answer will be sent when the thread becomes stoppable */ |
289 | /* Answer will be sent when the thread becomes stoppable */ |
290 | _thread_op_end(t); |
290 | _thread_op_end(t); |
291 | interrupts_restore(ipl); |
291 | interrupts_restore(ipl); |
292 | return 0; |
292 | return 0; |
293 | } |
293 | } |
294 | 294 | ||
295 | /* |
295 | /* |
296 | * Answer GO call |
296 | * Answer GO call |
297 | */ |
297 | */ |
298 | klog_printf("udebug_stop - answering go call"); |
298 | klog_printf("udebug_stop - answering go call"); |
299 | 299 | ||
300 | /* Make sure nobody takes this call away from us */ |
300 | /* Make sure nobody takes this call away from us */ |
301 | call = t->debug_go_call; |
301 | call = t->debug_go_call; |
302 | t->debug_go_call = NULL; |
302 | t->debug_go_call = NULL; |
303 | 303 | ||
304 | IPC_SET_RETVAL(call->data, 0); |
304 | IPC_SET_RETVAL(call->data, 0); |
305 | IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP); |
305 | IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP); |
306 | klog_printf("udebug_stop/ipc_answer"); |
306 | klog_printf("udebug_stop/ipc_answer"); |
307 | 307 | ||
308 | THREAD->cur_event = UDEBUG_EVENT_STOP; |
308 | THREAD->cur_event = UDEBUG_EVENT_STOP; |
309 | _thread_op_end(t); |
309 | _thread_op_end(t); |
310 | 310 | ||
311 | spinlock_lock(&TASK->lock); |
311 | spinlock_lock(&TASK->lock); |
312 | ipc_answer(&TASK->answerbox, call); |
312 | ipc_answer(&TASK->answerbox, call); |
313 | spinlock_unlock(&TASK->lock); |
313 | spinlock_unlock(&TASK->lock); |
314 | 314 | ||
315 | interrupts_restore(ipl); |
315 | interrupts_restore(ipl); |
316 | klog_printf("udebog_stop/done"); |
316 | klog_printf("udebog_stop/done"); |
317 | return 0; |
317 | return 0; |
318 | } |
318 | } |
319 | 319 | ||
320 | int udebug_thread_read(void **buffer, size_t buf_size, size_t *n) |
320 | int udebug_thread_read(void **buffer, size_t buf_size, size_t *n) |
321 | { |
321 | { |
322 | thread_t *t; |
322 | thread_t *t; |
323 | link_t *cur; |
323 | link_t *cur; |
324 | unative_t tid; |
324 | unative_t tid; |
325 | unsigned copied_ids; |
325 | unsigned copied_ids; |
326 | ipl_t ipl; |
326 | ipl_t ipl; |
327 | unative_t *id_buffer; |
327 | unative_t *id_buffer; |
328 | int flags; |
328 | int flags; |
329 | size_t max_ids; |
329 | size_t max_ids; |
330 | 330 | ||
331 | klog_printf("udebug_thread_read()"); |
331 | klog_printf("udebug_thread_read()"); |
332 | 332 | ||
333 | /* Allocate a buffer to hold thread IDs */ |
333 | /* Allocate a buffer to hold thread IDs */ |
334 | id_buffer = malloc(buf_size, 0); |
334 | id_buffer = malloc(buf_size, 0); |
335 | 335 | ||
336 | ipl = interrupts_disable(); |
336 | ipl = interrupts_disable(); |
337 | spinlock_lock(&TASK->lock); |
337 | spinlock_lock(&TASK->lock); |
338 | 338 | ||
339 | /* Verify task state */ |
339 | /* Verify task state */ |
340 | if (TASK->dt_state != UDEBUG_TS_ACTIVE) { |
340 | if (TASK->dt_state != UDEBUG_TS_ACTIVE) { |
341 | spinlock_unlock(&TASK->lock); |
341 | spinlock_unlock(&TASK->lock); |
342 | interrupts_restore(ipl); |
342 | interrupts_restore(ipl); |
343 | 343 | ||
344 | return EINVAL; |
344 | return EINVAL; |
345 | } |
345 | } |
346 | 346 | ||
347 | /* Copy down the thread IDs */ |
347 | /* Copy down the thread IDs */ |
348 | 348 | ||
349 | max_ids = buf_size / sizeof(unative_t); |
349 | max_ids = buf_size / sizeof(unative_t); |
350 | copied_ids = 0; |
350 | copied_ids = 0; |
351 | 351 | ||
352 | for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
352 | for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
353 | /* Do not write past end of buffer */ |
353 | /* Do not write past end of buffer */ |
354 | if (copied_ids >= max_ids) break; |
354 | if (copied_ids >= max_ids) break; |
355 | 355 | ||
356 | t = list_get_instance(cur, thread_t, th_link); |
356 | t = list_get_instance(cur, thread_t, th_link); |
357 | 357 | ||
358 | spinlock_lock(&t->lock); |
358 | spinlock_lock(&t->lock); |
359 | flags = t->flags; |
359 | flags = t->flags; |
360 | spinlock_unlock(&t->lock); |
360 | spinlock_unlock(&t->lock); |
361 | 361 | ||
362 | /* Not interested in kernel threads */ |
362 | /* Not interested in kernel threads */ |
363 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
363 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
364 | /* Using thread struct pointer as identification hash */ |
364 | /* Using thread struct pointer as identification hash */ |
365 | tid = (unative_t) t; |
365 | tid = (unative_t) t; |
366 | id_buffer[copied_ids++] = tid; |
366 | id_buffer[copied_ids++] = tid; |
367 | } |
367 | } |
368 | } |
368 | } |
369 | 369 | ||
370 | spinlock_unlock(&TASK->lock); |
370 | spinlock_unlock(&TASK->lock); |
371 | interrupts_restore(ipl); |
371 | interrupts_restore(ipl); |
372 | 372 | ||
373 | *buffer = id_buffer; |
373 | *buffer = id_buffer; |
374 | *n = copied_ids * sizeof(unative_t); |
374 | *n = copied_ids * sizeof(unative_t); |
375 | 375 | ||
376 | return 0; |
376 | return 0; |
377 | } |
377 | } |
378 | 378 | ||
379 | int udebug_args_read(thread_t *t, void **buffer) |
379 | int udebug_args_read(thread_t *t, void **buffer) |
380 | { |
380 | { |
381 | int rc; |
381 | int rc; |
382 | ipl_t ipl; |
382 | ipl_t ipl; |
383 | unative_t *arg_buffer; |
383 | unative_t *arg_buffer; |
384 | 384 | ||
385 | klog_printf("udebug_args_read()"); |
385 | klog_printf("udebug_args_read()"); |
386 | 386 | ||
387 | /* Prepare a buffer to hold the arguments */ |
387 | /* Prepare a buffer to hold the arguments */ |
388 | arg_buffer = malloc(6 * sizeof(unative_t), 0); |
388 | arg_buffer = malloc(6 * sizeof(unative_t), 0); |
389 | 389 | ||
390 | ipl = interrupts_disable(); |
390 | ipl = interrupts_disable(); |
391 | 391 | ||
392 | /* On success, this will lock t->debug_lock */ |
392 | /* On success, this will lock t->debug_lock */ |
393 | rc = _thread_op_begin(t, false); |
393 | rc = _thread_op_begin(t, false); |
394 | if (rc != EOK) { |
394 | if (rc != EOK) { |
395 | interrupts_restore(ipl); |
395 | interrupts_restore(ipl); |
396 | return rc; |
396 | return rc; |
397 | } |
397 | } |
398 | 398 | ||
399 | /* Additionally we need to verify that we are inside a syscall */ |
399 | /* Additionally we need to verify that we are inside a syscall */ |
400 | if (t->cur_event != UDEBUG_EVENT_SYSCALL_B && |
400 | if (t->cur_event != UDEBUG_EVENT_SYSCALL_B && |
401 | t->cur_event != UDEBUG_EVENT_SYSCALL_E) { |
401 | t->cur_event != UDEBUG_EVENT_SYSCALL_E) { |
402 | _thread_op_end(t); |
402 | _thread_op_end(t); |
403 | interrupts_restore(ipl); |
403 | interrupts_restore(ipl); |
404 | 404 | ||
405 | return EINVAL; |
405 | return EINVAL; |
406 | } |
406 | } |
407 | 407 | ||
408 | /* Copy to a local buffer before releasing the lock */ |
408 | /* Copy to a local buffer before releasing the lock */ |
409 | memcpy(arg_buffer, t->syscall_args, 6 * sizeof(unative_t)); |
409 | memcpy(arg_buffer, t->syscall_args, 6 * sizeof(unative_t)); |
410 | 410 | ||
411 | _thread_op_end(t); |
411 | _thread_op_end(t); |
412 | interrupts_restore(ipl); |
412 | interrupts_restore(ipl); |
413 | 413 | ||
414 | *buffer = arg_buffer; |
414 | *buffer = arg_buffer; |
415 | return 0; |
415 | return 0; |
416 | } |
416 | } |
417 | 417 | ||
/** Read the register state (istate) of a thread.
 *
 * Copies the thread's saved userspace interrupt state into the
 * caller-supplied buffer, which must hold at least sizeof(istate_t)
 * bytes.
 *
 * @param t		Thread whose registers to read.
 * @param buffer	Destination buffer (at least sizeof(istate_t)).
 * @return		0 on success, EBUSY if the istate is not
 *			available, or an error from _thread_op_begin().
 */
int udebug_regs_read(thread_t *t, void *buffer)
{
	istate_t *state;
	int rc;
	ipl_t ipl;

	klog_printf("udebug_regs_read()");

	ipl = interrupts_disable();

	/* On success, this will lock t->debug_lock */
	rc = _thread_op_begin(t, false);
	if (rc != EOK) {
		interrupts_restore(ipl);
		return rc;
	}

	state = t->uspace_state;
	if (state == NULL) {
		_thread_op_end(t);
		interrupts_restore(ipl);
		klog_printf("udebug_regs_read() - istate not available");
		return EBUSY;
	}

	/* Copy to the caller-supplied buffer */
	memcpy(buffer, state, sizeof(istate_t));

	_thread_op_end(t);
	interrupts_restore(ipl);

	return 0;
}
458 | 451 | ||
/** Write the register state (istate) of a thread.
 *
 * Overwrites the thread's saved userspace interrupt state with the
 * contents of the caller-supplied buffer, which must hold at least
 * sizeof(istate_t) bytes.
 *
 * @param t		Thread whose registers to write.
 * @param buffer	Source buffer (at least sizeof(istate_t)).
 * @return		0 on success, EBUSY if the istate is not
 *			available, or an error from _thread_op_begin().
 */
int udebug_regs_write(thread_t *t, void *buffer)
{
	int rc;
	istate_t *state;
	ipl_t ipl;

	klog_printf("udebug_regs_write()");

	/* Try to change the thread's uspace_state */

	ipl = interrupts_disable();

	/* On success, this will lock t->debug_lock */
	rc = _thread_op_begin(t, false);
	if (rc != EOK) {
		klog_printf("error locking thread");
		interrupts_restore(ipl);
		return rc;
	}

	state = t->uspace_state;
	if (state == NULL) {
		_thread_op_end(t);
		interrupts_restore(ipl);
		klog_printf("udebug_regs_write() - istate not available");

		return EBUSY;
	}

	/* Copy the full istate structure from the caller's buffer */
	memcpy(t->uspace_state, buffer, sizeof(istate_t));

	_thread_op_end(t);
	interrupts_restore(ipl);

	return 0;
}
494 | 488 | ||
495 | 489 | ||
496 | int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer) |
490 | int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer) |
497 | { |
491 | { |
498 | void *data_buffer; |
492 | void *data_buffer; |
499 | int rc; |
493 | int rc; |
500 | 494 | ||
501 | klog_printf("udebug_mem_read()"); |
495 | klog_printf("udebug_mem_read()"); |
502 | 496 | ||
503 | data_buffer = malloc(n, 0); |
497 | data_buffer = malloc(n, 0); |
504 | 498 | ||
505 | klog_printf("udebug_mem_read: src=%u, size=%u", uspace_addr, n); |
499 | klog_printf("udebug_mem_read: src=%u, size=%u", uspace_addr, n); |
506 | 500 | ||
507 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
501 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
508 | * be a problem */ |
502 | * be a problem */ |
509 | rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n); |
503 | rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n); |
510 | if (rc) return rc; |
504 | if (rc) return rc; |
511 | 505 | ||
512 | *buffer = data_buffer; |
506 | *buffer = data_buffer; |
513 | return 0; |
507 | return 0; |
514 | } |
508 | } |
515 | 509 | ||
516 | int udebug_mem_write(unative_t uspace_addr, void *data, size_t n) |
510 | int udebug_mem_write(unative_t uspace_addr, void *data, size_t n) |
517 | { |
511 | { |
518 | int rc; |
512 | int rc; |
519 | udebug_task_state_t dts; |
513 | udebug_task_state_t dts; |
520 | 514 | ||
521 | klog_printf("udebug_mem_write()"); |
515 | klog_printf("udebug_mem_write()"); |
522 | 516 | ||
523 | /* Verify task state */ |
517 | /* Verify task state */ |
524 | spinlock_lock(&TASK->lock); |
518 | spinlock_lock(&TASK->lock); |
525 | dts = TASK->dt_state; |
519 | dts = TASK->dt_state; |
526 | spinlock_unlock(&TASK->lock); |
520 | spinlock_unlock(&TASK->lock); |
527 | 521 | ||
528 | if (dts != UDEBUG_TS_ACTIVE) |
522 | if (dts != UDEBUG_TS_ACTIVE) |
529 | return EBUSY; |
523 | return EBUSY; |
530 | 524 | ||
531 | klog_printf("dst=%u, size=%u", uspace_addr, n); |
525 | klog_printf("dst=%u, size=%u", uspace_addr, n); |
532 | 526 | ||
533 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
527 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
534 | * be a problem */ |
528 | * be a problem */ |
535 | rc = copy_to_uspace((void *)uspace_addr, data, n); |
529 | rc = copy_to_uspace((void *)uspace_addr, data, n); |
536 | if (rc) return rc; |
530 | if (rc) return rc; |
537 | 531 | ||
538 | return 0; |
532 | return 0; |
539 | } |
533 | } |
540 | 534 | ||
541 | /** @} |
535 | /** @} |
542 | */ |
536 | */ |
543 | 537 |