Rev 4605 | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 4605 | Rev 4616 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (c) 2008 Jiri Svoboda |
2 | * Copyright (c) 2008 Jiri Svoboda |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup generic |
29 | /** @addtogroup generic |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | 32 | ||
33 | /** |
33 | /** |
34 | * @file |
34 | * @file |
35 | * @brief Udebug operations. |
35 | * @brief Udebug operations. |
36 | * |
36 | * |
37 | * Udebug operations on tasks and threads are implemented here. The |
37 | * Udebug operations on tasks and threads are implemented here. The |
38 | * functions defined here are called from the udebug_ipc module |
38 | * functions defined here are called from the udebug_ipc module |
39 | * when servicing udebug IPC messages. |
39 | * when servicing udebug IPC messages. |
40 | */ |
40 | */ |
41 | 41 | ||
42 | #include <debug.h> |
42 | #include <debug.h> |
43 | #include <proc/task.h> |
43 | #include <proc/task.h> |
44 | #include <proc/thread.h> |
44 | #include <proc/thread.h> |
45 | #include <arch.h> |
45 | #include <arch.h> |
46 | #include <errno.h> |
46 | #include <errno.h> |
47 | #include <print.h> |
47 | #include <print.h> |
48 | #include <syscall/copy.h> |
48 | #include <syscall/copy.h> |
49 | #include <ipc/ipc.h> |
49 | #include <ipc/ipc.h> |
50 | #include <udebug/udebug.h> |
50 | #include <udebug/udebug.h> |
51 | #include <udebug/udebug_ops.h> |
51 | #include <udebug/udebug_ops.h> |
52 | 52 | ||
53 | /** |
53 | /** |
54 | * Prepare a thread for a debugging operation. |
54 | * Prepare a thread for a debugging operation. |
55 | * |
55 | * |
56 | * Simply put, return thread t with t->udebug.lock held, |
56 | * Simply put, return thread t with t->udebug.lock held, |
57 | * but only if it verifies all conditions. |
57 | * but only if it verifies all conditions. |
58 | * |
58 | * |
59 | * Specifically, verifies that thread t exists, is a userspace thread, |
59 | * Specifically, verifies that thread t exists, is a userspace thread, |
60 | * and belongs to the current task (TASK). Verifies, that the thread |
60 | * and belongs to the current task (TASK). Verifies, that the thread |
61 | * is (or is not) go according to being_go (typically false). |
61 | * is (or is not) go according to being_go (typically false). |
62 | * It also locks t->udebug.lock, making sure that t->udebug.active |
62 | * It also locks t->udebug.lock, making sure that t->udebug.active |
63 | * is true - that the thread is in a valid debugging session. |
63 | * is true - that the thread is in a valid debugging session. |
64 | * |
64 | * |
65 | * With this verified and the t->udebug.lock mutex held, it is ensured |
65 | * With this verified and the t->udebug.lock mutex held, it is ensured |
66 | * that the thread cannot leave the debugging session, let alone cease |
66 | * that the thread cannot leave the debugging session, let alone cease |
67 | * to exist. |
67 | * to exist. |
68 | * |
68 | * |
69 | * In this function, holding the TASK->udebug.lock mutex prevents the |
69 | * In this function, holding the TASK->udebug.lock mutex prevents the |
70 | * thread from leaving the debugging session, while relaxing from |
70 | * thread from leaving the debugging session, while relaxing from |
71 | * the t->lock spinlock to the t->udebug.lock mutex. |
71 | * the t->lock spinlock to the t->udebug.lock mutex. |
72 | * |
72 | * |
73 | * @param t Pointer, need not at all be valid. |
73 | * @param t Pointer, need not at all be valid. |
74 | * @param being_go Required thread state. |
74 | * @param being_go Required thread state. |
75 | * |
75 | * |
76 | * Returns EOK if all went well, or an error code otherwise. |
76 | * Returns EOK if all went well, or an error code otherwise. |
77 | */ |
77 | */ |
78 | static int _thread_op_begin(thread_t *t, bool being_go) |
78 | static int _thread_op_begin(thread_t *t, bool being_go) |
79 | { |
79 | { |
80 | task_id_t taskid; |
80 | task_id_t taskid; |
81 | ipl_t ipl; |
81 | ipl_t ipl; |
82 | 82 | ||
83 | taskid = TASK->taskid; |
83 | taskid = TASK->taskid; |
84 | 84 | ||
85 | mutex_lock(&TASK->udebug.lock); |
85 | mutex_lock(&TASK->udebug.lock); |
86 | 86 | ||
87 | /* thread_exists() must be called with threads_lock held */ |
87 | /* thread_exists() must be called with threads_lock held */ |
88 | ipl = interrupts_disable(); |
88 | ipl = interrupts_disable(); |
89 | spinlock_lock(&threads_lock); |
89 | spinlock_lock(&threads_lock); |
90 | 90 | ||
91 | if (!thread_exists(t)) { |
91 | if (!thread_exists(t)) { |
92 | spinlock_unlock(&threads_lock); |
92 | spinlock_unlock(&threads_lock); |
93 | interrupts_restore(ipl); |
93 | interrupts_restore(ipl); |
94 | mutex_unlock(&TASK->udebug.lock); |
94 | mutex_unlock(&TASK->udebug.lock); |
95 | return ENOENT; |
95 | return ENOENT; |
96 | } |
96 | } |
97 | 97 | ||
98 | /* t->lock is enough to ensure the thread's existence */ |
98 | /* t->lock is enough to ensure the thread's existence */ |
99 | spinlock_lock(&t->lock); |
99 | spinlock_lock(&t->lock); |
100 | spinlock_unlock(&threads_lock); |
100 | spinlock_unlock(&threads_lock); |
101 | 101 | ||
102 | /* Verify that 't' is a userspace thread. */ |
102 | /* Verify that 't' is a userspace thread. */ |
103 | if ((t->flags & THREAD_FLAG_USPACE) == 0) { |
103 | if ((t->flags & THREAD_FLAG_USPACE) == 0) { |
104 | /* It's not, deny its existence */ |
104 | /* It's not, deny its existence */ |
105 | spinlock_unlock(&t->lock); |
105 | spinlock_unlock(&t->lock); |
106 | interrupts_restore(ipl); |
106 | interrupts_restore(ipl); |
107 | mutex_unlock(&TASK->udebug.lock); |
107 | mutex_unlock(&TASK->udebug.lock); |
108 | return ENOENT; |
108 | return ENOENT; |
109 | } |
109 | } |
110 | 110 | ||
111 | /* Verify debugging state. */ |
111 | /* Verify debugging state. */ |
112 | if (t->udebug.active != true) { |
112 | if (t->udebug.active != true) { |
113 | /* Not in debugging session or undesired GO state */ |
113 | /* Not in debugging session or undesired GO state */ |
114 | spinlock_unlock(&t->lock); |
114 | spinlock_unlock(&t->lock); |
115 | interrupts_restore(ipl); |
115 | interrupts_restore(ipl); |
116 | mutex_unlock(&TASK->udebug.lock); |
116 | mutex_unlock(&TASK->udebug.lock); |
117 | return ENOENT; |
117 | return ENOENT; |
118 | } |
118 | } |
119 | 119 | ||
120 | /* |
120 | /* |
121 | * Since the thread has active == true, TASK->udebug.lock |
121 | * Since the thread has active == true, TASK->udebug.lock |
122 | * is enough to ensure its existence and that active remains |
122 | * is enough to ensure its existence and that active remains |
123 | * true. |
123 | * true. |
124 | */ |
124 | */ |
125 | spinlock_unlock(&t->lock); |
125 | spinlock_unlock(&t->lock); |
126 | interrupts_restore(ipl); |
126 | interrupts_restore(ipl); |
127 | 127 | ||
128 | /* Only mutex TASK->udebug.lock left. */ |
128 | /* Only mutex TASK->udebug.lock left. */ |
129 | 129 | ||
130 | /* Now verify that the thread belongs to the current task. */ |
130 | /* Now verify that the thread belongs to the current task. */ |
131 | if (t->task != TASK) { |
131 | if (t->task != TASK) { |
132 | /* No such thread belonging this task*/ |
132 | /* No such thread belonging this task*/ |
133 | mutex_unlock(&TASK->udebug.lock); |
133 | mutex_unlock(&TASK->udebug.lock); |
134 | return ENOENT; |
134 | return ENOENT; |
135 | } |
135 | } |
136 | 136 | ||
137 | /* |
137 | /* |
138 | * Now we need to grab the thread's debug lock for synchronization |
138 | * Now we need to grab the thread's debug lock for synchronization |
139 | * of the threads stoppability/stop state. |
139 | * of the threads stoppability/stop state. |
140 | */ |
140 | */ |
141 | mutex_lock(&t->udebug.lock); |
141 | mutex_lock(&t->udebug.lock); |
142 | 142 | ||
143 | /* The big task mutex is no longer needed. */ |
143 | /* The big task mutex is no longer needed. */ |
144 | mutex_unlock(&TASK->udebug.lock); |
144 | mutex_unlock(&TASK->udebug.lock); |
145 | 145 | ||
146 | if (t->udebug.go != being_go) { |
146 | if (t->udebug.go != being_go) { |
147 | /* Not in debugging session or undesired GO state. */ |
147 | /* Not in debugging session or undesired GO state. */ |
148 | mutex_unlock(&t->udebug.lock); |
148 | mutex_unlock(&t->udebug.lock); |
149 | return EINVAL; |
149 | return EINVAL; |
150 | } |
150 | } |
151 | 151 | ||
152 | /* Only t->udebug.lock left. */ |
152 | /* Only t->udebug.lock left. */ |
153 | 153 | ||
154 | return EOK; /* All went well. */ |
154 | return EOK; /* All went well. */ |
155 | } |
155 | } |
156 | 156 | ||
157 | /** End debugging operation on a thread. */ |
157 | /** End debugging operation on a thread. */ |
158 | static void _thread_op_end(thread_t *t) |
158 | static void _thread_op_end(thread_t *t) |
159 | { |
159 | { |
160 | mutex_unlock(&t->udebug.lock); |
160 | mutex_unlock(&t->udebug.lock); |
161 | } |
161 | } |
162 | 162 | ||
163 | /** Begin debugging the current task. |
163 | /** Begin debugging the current task. |
164 | * |
164 | * |
165 | * Initiates a debugging session for the current task (and its threads). |
165 | * Initiates a debugging session for the current task (and its threads). |
166 | * When the debugging session has started a reply will be sent to the |
166 | * When the debugging session has started a reply will be sent to the |
167 | * UDEBUG_BEGIN call. This may happen immediately in this function if |
167 | * UDEBUG_BEGIN call. This may happen immediately in this function if |
168 | * all the threads in this task are stoppable at the moment and in this |
168 | * all the threads in this task are stoppable at the moment and in this |
169 | * case the function returns 1. |
169 | * case the function returns 1. |
170 | * |
170 | * |
171 | * Otherwise the function returns 0 and the reply will be sent as soon as |
171 | * Otherwise the function returns 0 and the reply will be sent as soon as |
172 | * all the threads become stoppable (i.e. they can be considered stopped). |
172 | * all the threads become stoppable (i.e. they can be considered stopped). |
173 | * |
173 | * |
174 | * @param call The BEGIN call we are servicing. |
174 | * @param call The BEGIN call we are servicing. |
175 | * @return 0 (OK, but not done yet), 1 (done) or negative error code. |
175 | * @return 0 (OK, but not done yet), 1 (done) or negative error code. |
176 | */ |
176 | */ |
177 | int udebug_begin(call_t *call) |
177 | int udebug_begin(call_t *call) |
178 | { |
178 | { |
179 | int reply; |
179 | int reply; |
180 | 180 | ||
181 | thread_t *t; |
181 | thread_t *t; |
182 | link_t *cur; |
182 | link_t *cur; |
183 | 183 | ||
184 | LOG("Debugging task %llu", TASK->taskid); |
184 | LOG("Debugging task %llu", TASK->taskid); |
185 | mutex_lock(&TASK->udebug.lock); |
185 | mutex_lock(&TASK->udebug.lock); |
186 | 186 | ||
187 | if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) { |
187 | if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) { |
188 | mutex_unlock(&TASK->udebug.lock); |
188 | mutex_unlock(&TASK->udebug.lock); |
189 | return EBUSY; |
189 | return EBUSY; |
190 | } |
190 | } |
191 | 191 | ||
192 | TASK->udebug.dt_state = UDEBUG_TS_BEGINNING; |
192 | TASK->udebug.dt_state = UDEBUG_TS_BEGINNING; |
193 | TASK->udebug.begin_call = call; |
193 | TASK->udebug.begin_call = call; |
194 | TASK->udebug.debugger = call->sender; |
194 | TASK->udebug.debugger = call->sender; |
195 | 195 | ||
196 | if (TASK->udebug.not_stoppable_count == 0) { |
196 | if (TASK->udebug.not_stoppable_count == 0) { |
197 | TASK->udebug.dt_state = UDEBUG_TS_ACTIVE; |
197 | TASK->udebug.dt_state = UDEBUG_TS_ACTIVE; |
198 | TASK->udebug.begin_call = NULL; |
198 | TASK->udebug.begin_call = NULL; |
199 | reply = 1; /* immediate reply */ |
199 | reply = 1; /* immediate reply */ |
200 | } else { |
200 | } else { |
201 | reply = 0; /* no reply */ |
201 | reply = 0; /* no reply */ |
202 | } |
202 | } |
203 | 203 | ||
204 | /* Set udebug.active on all of the task's userspace threads. */ |
204 | /* Set udebug.active on all of the task's userspace threads. */ |
205 | 205 | ||
206 | for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
206 | for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
207 | t = list_get_instance(cur, thread_t, th_link); |
207 | t = list_get_instance(cur, thread_t, th_link); |
208 | 208 | ||
209 | mutex_lock(&t->udebug.lock); |
209 | mutex_lock(&t->udebug.lock); |
210 | if ((t->flags & THREAD_FLAG_USPACE) != 0) |
210 | if ((t->flags & THREAD_FLAG_USPACE) != 0) |
211 | t->udebug.active = true; |
211 | t->udebug.active = true; |
212 | mutex_unlock(&t->udebug.lock); |
212 | mutex_unlock(&t->udebug.lock); |
213 | } |
213 | } |
214 | 214 | ||
215 | mutex_unlock(&TASK->udebug.lock); |
215 | mutex_unlock(&TASK->udebug.lock); |
216 | return reply; |
216 | return reply; |
217 | } |
217 | } |
218 | 218 | ||
219 | /** Finish debugging the current task. |
219 | /** Finish debugging the current task. |
220 | * |
220 | * |
221 | * Closes the debugging session for the current task. |
221 | * Closes the debugging session for the current task. |
222 | * @return Zero on success or negative error code. |
222 | * @return Zero on success or negative error code. |
223 | */ |
223 | */ |
224 | int udebug_end(void) |
224 | int udebug_end(void) |
225 | { |
225 | { |
226 | int rc; |
226 | int rc; |
227 | 227 | ||
228 | LOG("Task %" PRIu64, TASK->taskid); |
228 | LOG("Task %" PRIu64, TASK->taskid); |
229 | 229 | ||
230 | mutex_lock(&TASK->udebug.lock); |
230 | mutex_lock(&TASK->udebug.lock); |
231 | rc = udebug_task_cleanup(TASK); |
231 | rc = udebug_task_cleanup(TASK); |
232 | mutex_unlock(&TASK->udebug.lock); |
232 | mutex_unlock(&TASK->udebug.lock); |
233 | 233 | ||
234 | return rc; |
234 | return rc; |
235 | } |
235 | } |
236 | 236 | ||
237 | /** Set the event mask. |
237 | /** Set the event mask. |
238 | * |
238 | * |
239 | * Sets the event mask that determines which events are enabled. |
239 | * Sets the event mask that determines which events are enabled. |
240 | * |
240 | * |
241 | * @param mask Or combination of events that should be enabled. |
241 | * @param mask Or combination of events that should be enabled. |
242 | * @return Zero on success or negative error code. |
242 | * @return Zero on success or negative error code. |
243 | */ |
243 | */ |
244 | int udebug_set_evmask(udebug_evmask_t mask) |
244 | int udebug_set_evmask(udebug_evmask_t mask) |
245 | { |
245 | { |
246 | LOG("mask = 0x%x", mask); |
246 | LOG("mask = 0x%x", mask); |
247 | 247 | ||
248 | mutex_lock(&TASK->udebug.lock); |
248 | mutex_lock(&TASK->udebug.lock); |
249 | 249 | ||
250 | if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) { |
250 | if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) { |
251 | mutex_unlock(&TASK->udebug.lock); |
251 | mutex_unlock(&TASK->udebug.lock); |
252 | return EINVAL; |
252 | return EINVAL; |
253 | } |
253 | } |
254 | 254 | ||
255 | TASK->udebug.evmask = mask; |
255 | TASK->udebug.evmask = mask; |
256 | mutex_unlock(&TASK->udebug.lock); |
256 | mutex_unlock(&TASK->udebug.lock); |
257 | 257 | ||
258 | return 0; |
258 | return 0; |
259 | } |
259 | } |
260 | 260 | ||
261 | /** Give thread GO. |
261 | /** Give thread GO. |
262 | * |
262 | * |
263 | * Upon recieving a go message, the thread is given GO. Being GO |
263 | * Upon recieving a go message, the thread is given GO. Being GO |
264 | * means the thread is allowed to execute userspace code (until |
264 | * means the thread is allowed to execute userspace code (until |
265 | * a debugging event or STOP occurs, at which point the thread loses GO. |
265 | * a debugging event or STOP occurs, at which point the thread loses GO. |
266 | * |
266 | * |
267 | * @param t The thread to operate on (unlocked and need not be valid). |
267 | * @param t The thread to operate on (unlocked and need not be valid). |
268 | * @param call The GO call that we are servicing. |
268 | * @param call The GO call that we are servicing. |
269 | */ |
269 | */ |
270 | int udebug_go(thread_t *t, call_t *call) |
270 | int udebug_go(thread_t *t, call_t *call) |
271 | { |
271 | { |
272 | int rc; |
272 | int rc; |
273 | 273 | ||
274 | /* On success, this will lock t->udebug.lock. */ |
274 | /* On success, this will lock t->udebug.lock. */ |
275 | rc = _thread_op_begin(t, false); |
275 | rc = _thread_op_begin(t, false); |
276 | if (rc != EOK) { |
276 | if (rc != EOK) { |
277 | return rc; |
277 | return rc; |
278 | } |
278 | } |
279 | 279 | ||
280 | t->udebug.go_call = call; |
280 | t->udebug.go_call = call; |
281 | t->udebug.go = true; |
281 | t->udebug.go = true; |
282 | t->udebug.cur_event = 0; /* none */ |
282 | t->udebug.cur_event = 0; /* none */ |
283 | 283 | ||
284 | /* |
284 | /* |
285 | * Neither t's lock nor threads_lock may be held during wakeup. |
285 | * Neither t's lock nor threads_lock may be held during wakeup. |
286 | */ |
286 | */ |
287 | waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST); |
287 | waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST); |
288 | 288 | ||
289 | _thread_op_end(t); |
289 | _thread_op_end(t); |
290 | 290 | ||
291 | return 0; |
291 | return 0; |
292 | } |
292 | } |
293 | 293 | ||
294 | /** Stop a thread (i.e. take its GO away) |
294 | /** Stop a thread (i.e. take its GO away) |
295 | * |
295 | * |
296 | * Generates a STOP event as soon as the thread becomes stoppable (i.e. |
296 | * Generates a STOP event as soon as the thread becomes stoppable (i.e. |
297 | * can be considered stopped). |
297 | * can be considered stopped). |
298 | * |
298 | * |
299 | * @param t The thread to operate on (unlocked and need not be valid). |
299 | * @param t The thread to operate on (unlocked and need not be valid). |
300 | * @param call The GO call that we are servicing. |
300 | * @param call The GO call that we are servicing. |
301 | */ |
301 | */ |
302 | int udebug_stop(thread_t *t, call_t *call) |
302 | int udebug_stop(thread_t *t, call_t *call) |
303 | { |
303 | { |
304 | int rc; |
304 | int rc; |
305 | 305 | ||
306 | LOG("udebug_stop()"); |
306 | LOG("udebug_stop()"); |
307 | 307 | ||
308 | /* |
308 | /* |
309 | * On success, this will lock t->udebug.lock. Note that this makes sure |
309 | * On success, this will lock t->udebug.lock. Note that this makes sure |
310 | * the thread is not stopped. |
310 | * the thread is not stopped. |
311 | */ |
311 | */ |
312 | rc = _thread_op_begin(t, true); |
312 | rc = _thread_op_begin(t, true); |
313 | if (rc != EOK) { |
313 | if (rc != EOK) { |
314 | return rc; |
314 | return rc; |
315 | } |
315 | } |
316 | 316 | ||
317 | /* Take GO away from the thread. */ |
317 | /* Take GO away from the thread. */ |
318 | t->udebug.go = false; |
318 | t->udebug.go = false; |
319 | 319 | ||
320 | if (t->udebug.stoppable != true) { |
320 | if (t->udebug.stoppable != true) { |
321 | /* Answer will be sent when the thread becomes stoppable. */ |
321 | /* Answer will be sent when the thread becomes stoppable. */ |
322 | _thread_op_end(t); |
322 | _thread_op_end(t); |
323 | return 0; |
323 | return 0; |
324 | } |
324 | } |
325 | 325 | ||
326 | /* |
326 | /* |
327 | * Answer GO call. |
327 | * Answer GO call. |
328 | */ |
328 | */ |
329 | 329 | ||
330 | /* Make sure nobody takes this call away from us. */ |
330 | /* Make sure nobody takes this call away from us. */ |
331 | call = t->udebug.go_call; |
331 | call = t->udebug.go_call; |
332 | t->udebug.go_call = NULL; |
332 | t->udebug.go_call = NULL; |
333 | 333 | ||
334 | IPC_SET_RETVAL(call->data, 0); |
334 | IPC_SET_RETVAL(call->data, 0); |
335 | IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP); |
335 | IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP); |
336 | 336 | ||
337 | THREAD->udebug.cur_event = UDEBUG_EVENT_STOP; |
337 | THREAD->udebug.cur_event = UDEBUG_EVENT_STOP; |
338 | 338 | ||
339 | _thread_op_end(t); |
339 | _thread_op_end(t); |
340 | 340 | ||
341 | mutex_lock(&TASK->udebug.lock); |
341 | mutex_lock(&TASK->udebug.lock); |
342 | ipc_answer(&TASK->answerbox, call); |
342 | ipc_answer(&TASK->answerbox, call); |
343 | mutex_unlock(&TASK->udebug.lock); |
343 | mutex_unlock(&TASK->udebug.lock); |
344 | 344 | ||
345 | return 0; |
345 | return 0; |
346 | } |
346 | } |
347 | 347 | ||
348 | /** Read the list of userspace threads in the current task. |
348 | /** Read the list of userspace threads in the current task. |
349 | * |
349 | * |
350 | * The list takes the form of a sequence of thread hashes (i.e. the pointers |
350 | * The list takes the form of a sequence of thread hashes (i.e. the pointers |
351 | * to thread structures). A buffer of size @a buf_size is allocated and |
351 | * to thread structures). A buffer of size @a buf_size is allocated and |
352 | * a pointer to it written to @a buffer. The sequence of hashes is written |
352 | * a pointer to it written to @a buffer. The sequence of hashes is written |
353 | * into this buffer. |
353 | * into this buffer. |
354 | * |
354 | * |
355 | * If the sequence is longer than @a buf_size bytes, only as much hashes |
355 | * If the sequence is longer than @a buf_size bytes, only as much hashes |
356 | * as can fit are copied. The number of thread hashes copied is stored |
356 | * as can fit are copied. The number of thread hashes copied is stored |
357 | * in @a n. |
357 | * in @a n. |
358 | * |
358 | * |
359 | * The rationale for having @a buf_size is that this function is only |
359 | * The rationale for having @a buf_size is that this function is only |
360 | * used for servicing the THREAD_READ message, which always specifies |
360 | * used for servicing the THREAD_READ message, which always specifies |
361 | * a maximum size for the userspace buffer. |
361 | * a maximum size for the userspace buffer. |
362 | * |
362 | * |
363 | * @param buffer The buffer for storing thread hashes. |
363 | * @param buffer The buffer for storing thread hashes. |
364 | * @param buf_size Buffer size in bytes. |
364 | * @param buf_size Buffer size in bytes. |
365 | * @param n The actual number of hashes copied will be stored here. |
365 | * @param n The actual number of hashes copied will be stored here. |
366 | */ |
366 | */ |
367 | int udebug_thread_read(void **buffer, size_t buf_size, size_t *n) |
367 | int udebug_thread_read(void **buffer, size_t buf_size, size_t *n) |
368 | { |
368 | { |
369 | thread_t *t; |
369 | thread_t *t; |
370 | link_t *cur; |
370 | link_t *cur; |
371 | unative_t tid; |
371 | unative_t tid; |
372 | unsigned copied_ids; |
372 | unsigned copied_ids; |
373 | ipl_t ipl; |
373 | ipl_t ipl; |
374 | unative_t *id_buffer; |
374 | unative_t *id_buffer; |
375 | int flags; |
375 | int flags; |
376 | size_t max_ids; |
376 | size_t max_ids; |
377 | 377 | ||
378 | LOG("udebug_thread_read()"); |
378 | LOG("udebug_thread_read()"); |
379 | 379 | ||
380 | /* Allocate a buffer to hold thread IDs */ |
380 | /* Allocate a buffer to hold thread IDs */ |
381 | id_buffer = malloc(buf_size, 0); |
381 | id_buffer = malloc(buf_size, 0); |
382 | 382 | ||
383 | mutex_lock(&TASK->udebug.lock); |
383 | mutex_lock(&TASK->udebug.lock); |
384 | 384 | ||
385 | /* Verify task state */ |
385 | /* Verify task state */ |
386 | if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) { |
386 | if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) { |
387 | mutex_unlock(&TASK->udebug.lock); |
387 | mutex_unlock(&TASK->udebug.lock); |
388 | return EINVAL; |
388 | return EINVAL; |
389 | } |
389 | } |
390 | 390 | ||
391 | ipl = interrupts_disable(); |
391 | ipl = interrupts_disable(); |
392 | spinlock_lock(&TASK->lock); |
392 | spinlock_lock(&TASK->lock); |
393 | /* Copy down the thread IDs */ |
393 | /* Copy down the thread IDs */ |
394 | 394 | ||
395 | max_ids = buf_size / sizeof(unative_t); |
395 | max_ids = buf_size / sizeof(unative_t); |
396 | copied_ids = 0; |
396 | copied_ids = 0; |
397 | 397 | ||
398 | /* FIXME: make sure the thread isn't past debug shutdown... */ |
398 | /* FIXME: make sure the thread isn't past debug shutdown... */ |
399 | for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
399 | for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
400 | /* Do not write past end of buffer */ |
400 | /* Do not write past end of buffer */ |
401 | if (copied_ids >= max_ids) break; |
401 | if (copied_ids >= max_ids) break; |
402 | 402 | ||
403 | t = list_get_instance(cur, thread_t, th_link); |
403 | t = list_get_instance(cur, thread_t, th_link); |
404 | 404 | ||
405 | spinlock_lock(&t->lock); |
405 | spinlock_lock(&t->lock); |
406 | flags = t->flags; |
406 | flags = t->flags; |
407 | spinlock_unlock(&t->lock); |
407 | spinlock_unlock(&t->lock); |
408 | 408 | ||
409 | /* Not interested in kernel threads. */ |
409 | /* Not interested in kernel threads. */ |
410 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
410 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
411 | /* Using thread struct pointer as identification hash */ |
411 | /* Using thread struct pointer as identification hash */ |
412 | tid = (unative_t) t; |
412 | tid = (unative_t) t; |
413 | id_buffer[copied_ids++] = tid; |
413 | id_buffer[copied_ids++] = tid; |
414 | } |
414 | } |
415 | } |
415 | } |
416 | 416 | ||
417 | spinlock_unlock(&TASK->lock); |
417 | spinlock_unlock(&TASK->lock); |
418 | interrupts_restore(ipl); |
418 | interrupts_restore(ipl); |
419 | 419 | ||
420 | mutex_unlock(&TASK->udebug.lock); |
420 | mutex_unlock(&TASK->udebug.lock); |
421 | 421 | ||
422 | *buffer = id_buffer; |
422 | *buffer = id_buffer; |
423 | *n = copied_ids * sizeof(unative_t); |
423 | *n = copied_ids * sizeof(unative_t); |
424 | 424 | ||
425 | return 0; |
425 | return 0; |
426 | } |
426 | } |
427 | 427 | ||
428 | /** Read the arguments of a system call. |
428 | /** Read the arguments of a system call. |
429 | * |
429 | * |
430 | * The arguments of the system call being being executed are copied |
430 | * The arguments of the system call being being executed are copied |
431 | * to an allocated buffer and a pointer to it is written to @a buffer. |
431 | * to an allocated buffer and a pointer to it is written to @a buffer. |
432 | * The size of the buffer is exactly such that it can hold the maximum number |
432 | * The size of the buffer is exactly such that it can hold the maximum number |
433 | * of system-call arguments. |
433 | * of system-call arguments. |
434 | * |
434 | * |
435 | * Unless the thread is currently blocked in a SYSCALL_B or SYSCALL_E event, |
435 | * Unless the thread is currently blocked in a SYSCALL_B or SYSCALL_E event, |
436 | * this function will fail with an EINVAL error code. |
436 | * this function will fail with an EINVAL error code. |
437 | * |
437 | * |
438 | * @param buffer The buffer for storing thread hashes. |
438 | * @param buffer The buffer for storing thread hashes. |
439 | */ |
439 | */ |
440 | int udebug_args_read(thread_t *t, void **buffer) |
440 | int udebug_args_read(thread_t *t, void **buffer) |
441 | { |
441 | { |
442 | int rc; |
442 | int rc; |
443 | unative_t *arg_buffer; |
443 | unative_t *arg_buffer; |
444 | 444 | ||
445 | /* Prepare a buffer to hold the arguments. */ |
445 | /* Prepare a buffer to hold the arguments. */ |
446 | arg_buffer = malloc(6 * sizeof(unative_t), 0); |
446 | arg_buffer = malloc(6 * sizeof(unative_t), 0); |
447 | 447 | ||
448 | /* On success, this will lock t->udebug.lock. */ |
448 | /* On success, this will lock t->udebug.lock. */ |
449 | rc = _thread_op_begin(t, false); |
449 | rc = _thread_op_begin(t, false); |
450 | if (rc != EOK) { |
450 | if (rc != EOK) { |
451 | return rc; |
451 | return rc; |
452 | } |
452 | } |
453 | 453 | ||
454 | /* Additionally we need to verify that we are inside a syscall. */ |
454 | /* Additionally we need to verify that we are inside a syscall. */ |
455 | if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B && |
455 | if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B && |
456 | t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) { |
456 | t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) { |
457 | _thread_op_end(t); |
457 | _thread_op_end(t); |
458 | return EINVAL; |
458 | return EINVAL; |
459 | } |
459 | } |
460 | 460 | ||
461 | /* Copy to a local buffer before releasing the lock. */ |
461 | /* Copy to a local buffer before releasing the lock. */ |
462 | memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t)); |
462 | memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t)); |
463 | 463 | ||
464 | _thread_op_end(t); |
464 | _thread_op_end(t); |
465 | 465 | ||
466 | *buffer = arg_buffer; |
466 | *buffer = arg_buffer; |
467 | return 0; |
467 | return 0; |
468 | } |
468 | } |
469 | 469 | ||
/** Read the memory of the debugged task.
 *
 * Reads @a n bytes from the address space of the debugged task, starting
 * from @a uspace_addr. The bytes are copied into an allocated buffer
 * and a pointer to it is written into @a buffer.
 *
 * @param uspace_addr	Address from where to start reading.
 * @param n		Number of bytes to read.
 * @param buffer	For storing a pointer to the allocated buffer.
 *
 * @return		Zero on success, EBUSY if the task is not being
 *			debugged, or an error code from copy_from_uspace().
 */
480 | int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer) |
480 | int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer) |
481 | { |
481 | { |
482 | void *data_buffer; |
482 | void *data_buffer; |
483 | int rc; |
483 | int rc; |
484 | 484 | ||
485 | /* Verify task state */ |
485 | /* Verify task state */ |
486 | mutex_lock(&TASK->udebug.lock); |
486 | mutex_lock(&TASK->udebug.lock); |
487 | 487 | ||
488 | if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) { |
488 | if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) { |
489 | mutex_unlock(&TASK->udebug.lock); |
489 | mutex_unlock(&TASK->udebug.lock); |
490 | return EBUSY; |
490 | return EBUSY; |
491 | } |
491 | } |
492 | 492 | ||
493 | data_buffer = malloc(n, 0); |
493 | data_buffer = malloc(n, 0); |
494 | 494 | ||
495 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
495 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
496 | * be a problem */ |
496 | * be a problem */ |
497 | rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n); |
497 | rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n); |
498 | mutex_unlock(&TASK->udebug.lock); |
498 | mutex_unlock(&TASK->udebug.lock); |
499 | 499 | ||
500 | if (rc != 0) return rc; |
500 | if (rc != 0) return rc; |
501 | 501 | ||
502 | *buffer = data_buffer; |
502 | *buffer = data_buffer; |
503 | return 0; |
503 | return 0; |
504 | } |
504 | } |
505 | 505 | ||
- | 506 | int udebug_thread_get_thread_struct(thread_t *t, void **buffer) |
|
- | 507 | { |
|
- | 508 | ipl_t ipl = interrupts_disable(); |
|
- | 509 | ||
- | 510 | void *data_buffer = (void *)malloc(sizeof(thread_t), 0); |
|
- | 511 | ||
- | 512 | memcpy(data_buffer, (void *)t, sizeof(thread_t)); |
|
- | 513 | ||
- | 514 | *buffer = data_buffer; |
|
- | 515 | ||
- | 516 | interrupts_restore(ipl); |
|
- | 517 | ||
- | 518 | return (0); |
|
- | 519 | } |
|
- | 520 | ||
- | 521 | int udebug_task_get_memory_areas(void **buffer, size_t buf_size, size_t *n) |
|
- | 522 | { |
|
- | 523 | link_t *cur; |
|
- | 524 | ipl_t ipl; |
|
- | 525 | unative_t *areas_buffer; |
|
- | 526 | size_t max_index; |
|
- | 527 | ||
- | 528 | as_print(TASK->as); |
|
- | 529 | ||
- | 530 | areas_buffer = malloc(buf_size, 0); |
|
- | 531 | ||
- | 532 | mutex_lock(&TASK->udebug.lock); |
|
- | 533 | ||
- | 534 | /* Verify task state */ |
|
- | 535 | if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) { |
|
- | 536 | mutex_unlock(&TASK->udebug.lock); |
|
- | 537 | return EINVAL; |
|
- | 538 | } |
|
- | 539 | ||
- | 540 | ipl = interrupts_disable(); |
|
- | 541 | spinlock_lock(&TASK->lock); |
|
- | 542 | ||
- | 543 | max_index = buf_size / sizeof(unative_t); |
|
- | 544 | as_t *as = TASK->as; |
|
- | 545 | ||
- | 546 | mutex_lock(&as->lock); |
|
- | 547 | ||
- | 548 | /* print out info about address space areas */ |
|
- | 549 | unsigned int index = 0; |
|
- | 550 | for (cur = as->as_area_btree.leaf_head.next; |
|
- | 551 | cur != &as->as_area_btree.leaf_head; cur = cur->next) { |
|
- | 552 | btree_node_t *node; |
|
- | 553 | ||
- | 554 | node = list_get_instance(cur, btree_node_t, leaf_link); |
|
- | 555 | ||
- | 556 | unsigned int i; |
|
- | 557 | for (i = 0; i < node->keys; i++) { |
|
- | 558 | if (index >= max_index) |
|
- | 559 | break; |
|
- | 560 | ||
- | 561 | as_area_t *area = node->value[i]; |
|
- | 562 | ||
- | 563 | mutex_lock(&area->lock); |
|
- | 564 | areas_buffer[index++] = area->base; |
|
- | 565 | areas_buffer[index++] = area->base + FRAMES2SIZE(area->pages); |
|
- | 566 | mutex_unlock(&area->lock); |
|
- | 567 | } |
|
- | 568 | } |
|
- | 569 | ||
- | 570 | mutex_unlock(&as->lock); |
|
- | 571 | ||
- | 572 | spinlock_unlock(&TASK->lock); |
|
- | 573 | interrupts_restore(ipl); |
|
- | 574 | ||
- | 575 | mutex_unlock(&TASK->udebug.lock); |
|
- | 576 | ||
- | 577 | *buffer = areas_buffer; |
|
- | 578 | *n = (index) * sizeof(unative_t); |
|
- | 579 | ||
- | 580 | return 0; |
|
- | 581 | ||
- | 582 | } |
|
- | 583 | ||
- | 584 | int udebug_copy_kstack(void *kstack, void **buffer, size_t n) |
|
- | 585 | { |
|
- | 586 | ipl_t ipl = interrupts_disable(); |
|
- | 587 | ||
- | 588 | void *data_buffer = malloc(n, 0); |
|
- | 589 | ||
- | 590 | memcpy(data_buffer, (void *)kstack, n); |
|
- | 591 | ||
- | 592 | *buffer = data_buffer; |
|
- | 593 | ||
- | 594 | interrupts_restore(ipl); |
|
- | 595 | ||
- | 596 | return 0; |
|
- | 597 | } |
|
- | 598 | ||
/** Restore selected fields of a thread structure from a saved snapshot.
 *
 * Copies scheduling/execution state from the snapshot in @a buffer
 * (a thread_t image, presumably taken by udebug_thread_get_thread_struct()
 * — TODO confirm) back into the live thread @a t_old. Only a specific
 * subset of fields is restored; the thread's identity fields (links, id,
 * kstack pointer, etc.) are deliberately left untouched.
 *
 * @param buffer	Saved thread_t image to restore from.
 * @param t_old		Live thread to restore into.
 *
 * @return		Always zero.
 */
int udebug_restore_thread_struct(void *buffer, thread_t *t_old)
{
	/* NOTE(review): interrupts off only stops the local CPU; confirm
	 * whether t_old->lock should also be held on SMP. */
	ipl_t ipl = interrupts_disable();

	thread_t *t_new = (thread_t *)buffer;

	t_old->thread_code = t_new->thread_code;

	/* Debug output left in from development. */
	printf("old sp: %p, new sp: %p\n", t_old->saved_context.sp, t_new->saved_context.sp);
	printf("old kstack: %p, new kstack: %p\n", t_old->kstack, t_new->kstack);

	/*
	 * Restore the saved CPU context, but rebase the stack pointer:
	 * keep the snapshot's offset within its kernel stack while pointing
	 * into t_old's own kstack (the two stacks are distinct allocations).
	 */
	t_old->saved_context = t_new->saved_context;
	t_old->saved_context.sp = (uintptr_t)t_old->kstack + ((uintptr_t)t_new->saved_context.sp - (uintptr_t)t_new->kstack);

	/* Sleep/timeout bookkeeping. */
	t_old->sleep_timeout_context = t_new->sleep_timeout_context;
	t_old->sleep_timeout = t_new->sleep_timeout;
	t_old->timeout_pending = t_new->timeout_pending;

	/* Userspace copy-in/copy-out fault-recovery flags. */
	t_old->in_copy_from_uspace = t_new->in_copy_from_uspace;
	t_old->in_copy_to_uspace = t_new->in_copy_to_uspace;

	t_old->interrupted = t_new->interrupted;

	t_old->call_me = t_new->call_me;
	t_old->call_me_with = t_new->call_me_with;

	/* Pending debug "go" call, if any. */
	t_old->udebug.go_call = t_new->udebug.go_call;

	interrupts_restore(ipl);

	return (0);
}
|
- | 631 | ||
- | 632 | int udebug_mem_write(void *buffer, void *start, size_t n) |
|
- | 633 | { |
|
- | 634 | ipl_t ipl = interrupts_disable(); |
|
- | 635 | ||
- | 636 | if (((unsigned) start & 0x80000000) == 0) |
|
- | 637 | copy_to_uspace(start, buffer, n); |
|
- | 638 | ||
- | 639 | interrupts_restore(ipl); |
|
- | 640 | ||
- | 641 | return (0); |
|
- | 642 | } |
|
- | 643 | ||
- | 644 | int udebug_restore_kstack(void *buffer, size_t size, thread_t *t) |
|
- | 645 | { |
|
- | 646 | ipl_t ipl = interrupts_disable(); |
|
- | 647 | ||
- | 648 | memcpy(t->kstack + sizeof(the_t), buffer + sizeof(the_t), size - sizeof(the_t)); |
|
- | 649 | ||
- | 650 | interrupts_restore(ipl); |
|
- | 651 | ||
- | 652 | return (0); |
|
- | 653 | } |
|
- | 654 | ||
- | 655 | ||
/** @}
 */