Rev 3014 | Rev 3016 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 3014 | Rev 3015 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (c) 2008 Jiri Svoboda |
2 | * Copyright (c) 2008 Jiri Svoboda |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup generic |
29 | /** @addtogroup generic |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | 32 | ||
33 | /** |
33 | /** |
34 | * @file |
34 | * @file |
35 | * @brief Udebug. |
35 | * @brief Udebug. |
36 | */ |
36 | */ |
37 | 37 | ||
38 | #include <synch/waitq.h> |
38 | #include <synch/waitq.h> |
39 | #include <console/klog.h> |
39 | #include <console/klog.h> |
40 | #include <udebug/udebug.h> |
40 | #include <udebug/udebug.h> |
41 | #include <errno.h> |
41 | #include <errno.h> |
42 | #include <arch.h> |
42 | #include <arch.h> |
43 | 43 | ||
44 | void udebug_task_init(udebug_task_t *ut) |
44 | void udebug_task_init(udebug_task_t *ut) |
45 | { |
45 | { |
46 | ut->dt_state = UDEBUG_TS_INACTIVE; |
46 | ut->dt_state = UDEBUG_TS_INACTIVE; |
47 | ut->begin_call = NULL; |
47 | ut->begin_call = NULL; |
48 | ut->not_stoppable_count = 0; |
48 | ut->not_stoppable_count = 0; |
49 | ut->evmask = 0; |
49 | ut->evmask = 0; |
50 | } |
50 | } |
51 | 51 | ||
/** Block the current thread until the debugger grants it "go".
 *
 * Sleeps unconditionally on @a wq with no timeout. missed_wakeups is
 * zeroed inside the prepare/finish window so that a wakeup delivered
 * before this call cannot satisfy the sleep early - the thread must
 * actually wait for a fresh wakeup from the debugger.
 *
 * @param wq Wait queue used for the go/stop handshake
 *           (the thread's go_wq).
 */
static void udebug_wait_for_go(waitq_t *wq)
{
	int rc;
	ipl_t ipl;

	ipl = waitq_sleep_prepare(wq);

	wq->missed_wakeups = 0;	/* Enforce blocking. */
	rc = waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);

	waitq_sleep_finish(wq, rc, ipl);
}
64 | 64 | ||
/** Mark the current thread as stoppable on entry to a stoppable section.
 *
 * Decrements the task's non-stoppable thread count and sets
 * THREAD->debug_stoppable. Depending on the task's debugging state:
 *
 * - UDEBUG_TS_BEGINNING and this was the last non-stoppable thread:
 *   the pending DEBUG_BEGIN call is answered and the session becomes
 *   UDEBUG_TS_ACTIVE.
 * - UDEBUG_TS_ACTIVE with a pending stop request on this thread:
 *   the debugger's GO call is answered with UDEBUG_EVENT_STOP.
 * - Otherwise: only the stoppable flag is updated.
 *
 * Must be called with interrupts enabled; takes TASK->lock then
 * THREAD->debug_lock (that lock order is relied on throughout).
 */
void udebug_stoppable_begin(void)
{
	int nsc;		/* New non-stoppable count after decrement. */
	call_t *db_call, *go_call;
	ipl_t ipl;

	ASSERT(THREAD);
	ASSERT(TASK);

	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);

	nsc = --TASK->udebug.not_stoppable_count;

	if (TASK->udebug.dt_state == UDEBUG_TS_BEGINNING) {
		klog_printf("udebug_stoppable_begin");
		klog_printf(" - nsc := %d", nsc);
	}

	if (TASK->udebug.dt_state == UDEBUG_TS_BEGINNING && nsc == 0) {
		/*
		 * This was the last non-stoppable thread. Reply to
		 * DEBUG_BEGIN call.
		 */

		db_call = TASK->udebug.begin_call;
		ASSERT(db_call);

		/* Lock order OK, THREAD->debug_lock is after TASK->lock */
		spinlock_lock(&THREAD->debug_lock);
		THREAD->debug_stoppable = true;
		spinlock_unlock(&THREAD->debug_lock);

		/* Session is now fully established. */
		TASK->udebug.dt_state = UDEBUG_TS_ACTIVE;
		TASK->udebug.begin_call = NULL;
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);

		/* Answer DEBUG_BEGIN outside of TASK->lock. */
		IPC_SET_RETVAL(db_call->data, 0);
		//klog_printf("udebug_stoppable_begin/ipc_answer");
		ipc_answer(&TASK->answerbox, db_call);

	} else if (TASK->udebug.dt_state == UDEBUG_TS_ACTIVE) {
		/*
		 * Active debugging session
		 */

		/* Lock order OK, THREAD->debug_lock is after TASK->lock */
		spinlock_lock(&THREAD->debug_lock);
		THREAD->debug_stoppable = true;

		if (THREAD->debug_active && THREAD->debug_stop) {
			/*
			 * Thread was requested to stop - answer go call
			 */

			/* Make sure nobody takes this call away from us */
			go_call = THREAD->debug_go_call;
			THREAD->debug_go_call = NULL;
			ASSERT(go_call);

			IPC_SET_RETVAL(go_call->data, 0);
			IPC_SET_ARG1(go_call->data, UDEBUG_EVENT_STOP);

			THREAD->cur_event = UDEBUG_EVENT_STOP;
			spinlock_unlock(&THREAD->debug_lock);

			/*
			 * NOTE(review): TASK->lock is still held across
			 * this ipc_answer, unlike the DEBUG_BEGIN branch
			 * above which answers after unlocking - confirm
			 * this asymmetry is intended.
			 */
			ipc_answer(&TASK->answerbox, go_call);

			spinlock_unlock(&TASK->lock);
			interrupts_restore(ipl);
		} else {
			/*
			 * No stop request - nothing happens.
			 */
			spinlock_unlock(&THREAD->debug_lock);
			spinlock_unlock(&TASK->lock);
			interrupts_restore(ipl);
		}
	} else {
		/*
		 * All other cases - nothing special happens.
		 */

		/* Lock order OK, THREAD->debug_lock is after TASK->lock */
		spinlock_lock(&THREAD->debug_lock);
		THREAD->debug_stoppable = true;
		spinlock_unlock(&THREAD->debug_lock);

		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);
	}
}
158 | 158 | ||
/** Leave a stoppable section, sleeping first if a stop was requested.
 *
 * If the thread is in a debugging session and has been asked to stop,
 * it parks on its go_wq until the debugger grants "go", then retries
 * from the top - becoming non-stoppable must happen atomically with
 * re-checking the stop request, hence the restart loop. Otherwise the
 * thread becomes non-stoppable (debug_stoppable = false) and the
 * task's non-stoppable count is incremented.
 *
 * Takes TASK->lock then THREAD->debug_lock.
 */
void udebug_stoppable_end(void)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);

	/* Lock order OK, THREAD->debug_lock is after TASK->lock */
	spinlock_lock(&THREAD->debug_lock);

	if (TASK->udebug.dt_state == UDEBUG_TS_ACTIVE) {
		//klog_printf("udebug_stoppable_end");
		//klog_printf("debug_stop=%d", THREAD->debug_stop);
	}

	if (THREAD->debug_active &&
	    THREAD->debug_stop == true) {
		/* Stop requested: drop locks and wait for "go". */
		TASK->udebug.begin_call = NULL;
		spinlock_unlock(&THREAD->debug_lock);
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);

		udebug_wait_for_go(&THREAD->go_wq);

		goto restart;
		/* must try again - have to lose stoppability atomically */
	} else {
		/* No stop pending: become non-stoppable under both locks. */
		++TASK->udebug.not_stoppable_count;
		THREAD->debug_stoppable = false;

		spinlock_unlock(&THREAD->debug_lock);
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);
	}
}
195 | 195 | ||
/** Upon being scheduled to run, check if the current thread should stop.
 *
 * This function is called from clock(). Preemption is enabled.
 * interrupts are disabled, but since this is called after
 * being scheduled-in, we can enable them, if we're careful enough
 * not to allow arbitrary recursion.
 */
void udebug_before_thread_runs(void)
{
	ipl_t ipl;

	/* This will happen if we get preempted inside this function. */
	/*
	 * NOTE(review): the test and the set below are not one atomic
	 * operation; presumably safe because interrupts are still
	 * disabled at this point (per the comment above) - confirm.
	 */
	if (THREAD->debug_in_before_thread_runs)
		return;

	THREAD->debug_in_before_thread_runs = true;
	ipl = interrupts_enable();

	/* Now we're free to do whatever we need (lock mutexes, etc.) */

	/* Check if we're supposed to stop */
	udebug_stoppable_begin();
	udebug_stoppable_end();

	interrupts_restore(ipl);
	THREAD->debug_in_before_thread_runs = false;
}
/** Generate a syscall debug event and wait for "go".
 *
 * Reports either a syscall-begin or syscall-end event (depending on
 * @a end_variant) to the debugger by answering the outstanding GO call
 * with the event type, syscall @a id and return value @a rc. The
 * syscall arguments a1..a6 are stashed in THREAD->syscall_args for
 * later retrieval by the debugger. The thread then sleeps until it is
 * granted "go" again.
 *
 * Nothing is generated unless the thread is in an active session,
 * currently holds "go" (debug_stop == false) and the event type is
 * enabled in the task's event mask.
 */
void udebug_syscall_event(unative_t a1, unative_t a2, unative_t a3,
    unative_t a4, unative_t a5, unative_t a6, unative_t id, unative_t rc,
    bool end_variant)
{
	call_t *call;
	ipl_t ipl;
	udebug_event_t etype;

	etype = end_variant ? UDEBUG_EVENT_SYSCALL_E : UDEBUG_EVENT_SYSCALL_B;

	ipl = interrupts_disable();
	spinlock_lock(&THREAD->debug_lock);

	/* Must only generate events when in debugging session and have go */
	if (THREAD->debug_active != true ||
	    THREAD->debug_stop == true ||
	    (TASK->udebug.evmask & UDEBUG_EVMASK(etype)) == 0) {
		spinlock_unlock(&THREAD->debug_lock);
		interrupts_restore(ipl);
		return;
	}

	//klog_printf("udebug_syscall_event");
	call = THREAD->debug_go_call;
	IPC_SET_RETVAL(call->data, 0);
	IPC_SET_ARG1(call->data, etype);
	IPC_SET_ARG2(call->data, id);
	IPC_SET_ARG3(call->data, rc);
	//klog_printf("udebug_syscall_event/ipc_answer");

	THREAD->syscall_args[0] = a1;
	THREAD->syscall_args[1] = a2;
	THREAD->syscall_args[2] = a3;
	THREAD->syscall_args[3] = a4;
	THREAD->syscall_args[4] = a5;
	THREAD->syscall_args[5] = a6;

	/*
	 * Make sure debug_stop is true when going to sleep
	 * in case we get woken up by DEBUG_END. (At which
	 * point it must be back to the initial true value).
	 */
	THREAD->debug_stop = true;

	THREAD->cur_event = etype;
	spinlock_unlock(&THREAD->debug_lock);

	/*
	 * NOTE(review): THREAD->debug_go_call is re-read here after
	 * debug_lock was dropped, instead of using the saved 'call' -
	 * verify it cannot have been cleared in between.
	 */
	spinlock_lock(&TASK->lock);
	ipc_answer(&TASK->answerbox, THREAD->debug_go_call);
	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	/* Sleep until the debugger grants "go" again. */
	udebug_wait_for_go(&THREAD->go_wq);
}
250 | 278 | ||
/** Generate a thread-begin event and wait for "go".
 *
 * Reports the creation of thread @a t to the debugger by answering the
 * current thread's outstanding GO call with UDEBUG_EVENT_THREAD_B and
 * the new thread's kernel address, then sleeps until granted "go".
 *
 * Only generated when the current thread is in a debugging session
 * (debug_active). Note that unlike the syscall/breakpoint events this
 * one is not filtered through the task's event mask here.
 *
 * @param t The newly created thread being reported.
 */
void udebug_thread_b_event(struct thread *t)
{
	call_t *call;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&THREAD->debug_lock);

	klog_printf("udebug_thread_b_event");
	klog_printf("- check state");

	/* Must only generate events when in debugging session */
	if (THREAD->debug_active != true) {
		klog_printf("- debug_active: %s, debug_stop: %s",
		    THREAD->debug_active ? "yes(+)" : "no(-)",
		    THREAD->debug_stop ? "yes(-)" : "no(+)");
		spinlock_unlock(&THREAD->debug_lock);
		interrupts_restore(ipl);
		return;
	}

	klog_printf("- trigger event");

	call = THREAD->debug_go_call;
	IPC_SET_RETVAL(call->data, 0);
	IPC_SET_ARG1(call->data, UDEBUG_EVENT_THREAD_B);
	IPC_SET_ARG2(call->data, (unative_t)t);

	/*
	 * Make sure debug_stop is true when going to sleep
	 * in case we get woken up by DEBUG_END. (At which
	 * point it must be back to the initial true value).
	 */
	THREAD->debug_stop = true;

	THREAD->cur_event = UDEBUG_EVENT_THREAD_B;
	spinlock_unlock(&THREAD->debug_lock);

	spinlock_lock(&TASK->lock);
	ipc_answer(&TASK->answerbox, THREAD->debug_go_call);
	spinlock_unlock(&TASK->lock);

	interrupts_restore(ipl);
	klog_printf("- sleep");
	udebug_wait_for_go(&THREAD->go_wq);
}
297 | 325 | ||
/** Generate a thread-end event and finish debugging of this thread.
 *
 * Answers the outstanding GO call with UDEBUG_EVENT_THREAD_E and then
 * takes the thread out of the debugging session (debug_active = false,
 * debug_stop reset to its initial true value). Unlike the other event
 * generators this one does not sleep afterwards - the thread is going
 * away, so there is no "go" to wait for.
 *
 * Only generated when the thread is in a debugging session.
 */
void udebug_thread_e_event(void)
{
	call_t *call;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&THREAD->debug_lock);

	klog_printf("udebug_thread_e_event");
	klog_printf("- check state");

	/* Must only generate events when in debugging session */
	if (THREAD->debug_active != true) {
		klog_printf("- debug_active: %s, debug_stop: %s",
		    THREAD->debug_active ? "yes(+)" : "no(-)",
		    THREAD->debug_stop ? "yes(-)" : "no(+)");
		spinlock_unlock(&THREAD->debug_lock);
		interrupts_restore(ipl);
		return;
	}

	klog_printf("- trigger event");

	call = THREAD->debug_go_call;
	IPC_SET_RETVAL(call->data, 0);
	IPC_SET_ARG1(call->data, UDEBUG_EVENT_THREAD_E);

	/* Prevent any further debug activity in thread */
	THREAD->debug_active = false;
	THREAD->cur_event = 0;		/* none */
	THREAD->debug_stop = true;	/* set to initial value */
	spinlock_unlock(&THREAD->debug_lock);

	spinlock_lock(&TASK->lock);
	ipc_answer(&TASK->answerbox, THREAD->debug_go_call);
	spinlock_unlock(&TASK->lock);

	interrupts_restore(ipl);

	/* This event does not sleep - debugging has finished in this thread */
}
339 | 367 | ||
/** Common worker for breakpoint and trap events.
 *
 * Answers the outstanding GO call with event type @a etype and the
 * faulting address @a addr, then sleeps until the debugger grants
 * "go" again.
 *
 * Nothing is generated unless the thread is in an active session,
 * currently holds "go" (debug_stop == false) and @a etype is enabled
 * in the task's event mask.
 *
 * @param addr  Address where the breakpoint/trap occurred.
 * @param etype UDEBUG_EVENT_BREAKPOINT or UDEBUG_EVENT_TRAP.
 */
static void breakpoint_trap_event(uintptr_t addr, udebug_event_t etype)
{
	call_t *call;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&THREAD->debug_lock);

	/* Must only generate events when in debugging session and have go */
	if (THREAD->debug_active != true ||
	    THREAD->debug_stop == true ||
	    (TASK->udebug.evmask & UDEBUG_EVMASK(etype)) == 0) {
		spinlock_unlock(&THREAD->debug_lock);
		interrupts_restore(ipl);
		return;
	}

	klog_printf("udebug_breakpoint/trap_event");
	call = THREAD->debug_go_call;
	IPC_SET_RETVAL(call->data, 0);
	IPC_SET_ARG1(call->data, etype);
	IPC_SET_ARG2(call->data, addr);

	/*
	 * Make sure debug_stop is true when going to sleep
	 * in case we get woken up by DEBUG_END. (At which
	 * point it must be back to the initial true value).
	 */
	THREAD->debug_stop = true;

	THREAD->cur_event = etype;
	spinlock_unlock(&THREAD->debug_lock);
	klog_printf("- send answer");

	spinlock_lock(&TASK->lock);
	ipc_answer(&TASK->answerbox, THREAD->debug_go_call);
	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	/* Sleep until the debugger grants "go" again. */
	udebug_wait_for_go(&THREAD->go_wq);
}
381 | 409 | ||
/** Generate a breakpoint event at address @a addr for the current thread. */
void udebug_breakpoint_event(uintptr_t addr)
{
	breakpoint_trap_event(addr, UDEBUG_EVENT_BREAKPOINT);
}
386 | 414 | ||
/** Generate a trap event at address @a addr for the current thread. */
void udebug_trap_event(uintptr_t addr)
{
	breakpoint_trap_event(addr, UDEBUG_EVENT_TRAP);
}
391 | 419 | ||
392 | /** |
420 | /** |
393 | * Terminate task debugging session. |
421 | * Terminate task debugging session. |
394 | * |
422 | * |
395 | * \param ta Must be already locked and interrupts must be disabled. |
423 | * \param ta Must be already locked and interrupts must be disabled. |
396 | * \return Zero on success or negative error code. |
424 | * \return Zero on success or negative error code. |
397 | */ |
425 | */ |
398 | int udebug_task_cleanup(struct task *ta) |
426 | int udebug_task_cleanup(struct task *ta) |
399 | { |
427 | { |
400 | thread_t *t; |
428 | thread_t *t; |
401 | link_t *cur; |
429 | link_t *cur; |
402 | int flags; |
430 | int flags; |
403 | 431 | ||
404 | klog_printf("udebug_task_cleanup()"); |
432 | klog_printf("udebug_task_cleanup()"); |
405 | klog_printf("task %llu", ta->taskid); |
433 | klog_printf("task %llu", ta->taskid); |
406 | 434 | ||
407 | if (ta->udebug.dt_state == UDEBUG_TS_BEGINNING && |
435 | if (ta->udebug.dt_state == UDEBUG_TS_BEGINNING && |
408 | ta->udebug.dt_state != UDEBUG_TS_ACTIVE) { |
436 | ta->udebug.dt_state != UDEBUG_TS_ACTIVE) { |
409 | klog_printf("udebug_task_cleanup(): task not being debugged"); |
437 | klog_printf("udebug_task_cleanup(): task not being debugged"); |
410 | return EINVAL; |
438 | return EINVAL; |
411 | } |
439 | } |
412 | 440 | ||
413 | /* Finish debugging of all userspace threads */ |
441 | /* Finish debugging of all userspace threads */ |
414 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
442 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
415 | t = list_get_instance(cur, thread_t, th_link); |
443 | t = list_get_instance(cur, thread_t, th_link); |
416 | 444 | ||
417 | spinlock_lock(&t->debug_lock); |
445 | spinlock_lock(&t->debug_lock); |
418 | spinlock_lock(&t->lock); |
446 | spinlock_lock(&t->lock); |
419 | 447 | ||
420 | flags = t->flags; |
448 | flags = t->flags; |
421 | 449 | ||
422 | spinlock_unlock(&t->lock); |
450 | spinlock_unlock(&t->lock); |
423 | 451 | ||
424 | /* Only process userspace threads */ |
452 | /* Only process userspace threads */ |
425 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
453 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
426 | /* Prevent any further debug activity in thread */ |
454 | /* Prevent any further debug activity in thread */ |
427 | t->debug_active = false; |
455 | t->debug_active = false; |
428 | t->cur_event = 0; /* none */ |
456 | t->cur_event = 0; /* none */ |
429 | 457 | ||
430 | /* Still has go? */ |
458 | /* Still has go? */ |
431 | if (t->debug_stop == false) { |
459 | if (t->debug_stop == false) { |
432 | /* |
460 | /* |
433 | * Yes, so clear go. As debug_active == false, |
461 | * Yes, so clear go. As debug_active == false, |
434 | * this doesn't affect anything. |
462 | * this doesn't affect anything. |
435 | */ |
463 | */ |
436 | t->debug_stop = true; |
464 | t->debug_stop = true; |
437 | 465 | ||
438 | /* Answer GO call */ |
466 | /* Answer GO call */ |
439 | klog_printf("answer GO call with EVENT_FINISHED"); |
467 | klog_printf("answer GO call with EVENT_FINISHED"); |
440 | IPC_SET_RETVAL(t->debug_go_call->data, 0); |
468 | IPC_SET_RETVAL(t->debug_go_call->data, 0); |
441 | IPC_SET_ARG1(t->debug_go_call->data, UDEBUG_EVENT_FINISHED); |
469 | IPC_SET_ARG1(t->debug_go_call->data, UDEBUG_EVENT_FINISHED); |
442 | ipc_answer(&ta->answerbox, t->debug_go_call); |
470 | ipc_answer(&ta->answerbox, t->debug_go_call); |
443 | } else { |
471 | } else { |
444 | /* |
472 | /* |
445 | * Debug_stop is already at initial value. |
473 | * Debug_stop is already at initial value. |
446 | * Yet this means the thread needs waking up. |
474 | * Yet this means the thread needs waking up. |
447 | */ |
475 | */ |
448 | 476 | ||
449 | /* |
477 | /* |
450 | * t's lock must not be held when calling |
478 | * t's lock must not be held when calling |
451 | * waitq_wakeup. |
479 | * waitq_wakeup. |
452 | */ |
480 | */ |
453 | waitq_wakeup(&t->go_wq, WAKEUP_FIRST); |
481 | waitq_wakeup(&t->go_wq, WAKEUP_FIRST); |
454 | } |
482 | } |
455 | } |
483 | } |
456 | spinlock_unlock(&t->debug_lock); |
484 | spinlock_unlock(&t->debug_lock); |
457 | } |
485 | } |
458 | 486 | ||
459 | ta->udebug.dt_state = UDEBUG_TS_INACTIVE; |
487 | ta->udebug.dt_state = UDEBUG_TS_INACTIVE; |
460 | ta->udebug.debugger = NULL; |
488 | ta->udebug.debugger = NULL; |
461 | 489 | ||
462 | return 0; |
490 | return 0; |
463 | } |
491 | } |
464 | 492 | ||
465 | 493 | ||
466 | /** @} |
494 | /** @} |
467 | */ |
495 | */ |
468 | 496 |