Rev 2841 | Rev 2848 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2841 | Rev 2842 | ||
---|---|---|---|
1 | /** @addtogroup generic |
1 | /** @addtogroup generic |
2 | * @{ |
2 | * @{ |
3 | */ |
3 | */ |
4 | 4 | ||
5 | /** |
5 | /** |
6 | * @file |
6 | * @file |
7 | * @brief Tdebug. |
7 | * @brief Tdebug. |
8 | */ |
8 | */ |
9 | 9 | ||
10 | #include <console/klog.h> |
10 | #include <console/klog.h> |
11 | #include <proc/task.h> |
11 | #include <proc/task.h> |
12 | #include <proc/thread.h> |
12 | #include <proc/thread.h> |
13 | #include <arch.h> |
13 | #include <arch.h> |
14 | #include <errno.h> |
14 | #include <errno.h> |
15 | #include <ipc/ipc.h> |
15 | #include <ipc/ipc.h> |
16 | #include <syscall/copy.h> |
16 | #include <syscall/copy.h> |
17 | #include <udebug/udebug.h> |
17 | #include <udebug/udebug.h> |
18 | #include <udebug/udebug_ipc.h> |
18 | #include <udebug/udebug_ipc.h> |
19 | 19 | ||
/**
 * Get and lock a phone's callee task.
 *
 * This will return a pointer to the task to which the phone
 * is connected. It will lock the task, making sure it exists.
 * (TODO: make sure the udebug-cleanup of the task hasn't
 * started yet)
 *
 * Returns NULL if the phone is not connected or the callee task
 * no longer exists. On success, returns with ta->lock held and
 * interrupts restored to the caller's previous state.
 * NOTE(review): the task lock is held across interrupts_restore() —
 * callers appear to rely on having disabled interrupts themselves
 * beforehand; confirm against all call sites.
 */
static task_t *get_lock_callee_task(phone_t *phone)
{
	answerbox_t *box;
	task_t *ta;
	task_id_t taskid;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&phone->lock);
	if (phone->state != IPC_PHONE_CONNECTED) {
		/* Phone is not connected to anything - no callee */
		spinlock_unlock(&phone->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	box = phone->callee;

	/* Read the task id under box->lock, then drop both locks */
	spinlock_lock(&box->lock);
	ta = box->task;
	taskid = ta->taskid;
	spinlock_unlock(&box->lock);
	spinlock_unlock(&phone->lock);

	/* Locking decoupled using taskid */

	/* Re-find the task by id so that we can lock it safely */
	spinlock_lock(&tasks_lock);
	ta = task_find_by_id(taskid);
	if (ta == NULL) {
		/* Task has meanwhile ceased to exist */
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return NULL;
	}

	/* Lock the task before releasing tasks_lock so it cannot vanish */
	spinlock_lock(&ta->lock);
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return ta;
}
67 | 67 | ||
- | 68 | /** |
|
- | 69 | * Verify that thread t is valid for debugging ops. |
|
- | 70 | * |
|
- | 71 | * Verifies that t belongs to task ta and that debugging operations |
|
- | 72 | * may be used on it. |
|
- | 73 | * |
|
- | 74 | * Thread t's lock must already be held and interrupts must be disabled. |
|
- | 75 | */ |
|
68 | static int verify_thread(thread_t *t) |
76 | static int verify_thread(thread_t *t, task_t *ta) |
69 | { |
77 | { |
70 | /* Verify that 't' exists and belongs to task 'ta' */ |
78 | /* Verify that 't' exists and belongs to task 'ta' */ |
71 | if (!thread_exists(t) || (t->task != ta)) { |
79 | if (!thread_exists(t) || (t->task != ta)) { |
72 | spinlock_unlock(&threads_lock); |
- | |
73 | interrupts_restore(ipl); |
- | |
74 | return ENOENT; |
80 | return ENOENT; |
75 | } |
81 | } |
76 | 82 | ||
77 | /* Verify that 't' is a userspace thread */ |
83 | /* Verify that 't' is a userspace thread */ |
78 | if ((t->flags & THREAD_FLAG_USPACE) == 0) { |
84 | if ((t->flags & THREAD_FLAG_USPACE) == 0) { |
79 | /* It's not, deny its existence */ |
85 | /* It's not, deny its existence */ |
80 | return ENOENT; |
86 | return ENOENT; |
81 | } |
87 | } |
82 | 88 | ||
83 | if ((t->debug_active != true) || (t->debug_stop != true)) { |
89 | if ((t->debug_active != true) || (t->debug_stop != true)) { |
84 | /* Not in debugging session or already has GO */ |
90 | /* Not in debugging session or already has GO */ |
85 | spinlock_unlock(&threads_lock); |
- | |
86 | interrupts_restore(ipl); |
- | |
87 | return EBUSY; |
91 | return EBUSY; |
88 | } |
92 | } |
89 | 93 | ||
90 | return EOK; |
94 | return EOK; |
91 | } |
95 | } |
92 | 96 | ||
93 | static int udebug_rp_begin(call_t *call, phone_t *phone) |
97 | static int udebug_rp_begin(call_t *call, phone_t *phone) |
94 | { |
98 | { |
95 | task_t *ta; |
99 | task_t *ta; |
96 | ipl_t ipl; |
100 | ipl_t ipl; |
97 | int rc; |
101 | int rc; |
98 | 102 | ||
99 | thread_t *t; |
103 | thread_t *t; |
100 | link_t *cur; |
104 | link_t *cur; |
101 | 105 | ||
102 | klog_printf("debug_begin()"); |
106 | klog_printf("debug_begin()"); |
103 | 107 | ||
104 | ipl = interrupts_disable(); |
108 | ipl = interrupts_disable(); |
105 | ta = get_lock_callee_task(phone); |
109 | ta = get_lock_callee_task(phone); |
106 | klog_printf("debugging task %llu", ta->taskid); |
110 | klog_printf("debugging task %llu", ta->taskid); |
107 | 111 | ||
108 | if (ta->dt_state != UDEBUG_TS_INACTIVE) { |
112 | if (ta->dt_state != UDEBUG_TS_INACTIVE) { |
109 | spinlock_unlock(&ta->lock); |
113 | spinlock_unlock(&ta->lock); |
110 | interrupts_restore(ipl); |
114 | interrupts_restore(ipl); |
111 | klog_printf("debug_begin(): busy error"); |
115 | klog_printf("debug_begin(): busy error"); |
112 | return EBUSY; |
116 | return EBUSY; |
113 | } |
117 | } |
114 | 118 | ||
115 | ta->dt_state = UDEBUG_TS_BEGINNING; |
119 | ta->dt_state = UDEBUG_TS_BEGINNING; |
116 | ta->debug_begin_call = call; |
120 | ta->debug_begin_call = call; |
117 | 121 | ||
118 | if (ta->not_stoppable_count == 0) { |
122 | if (ta->not_stoppable_count == 0) { |
119 | ta->dt_state = UDEBUG_TS_ACTIVE; |
123 | ta->dt_state = UDEBUG_TS_ACTIVE; |
120 | ta->debug_begin_call = NULL; |
124 | ta->debug_begin_call = NULL; |
121 | rc = 1; /* actually we need backsend with 0 retval */ |
125 | rc = 1; /* actually we need backsend with 0 retval */ |
122 | } else { |
126 | } else { |
123 | rc = 0; /* no backsend */ |
127 | rc = 0; /* no backsend */ |
124 | } |
128 | } |
125 | 129 | ||
126 | /* Set debug_active on all of the task's userspace threads */ |
130 | /* Set debug_active on all of the task's userspace threads */ |
127 | 131 | ||
128 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
132 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
129 | t = list_get_instance(cur, thread_t, th_link); |
133 | t = list_get_instance(cur, thread_t, th_link); |
130 | 134 | ||
131 | spinlock_lock(&t->lock); |
135 | spinlock_lock(&t->lock); |
132 | if ((t->flags & THREAD_FLAG_USPACE) != 0) |
136 | if ((t->flags & THREAD_FLAG_USPACE) != 0) |
133 | t->debug_active = true; |
137 | t->debug_active = true; |
134 | spinlock_unlock(&t->lock); |
138 | spinlock_unlock(&t->lock); |
135 | } |
139 | } |
136 | 140 | ||
137 | spinlock_unlock(&ta->lock); |
141 | spinlock_unlock(&ta->lock); |
138 | interrupts_restore(ipl); |
142 | interrupts_restore(ipl); |
139 | 143 | ||
140 | klog_printf("debug_begin() done (%s)", |
144 | klog_printf("debug_begin() done (%s)", |
141 | rc ? "backsend" : "stoppability wait"); |
145 | rc ? "backsend" : "stoppability wait"); |
142 | 146 | ||
143 | return rc; |
147 | return rc; |
144 | } |
148 | } |
145 | 149 | ||
146 | static int udebug_rp_end(call_t *call, phone_t *phone) |
150 | static int udebug_rp_end(call_t *call, phone_t *phone) |
147 | { |
151 | { |
148 | task_t *ta; |
152 | task_t *ta; |
149 | ipl_t ipl; |
153 | ipl_t ipl; |
150 | 154 | ||
151 | thread_t *t; |
155 | thread_t *t; |
152 | link_t *cur; |
156 | link_t *cur; |
153 | 157 | ||
154 | klog_printf("udebug_rp_end()"); |
158 | klog_printf("udebug_rp_end()"); |
155 | 159 | ||
156 | ipl = interrupts_disable(); |
160 | ipl = interrupts_disable(); |
157 | ta = get_lock_callee_task(phone); |
161 | ta = get_lock_callee_task(phone); |
158 | klog_printf("task %llu", ta->taskid); |
162 | klog_printf("task %llu", ta->taskid); |
159 | 163 | ||
160 | if (ta->dt_state == UDEBUG_TS_BEGINNING && |
164 | if (ta->dt_state == UDEBUG_TS_BEGINNING && |
161 | ta->dt_state != UDEBUG_TS_ACTIVE) { |
165 | ta->dt_state != UDEBUG_TS_ACTIVE) { |
162 | spinlock_unlock(&ta->lock); |
166 | spinlock_unlock(&ta->lock); |
163 | interrupts_restore(ipl); |
167 | interrupts_restore(ipl); |
164 | klog_printf("udebug_rp_begin(): task not being debugged"); |
168 | klog_printf("udebug_rp_begin(): task not being debugged"); |
165 | return EINVAL; |
169 | return EINVAL; |
166 | } |
170 | } |
167 | 171 | ||
168 | /* Finish debugging of all userspace threads */ |
172 | /* Finish debugging of all userspace threads */ |
169 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
173 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
170 | t = list_get_instance(cur, thread_t, th_link); |
174 | t = list_get_instance(cur, thread_t, th_link); |
171 | 175 | ||
172 | spinlock_lock(&t->lock); |
176 | spinlock_lock(&t->lock); |
173 | 177 | ||
174 | /* Only process userspace threads */ |
178 | /* Only process userspace threads */ |
175 | if ((t->flags & THREAD_FLAG_USPACE) != 0) { |
179 | if ((t->flags & THREAD_FLAG_USPACE) != 0) { |
176 | /* Prevent any further debug activity in thread */ |
180 | /* Prevent any further debug activity in thread */ |
177 | t->debug_active = false; |
181 | t->debug_active = false; |
178 | 182 | ||
179 | /* Still has go? */ |
183 | /* Still has go? */ |
180 | if (t->debug_stop == false) { |
184 | if (t->debug_stop == false) { |
181 | /* |
185 | /* |
182 | * Yes, so clear go. As debug_active == false, |
186 | * Yes, so clear go. As debug_active == false, |
183 | * this doesn't affect anything. |
187 | * this doesn't affect anything. |
184 | */ |
188 | */ |
185 | t->debug_stop = true; |
189 | t->debug_stop = true; |
186 | 190 | ||
187 | /* Answer GO call */ |
191 | /* Answer GO call */ |
188 | klog_printf("answer GO call with EVENT_FINISHED"); |
192 | klog_printf("answer GO call with EVENT_FINISHED"); |
189 | IPC_SET_RETVAL(t->debug_go_call->data, 0); |
193 | IPC_SET_RETVAL(t->debug_go_call->data, 0); |
190 | IPC_SET_ARG1(t->debug_go_call->data, UDEBUG_EVENT_FINISHED); |
194 | IPC_SET_ARG1(t->debug_go_call->data, UDEBUG_EVENT_FINISHED); |
191 | ipc_answer(&ta->answerbox, t->debug_go_call); |
195 | ipc_answer(&ta->answerbox, t->debug_go_call); |
192 | } else { |
196 | } else { |
193 | /* |
197 | /* |
194 | * Debug_stop is already at initial value. |
198 | * Debug_stop is already at initial value. |
195 | * Yet this means the thread needs waking up. |
199 | * Yet this means the thread needs waking up. |
196 | */ |
200 | */ |
197 | waitq_wakeup(&t->go_wq, WAKEUP_FIRST); |
201 | waitq_wakeup(&t->go_wq, WAKEUP_FIRST); |
198 | } |
202 | } |
199 | } |
203 | } |
200 | 204 | ||
201 | spinlock_unlock(&t->lock); |
205 | spinlock_unlock(&t->lock); |
202 | } |
206 | } |
203 | 207 | ||
204 | ta->dt_state = UDEBUG_TS_INACTIVE; |
208 | ta->dt_state = UDEBUG_TS_INACTIVE; |
205 | 209 | ||
206 | spinlock_unlock(&ta->lock); |
210 | spinlock_unlock(&ta->lock); |
207 | interrupts_restore(ipl); |
211 | interrupts_restore(ipl); |
208 | 212 | ||
209 | IPC_SET_RETVAL(call->data, 0); |
213 | IPC_SET_RETVAL(call->data, 0); |
210 | 214 | ||
211 | klog_printf("udebug_rp_end() done\n"); |
215 | klog_printf("udebug_rp_end() done\n"); |
212 | 216 | ||
213 | return 1; |
217 | return 1; |
214 | } |
218 | } |
215 | 219 | ||
216 | 220 | ||
217 | static int udebug_rp_go(call_t *call, phone_t *phone) |
221 | static int udebug_rp_go(call_t *call, phone_t *phone) |
218 | { |
222 | { |
219 | thread_t *t; |
223 | thread_t *t; |
220 | task_t *ta; |
224 | task_t *ta; |
221 | ipl_t ipl; |
225 | ipl_t ipl; |
222 | 226 | ||
223 | klog_printf("debug_go()"); |
227 | klog_printf("debug_go()"); |
224 | ta = get_lock_callee_task(phone); |
228 | ta = get_lock_callee_task(phone); |
225 | spinlock_unlock(&ta->lock); |
229 | spinlock_unlock(&ta->lock); |
226 | // TODO: don't lock ta |
230 | // TODO: don't lock ta |
227 | 231 | ||
228 | t = (thread_t *) IPC_GET_ARG2(call->data); |
232 | t = (thread_t *) IPC_GET_ARG2(call->data); |
229 | 233 | ||
230 | ipl = interrupts_disable(); |
234 | ipl = interrupts_disable(); |
231 | spinlock_lock(&threads_lock); |
235 | spinlock_lock(&threads_lock); |
232 | 236 | ||
233 | 237 | ||
234 | t->debug_go_call = call; |
238 | t->debug_go_call = call; |
235 | t->debug_stop = false; |
239 | t->debug_stop = false; |
236 | waitq_wakeup(&t->go_wq, WAKEUP_FIRST); |
240 | waitq_wakeup(&t->go_wq, WAKEUP_FIRST); |
237 | 241 | ||
238 | spinlock_unlock(&threads_lock); |
242 | spinlock_unlock(&threads_lock); |
239 | interrupts_restore(ipl); |
243 | interrupts_restore(ipl); |
240 | 244 | ||
241 | return 0; /* no backsend */ |
245 | return 0; /* no backsend */ |
242 | } |
246 | } |
243 | 247 | ||
244 | static int udebug_rp_args_read(call_t *call, phone_t *phone) |
248 | static int udebug_rp_args_read(call_t *call, phone_t *phone) |
245 | { |
249 | { |
246 | thread_t *t; |
250 | thread_t *t; |
247 | task_t *ta; |
251 | task_t *ta; |
248 | void *uspace_buffer; |
252 | void *uspace_buffer; |
249 | int rc; |
253 | int rc; |
250 | ipl_t ipl; |
254 | ipl_t ipl; |
251 | unative_t buffer[6]; |
255 | unative_t buffer[6]; |
252 | 256 | ||
253 | klog_printf("debug_args_read()"); |
257 | klog_printf("debug_args_read()"); |
254 | 258 | ||
255 | ta = get_lock_callee_task(phone); |
259 | ta = get_lock_callee_task(phone); |
256 | klog_printf("task %llu", ta->taskid); |
260 | klog_printf("task %llu", ta->taskid); |
257 | spinlock_unlock(&ta->lock); |
261 | spinlock_unlock(&ta->lock); |
258 | 262 | ||
259 | t = (thread_t *) IPC_GET_ARG2(call->data); |
263 | t = (thread_t *) IPC_GET_ARG2(call->data); |
260 | 264 | ||
261 | ipl = interrupts_disable(); |
265 | ipl = interrupts_disable(); |
262 | spinlock_lock(&threads_lock); |
266 | spinlock_lock(&threads_lock); |
263 | 267 | ||
264 | /* Verify that thread t exists and may be operated on */ |
268 | /* Verify that thread t exists and may be operated on */ |
265 | rc = verify_thread(t); |
269 | rc = verify_thread(t, ta); |
266 | if (rc != EOK) { |
270 | if (rc != EOK) { |
267 | spinlock_unlock(&threads_lock); |
271 | spinlock_unlock(&threads_lock); |
268 | interrupts_restore(ipl); |
272 | interrupts_restore(ipl); |
269 | return rc; |
273 | return rc; |
270 | } |
274 | } |
271 | 275 | ||
272 | //FIXME: additionally we need to verify that we are inside a syscall |
276 | //FIXME: additionally we need to verify that we are inside a syscall |
273 | 277 | ||
274 | /* Copy to a local buffer before releasing the lock */ |
278 | /* Copy to a local buffer before releasing the lock */ |
275 | memcpy(buffer, t->syscall_args, 6 * sizeof(unative_t)); |
279 | memcpy(buffer, t->syscall_args, 6 * sizeof(unative_t)); |
276 | 280 | ||
277 | spinlock_unlock(&threads_lock); |
281 | spinlock_unlock(&threads_lock); |
278 | interrupts_restore(ipl); |
282 | interrupts_restore(ipl); |
279 | 283 | ||
280 | /* Now copy to userspace */ |
284 | /* Now copy to userspace */ |
281 | 285 | ||
282 | uspace_buffer = (void *)IPC_GET_ARG3(call->data); |
286 | uspace_buffer = (void *)IPC_GET_ARG3(call->data); |
283 | 287 | ||
284 | rc = copy_to_uspace(uspace_buffer, buffer, 6 * sizeof(unative_t)); |
288 | rc = copy_to_uspace(uspace_buffer, buffer, 6 * sizeof(unative_t)); |
285 | if (rc != 0) { |
289 | if (rc != 0) { |
286 | spinlock_unlock(&ta->lock); |
290 | spinlock_unlock(&ta->lock); |
287 | klog_printf("debug_args_read() - copy failed"); |
291 | klog_printf("debug_args_read() - copy failed"); |
288 | return rc; |
292 | return rc; |
289 | } |
293 | } |
290 | 294 | ||
291 | klog_printf("debug_args_read() done"); |
295 | klog_printf("debug_args_read() done"); |
292 | return 1; /* actually need becksend with retval 0 */ |
296 | return 1; /* actually need becksend with retval 0 */ |
293 | } |
297 | } |
294 | 298 | ||
295 | static int udebug_rp_regs_read(call_t *call, phone_t *phone) |
299 | static int udebug_rp_regs_read(call_t *call, phone_t *phone) |
296 | { |
300 | { |
297 | thread_t *t; |
301 | thread_t *t; |
298 | task_t *ta; |
302 | task_t *ta; |
299 | void *uspace_buffer; |
303 | void *uspace_buffer; |
300 | unative_t to_copy; |
304 | unative_t to_copy; |
301 | int rc; |
305 | int rc; |
302 | istate_t *state; |
306 | istate_t *state; |
303 | istate_t state_copy; |
307 | istate_t state_copy; |
304 | ipl_t ipl; |
308 | ipl_t ipl; |
305 | 309 | ||
306 | klog_printf("debug_regs_read()"); |
310 | klog_printf("debug_regs_read()"); |
307 | 311 | ||
308 | ta = get_lock_callee_task(phone); |
312 | ta = get_lock_callee_task(phone); |
309 | spinlock_unlock(&ta->lock); |
313 | spinlock_unlock(&ta->lock); |
310 | //FIXME: don't lock ta |
314 | //FIXME: don't lock ta |
311 | 315 | ||
312 | ipl = interrupts_disable(); |
316 | ipl = interrupts_disable(); |
313 | spinlock_lock(&threads_lock); |
317 | spinlock_lock(&threads_lock); |
314 | 318 | ||
315 | t = (thread_t *) IPC_GET_ARG2(call->data); |
319 | t = (thread_t *) IPC_GET_ARG2(call->data); |
316 | 320 | ||
317 | /* Verify that thread t exists and may be operated on */ |
321 | /* Verify that thread t exists and may be operated on */ |
318 | rc = verify_thread(t); |
322 | rc = verify_thread(t, ta); |
319 | if (rc != EOK) { |
323 | if (rc != EOK) { |
320 | spinlock_unlock(&threads_lock); |
324 | spinlock_unlock(&threads_lock); |
321 | interrupts_restore(ipl); |
325 | interrupts_restore(ipl); |
322 | return rc; |
326 | return rc; |
323 | } |
327 | } |
324 | 328 | ||
325 | state = t->uspace_state; |
329 | state = t->uspace_state; |
326 | if (state == NULL) { |
330 | if (state == NULL) { |
327 | spinlock_unlock(&threads_lock); |
331 | spinlock_unlock(&threads_lock); |
328 | interrupts_restore(ipl); |
332 | interrupts_restore(ipl); |
329 | klog_printf("debug_regs_read() - istate not available"); |
333 | klog_printf("debug_regs_read() - istate not available"); |
330 | return EBUSY; |
334 | return EBUSY; |
331 | } |
335 | } |
332 | 336 | ||
333 | /* Copy to a local buffer so that we can release the lock */ |
337 | /* Copy to a local buffer so that we can release the lock */ |
334 | memcpy(&state_copy, state, sizeof(state_copy)); |
338 | memcpy(&state_copy, state, sizeof(state_copy)); |
335 | spinlock_unlock(&threads_lock); |
339 | spinlock_unlock(&threads_lock); |
336 | interrupts_restore(ipl); |
340 | interrupts_restore(ipl); |
337 | 341 | ||
338 | uspace_buffer = (void *)IPC_GET_ARG3(call->data); |
342 | uspace_buffer = (void *)IPC_GET_ARG3(call->data); |
339 | to_copy = IPC_GET_ARG4(call->data); |
343 | to_copy = IPC_GET_ARG4(call->data); |
340 | if (to_copy > sizeof(istate_t)) to_copy = sizeof(istate_t); |
344 | if (to_copy > sizeof(istate_t)) to_copy = sizeof(istate_t); |
341 | 345 | ||
342 | rc = copy_to_uspace(uspace_buffer, &state_copy, to_copy); |
346 | rc = copy_to_uspace(uspace_buffer, &state_copy, to_copy); |
343 | if (rc != 0) { |
347 | if (rc != 0) { |
344 | spinlock_unlock(&ta->lock); |
348 | spinlock_unlock(&ta->lock); |
345 | klog_printf("debug_regs_read() - copy failed"); |
349 | klog_printf("debug_regs_read() - copy failed"); |
346 | return rc; |
350 | return rc; |
347 | } |
351 | } |
348 | 352 | ||
349 | IPC_SET_ARG1(call->data, to_copy); |
353 | IPC_SET_ARG1(call->data, to_copy); |
350 | IPC_SET_ARG2(call->data, sizeof(istate_t)); |
354 | IPC_SET_ARG2(call->data, sizeof(istate_t)); |
351 | 355 | ||
352 | klog_printf("debug_regs_read() done"); |
356 | klog_printf("debug_regs_read() done"); |
353 | return 1; /* actually need becksend with retval 0 */ |
357 | return 1; /* actually need becksend with retval 0 */ |
354 | } |
358 | } |
355 | 359 | ||
356 | static int udebug_rp_regs_write(call_t *call, phone_t *phone) |
360 | static int udebug_rp_regs_write(call_t *call, phone_t *phone) |
357 | { |
361 | { |
358 | thread_t *t; |
362 | thread_t *t; |
359 | task_t *ta; |
363 | task_t *ta; |
360 | void *uspace_data; |
364 | void *uspace_data; |
361 | unative_t to_copy; |
365 | unative_t to_copy; |
362 | int rc; |
366 | int rc; |
363 | istate_t *state; |
367 | istate_t *state; |
364 | istate_t data_copy; |
368 | istate_t data_copy; |
365 | ipl_t ipl; |
369 | ipl_t ipl; |
366 | 370 | ||
367 | klog_printf("debug_regs_write()"); |
371 | klog_printf("debug_regs_write()"); |
368 | 372 | ||
369 | /* First copy to a local buffer */ |
373 | /* First copy to a local buffer */ |
370 | 374 | ||
371 | uspace_data = (void *)IPC_GET_ARG3(call->data); |
375 | uspace_data = (void *)IPC_GET_ARG3(call->data); |
372 | to_copy = IPC_GET_ARG4(call->data); |
376 | to_copy = IPC_GET_ARG4(call->data); |
373 | if (to_copy > sizeof(istate_t)) to_copy = sizeof(istate_t); |
377 | if (to_copy > sizeof(istate_t)) to_copy = sizeof(istate_t); |
374 | 378 | ||
375 | rc = copy_from_uspace(&data_copy, uspace_data, to_copy); |
379 | rc = copy_from_uspace(&data_copy, uspace_data, to_copy); |
376 | if (rc != 0) { |
380 | if (rc != 0) { |
377 | klog_printf("debug_regs_write() - copy failed"); |
381 | klog_printf("debug_regs_write() - copy failed"); |
378 | return rc; |
382 | return rc; |
379 | } |
383 | } |
380 | 384 | ||
381 | ta = get_lock_callee_task(phone); |
385 | ta = get_lock_callee_task(phone); |
382 | spinlock_unlock(&ta->lock); |
386 | spinlock_unlock(&ta->lock); |
383 | //FIXME: don't lock ta |
387 | //FIXME: don't lock ta |
384 | 388 | ||
385 | /* Now try to change the thread's uspace_state */ |
389 | /* Now try to change the thread's uspace_state */ |
386 | 390 | ||
387 | ipl = interrupts_disable(); |
391 | ipl = interrupts_disable(); |
388 | spinlock_lock(&threads_lock); |
392 | spinlock_lock(&threads_lock); |
389 | 393 | ||
390 | t = (thread_t *) IPC_GET_ARG2(call->data); |
394 | t = (thread_t *) IPC_GET_ARG2(call->data); |
391 | 395 | ||
392 | /* Verify that thread t exists and may be operated on */ |
396 | /* Verify that thread t exists and may be operated on */ |
393 | rc = verify_thread(t); |
397 | rc = verify_thread(t, ta); |
394 | if (rc != EOK) { |
398 | if (rc != EOK) { |
395 | spinlock_unlock(&threads_lock); |
399 | spinlock_unlock(&threads_lock); |
396 | interrupts_restore(ipl); |
400 | interrupts_restore(ipl); |
397 | return rc; |
401 | return rc; |
398 | } |
402 | } |
399 | 403 | ||
400 | state = t->uspace_state; |
404 | state = t->uspace_state; |
401 | if (state == NULL) { |
405 | if (state == NULL) { |
402 | spinlock_unlock(&threads_lock); |
406 | spinlock_unlock(&threads_lock); |
403 | interrupts_restore(ipl); |
407 | interrupts_restore(ipl); |
404 | klog_printf("debug_regs_write() - istate not available"); |
408 | klog_printf("debug_regs_write() - istate not available"); |
405 | return EBUSY; |
409 | return EBUSY; |
406 | } |
410 | } |
407 | 411 | ||
408 | memcpy(t->uspace_state, &data_copy, sizeof(t->uspace_state)); |
412 | memcpy(t->uspace_state, &data_copy, sizeof(t->uspace_state)); |
409 | 413 | ||
410 | spinlock_unlock(&threads_lock); |
414 | spinlock_unlock(&threads_lock); |
411 | interrupts_restore(ipl); |
415 | interrupts_restore(ipl); |
412 | 416 | ||
413 | /* Set answer values */ |
417 | /* Set answer values */ |
414 | 418 | ||
415 | IPC_SET_ARG1(call->data, to_copy); |
419 | IPC_SET_ARG1(call->data, to_copy); |
416 | IPC_SET_ARG2(call->data, sizeof(istate_t)); |
420 | IPC_SET_ARG2(call->data, sizeof(istate_t)); |
417 | 421 | ||
418 | klog_printf("debug_regs_write() done"); |
422 | klog_printf("debug_regs_write() done"); |
419 | return 1; /* actually need becksend with retval 0 */ |
423 | return 1; /* actually need becksend with retval 0 */ |
420 | } |
424 | } |
421 | 425 | ||
422 | static int udebug_rp_thread_read(call_t *call, phone_t *phone) |
426 | static int udebug_rp_thread_read(call_t *call, phone_t *phone) |
423 | { |
427 | { |
424 | thread_t *t; |
428 | thread_t *t; |
425 | link_t *cur; |
429 | link_t *cur; |
426 | task_t *ta; |
430 | task_t *ta; |
427 | unative_t *uspace_buffer; |
431 | unative_t *uspace_buffer; |
428 | unative_t to_copy; |
432 | unative_t to_copy; |
429 | int rc; |
433 | int rc; |
430 | unsigned total_bytes; |
434 | unsigned total_bytes; |
431 | unsigned buf_size; |
435 | unsigned buf_size; |
432 | unative_t tid; |
436 | unative_t tid; |
433 | unsigned num_threads, copied_ids; |
437 | unsigned num_threads, copied_ids; |
434 | ipl_t ipl; |
438 | ipl_t ipl; |
435 | unative_t *buffer; |
439 | unative_t *buffer; |
436 | int flags; |
440 | int flags; |
437 | 441 | ||
438 | klog_printf("debug_thread_read()"); |
442 | klog_printf("debug_thread_read()"); |
439 | 443 | ||
440 | ipl = interrupts_disable(); |
444 | ipl = interrupts_disable(); |
441 | ta = get_lock_callee_task(phone); |
445 | ta = get_lock_callee_task(phone); |
442 | 446 | ||
443 | /* Verify task state */ |
447 | /* Verify task state */ |
444 | if (ta->dt_state != UDEBUG_TS_ACTIVE) { |
448 | if (ta->dt_state != UDEBUG_TS_ACTIVE) { |
445 | spinlock_unlock(&ta->lock); |
449 | spinlock_unlock(&ta->lock); |
446 | interrupts_restore(ipl); |
450 | interrupts_restore(ipl); |
447 | return EBUSY; |
451 | return EBUSY; |
448 | } |
452 | } |
449 | 453 | ||
450 | /* Count the threads first */ |
454 | /* Count the threads first */ |
451 | 455 | ||
452 | num_threads = 0; |
456 | num_threads = 0; |
453 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
457 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
454 | /* Count all threads, to be on the safe side */ |
458 | /* Count all threads, to be on the safe side */ |
455 | ++num_threads; |
459 | ++num_threads; |
456 | } |
460 | } |
457 | 461 | ||
458 | /* Allocate a buffer and copy down the threads' ids */ |
462 | /* Allocate a buffer and copy down the threads' ids */ |
459 | buffer = malloc(num_threads * sizeof(unative_t), 0); // ??? |
463 | buffer = malloc(num_threads * sizeof(unative_t), 0); // ??? |
460 | 464 | ||
461 | copied_ids = 0; |
465 | copied_ids = 0; |
462 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
466 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
463 | t = list_get_instance(cur, thread_t, th_link); |
467 | t = list_get_instance(cur, thread_t, th_link); |
464 | 468 | ||
465 | spinlock_lock(&t->lock); |
469 | spinlock_lock(&t->lock); |
466 | flags = t->flags; |
470 | flags = t->flags; |
467 | spinlock_unlock(&t->lock); |
471 | spinlock_unlock(&t->lock); |
468 | 472 | ||
469 | /* Not interested in kernel threads */ |
473 | /* Not interested in kernel threads */ |
470 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
474 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
471 | /* Using thread struct pointer for identification */ |
475 | /* Using thread struct pointer for identification */ |
472 | tid = (unative_t) t; |
476 | tid = (unative_t) t; |
473 | buffer[copied_ids++] = tid; |
477 | buffer[copied_ids++] = tid; |
474 | } |
478 | } |
475 | } |
479 | } |
476 | 480 | ||
477 | spinlock_unlock(&ta->lock); |
481 | spinlock_unlock(&ta->lock); |
478 | interrupts_restore(ipl); |
482 | interrupts_restore(ipl); |
479 | 483 | ||
480 | /* Now copy to userspace */ |
484 | /* Now copy to userspace */ |
481 | 485 | ||
482 | uspace_buffer = (void *)IPC_GET_ARG2(call->data); |
486 | uspace_buffer = (void *)IPC_GET_ARG2(call->data); |
483 | buf_size = IPC_GET_ARG3(call->data); |
487 | buf_size = IPC_GET_ARG3(call->data); |
484 | 488 | ||
485 | total_bytes = copied_ids * sizeof(unative_t); |
489 | total_bytes = copied_ids * sizeof(unative_t); |
486 | 490 | ||
487 | if (buf_size > total_bytes) |
491 | if (buf_size > total_bytes) |
488 | to_copy = total_bytes; |
492 | to_copy = total_bytes; |
489 | else |
493 | else |
490 | to_copy = buf_size; |
494 | to_copy = buf_size; |
491 | 495 | ||
492 | rc = copy_to_uspace(uspace_buffer, buffer, to_copy); |
496 | rc = copy_to_uspace(uspace_buffer, buffer, to_copy); |
493 | free(buffer); |
497 | free(buffer); |
494 | 498 | ||
495 | if (rc != 0) { |
499 | if (rc != 0) { |
496 | klog_printf("debug_thread_read() - copy failed"); |
500 | klog_printf("debug_thread_read() - copy failed"); |
497 | return rc; |
501 | return rc; |
498 | } |
502 | } |
499 | 503 | ||
500 | IPC_SET_ARG1(call->data, to_copy); |
504 | IPC_SET_ARG1(call->data, to_copy); |
501 | IPC_SET_ARG2(call->data, total_bytes); |
505 | IPC_SET_ARG2(call->data, total_bytes); |
502 | 506 | ||
503 | klog_printf("debug_thread_read() done"); |
507 | klog_printf("debug_thread_read() done"); |
504 | return 1; /* actually need becksend with retval 0 */ |
508 | return 1; /* actually need becksend with retval 0 */ |
505 | } |
509 | } |
506 | 510 | ||
507 | static int udebug_rp_mem_write(call_t *call, phone_t *phone) |
511 | static int udebug_rp_mem_write(call_t *call, phone_t *phone) |
508 | { |
512 | { |
509 | void *uspace_data; |
513 | void *uspace_data; |
510 | unative_t to_copy; |
514 | unative_t to_copy; |
511 | int rc; |
515 | int rc; |
512 | void *buffer; |
516 | void *buffer; |
513 | 517 | ||
514 | klog_printf("udebug_rp_mem_write()"); |
518 | klog_printf("udebug_rp_mem_write()"); |
515 | 519 | ||
516 | uspace_data = (void *)IPC_GET_ARG2(call->data); |
520 | uspace_data = (void *)IPC_GET_ARG2(call->data); |
517 | to_copy = IPC_GET_ARG4(call->data); |
521 | to_copy = IPC_GET_ARG4(call->data); |
518 | 522 | ||
519 | buffer = malloc(to_copy, 0); // ??? |
523 | buffer = malloc(to_copy, 0); // ??? |
520 | 524 | ||
521 | rc = copy_from_uspace(buffer, uspace_data, to_copy); |
525 | rc = copy_from_uspace(buffer, uspace_data, to_copy); |
522 | if (rc != 0) { |
526 | if (rc != 0) { |
523 | klog_printf(" - copy failed"); |
527 | klog_printf(" - copy failed"); |
524 | return rc; |
528 | return rc; |
525 | } |
529 | } |
526 | 530 | ||
527 | call->buffer = buffer; |
531 | call->buffer = buffer; |
528 | 532 | ||
529 | klog_printf(" - done"); |
533 | klog_printf(" - done"); |
530 | return 1; /* actually need becksend with retval 0 */ |
534 | return 1; /* actually need becksend with retval 0 */ |
531 | } |
535 | } |
532 | 536 | ||
533 | 537 | ||
534 | int udebug_request_preprocess(call_t *call, phone_t *phone) |
538 | int udebug_request_preprocess(call_t *call, phone_t *phone) |
535 | { |
539 | { |
536 | int rc; |
540 | int rc; |
537 | 541 | ||
538 | switch (IPC_GET_ARG1(call->data)) { |
542 | switch (IPC_GET_ARG1(call->data)) { |
539 | case UDEBUG_M_BEGIN: |
543 | case UDEBUG_M_BEGIN: |
540 | rc = udebug_rp_begin(call, phone); |
544 | rc = udebug_rp_begin(call, phone); |
541 | return rc; |
545 | return rc; |
542 | case UDEBUG_M_END: |
546 | case UDEBUG_M_END: |
543 | rc = udebug_rp_end(call, phone); |
547 | rc = udebug_rp_end(call, phone); |
544 | return rc; |
548 | return rc; |
545 | case UDEBUG_M_GO: |
549 | case UDEBUG_M_GO: |
546 | rc = udebug_rp_go(call, phone); |
550 | rc = udebug_rp_go(call, phone); |
547 | return rc; |
551 | return rc; |
548 | case UDEBUG_M_ARGS_READ: |
552 | case UDEBUG_M_ARGS_READ: |
549 | rc = udebug_rp_args_read(call, phone); |
553 | rc = udebug_rp_args_read(call, phone); |
550 | return rc; |
554 | return rc; |
551 | case UDEBUG_M_REGS_READ: |
555 | case UDEBUG_M_REGS_READ: |
552 | rc = udebug_rp_regs_read(call, phone); |
556 | rc = udebug_rp_regs_read(call, phone); |
553 | return rc; |
557 | return rc; |
554 | case UDEBUG_M_REGS_WRITE: |
558 | case UDEBUG_M_REGS_WRITE: |
555 | rc = udebug_rp_regs_write(call, phone); |
559 | rc = udebug_rp_regs_write(call, phone); |
556 | return rc; |
560 | return rc; |
557 | case UDEBUG_M_THREAD_READ: |
561 | case UDEBUG_M_THREAD_READ: |
558 | rc = udebug_rp_thread_read(call, phone); |
562 | rc = udebug_rp_thread_read(call, phone); |
559 | return rc; |
563 | return rc; |
560 | case UDEBUG_M_MEM_WRITE: |
564 | case UDEBUG_M_MEM_WRITE: |
561 | rc = udebug_rp_mem_write(call, phone); |
565 | rc = udebug_rp_mem_write(call, phone); |
562 | return rc; |
566 | return rc; |
563 | default: |
567 | default: |
564 | break; |
568 | break; |
565 | } |
569 | } |
566 | 570 | ||
567 | return 0; |
571 | return 0; |
568 | } |
572 | } |
569 | 573 | ||
570 | static void udebug_receive_mem_read(call_t *call) |
574 | static void udebug_receive_mem_read(call_t *call) |
571 | { |
575 | { |
572 | unative_t uspace_dst; |
576 | unative_t uspace_dst; |
573 | void *uspace_ptr; |
577 | void *uspace_ptr; |
574 | unsigned size; |
578 | unsigned size; |
575 | void *buffer; |
579 | void *buffer; |
576 | int rc; |
580 | int rc; |
577 | 581 | ||
578 | klog_printf("debug_mem_read()"); |
582 | klog_printf("debug_mem_read()"); |
579 | uspace_dst = IPC_GET_ARG2(call->data); |
583 | uspace_dst = IPC_GET_ARG2(call->data); |
580 | uspace_ptr = (void *)IPC_GET_ARG3(call->data); |
584 | uspace_ptr = (void *)IPC_GET_ARG3(call->data); |
581 | size = IPC_GET_ARG4(call->data); |
585 | size = IPC_GET_ARG4(call->data); |
582 | 586 | ||
583 | buffer = malloc(size, 0); // ??? |
587 | buffer = malloc(size, 0); // ??? |
584 | klog_printf("debug_mem_read: src=%u, size=%u", uspace_ptr, size); |
588 | klog_printf("debug_mem_read: src=%u, size=%u", uspace_ptr, size); |
585 | 589 | ||
586 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
590 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
587 | * be a problem */ |
591 | * be a problem */ |
588 | rc = copy_from_uspace(buffer, uspace_ptr, size); |
592 | rc = copy_from_uspace(buffer, uspace_ptr, size); |
589 | if (rc) { |
593 | if (rc) { |
590 | IPC_SET_RETVAL(call->data, rc); |
594 | IPC_SET_RETVAL(call->data, rc); |
591 | return; |
595 | return; |
592 | } |
596 | } |
593 | 597 | ||
594 | klog_printf("first word: %u", *((unative_t *)buffer)); |
598 | klog_printf("first word: %u", *((unative_t *)buffer)); |
595 | 599 | ||
596 | IPC_SET_RETVAL(call->data, 0); |
600 | IPC_SET_RETVAL(call->data, 0); |
597 | /* Hack: ARG1=dest, ARG2=size as in IPC_M_DATA_READ so that |
601 | /* Hack: ARG1=dest, ARG2=size as in IPC_M_DATA_READ so that |
598 | same code in process_answer() can be used |
602 | same code in process_answer() can be used |
599 | (no way to distinguish method in answer) */ |
603 | (no way to distinguish method in answer) */ |
600 | IPC_SET_ARG1(call->data, uspace_dst); |
604 | IPC_SET_ARG1(call->data, uspace_dst); |
601 | IPC_SET_ARG2(call->data, size); |
605 | IPC_SET_ARG2(call->data, size); |
602 | call->buffer = buffer; |
606 | call->buffer = buffer; |
603 | 607 | ||
604 | ipc_answer(&TASK->kernel_box, call); |
608 | ipc_answer(&TASK->kernel_box, call); |
605 | } |
609 | } |
606 | 610 | ||
607 | static void udebug_receive_mem_write(call_t *call) |
611 | static void udebug_receive_mem_write(call_t *call) |
608 | { |
612 | { |
609 | void *uspace_dst; |
613 | void *uspace_dst; |
610 | unsigned size; |
614 | unsigned size; |
611 | void *buffer; |
615 | void *buffer; |
612 | int rc; |
616 | int rc; |
613 | udebug_task_state_t dts; |
617 | udebug_task_state_t dts; |
614 | 618 | ||
615 | klog_printf("udebug_receive_mem_write()"); |
619 | klog_printf("udebug_receive_mem_write()"); |
616 | 620 | ||
617 | /* Verify task state */ |
621 | /* Verify task state */ |
618 | spinlock_lock(&TASK->lock); |
622 | spinlock_lock(&TASK->lock); |
619 | dts = TASK->dt_state; |
623 | dts = TASK->dt_state; |
620 | spinlock_unlock(&TASK->lock); |
624 | spinlock_unlock(&TASK->lock); |
621 | 625 | ||
622 | if (dts != UDEBUG_TS_ACTIVE) { |
626 | if (dts != UDEBUG_TS_ACTIVE) { |
623 | IPC_SET_RETVAL(call->data, EBUSY); |
627 | IPC_SET_RETVAL(call->data, EBUSY); |
624 | ipc_answer(&TASK->kernel_box, call); |
628 | ipc_answer(&TASK->kernel_box, call); |
625 | return; |
629 | return; |
626 | } |
630 | } |
627 | 631 | ||
628 | uspace_dst = (void *)IPC_GET_ARG3(call->data); |
632 | uspace_dst = (void *)IPC_GET_ARG3(call->data); |
629 | size = IPC_GET_ARG4(call->data); |
633 | size = IPC_GET_ARG4(call->data); |
630 | 634 | ||
631 | buffer = call->buffer; |
635 | buffer = call->buffer; |
632 | klog_printf("dst=%u, size=%u", uspace_dst, size); |
636 | klog_printf("dst=%u, size=%u", uspace_dst, size); |
633 | 637 | ||
634 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
638 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
635 | * be a problem */ |
639 | * be a problem */ |
636 | rc = copy_to_uspace(uspace_dst, buffer, size); |
640 | rc = copy_to_uspace(uspace_dst, buffer, size); |
637 | if (rc) { |
641 | if (rc) { |
638 | IPC_SET_RETVAL(call->data, rc); |
642 | IPC_SET_RETVAL(call->data, rc); |
639 | ipc_answer(&TASK->kernel_box, call); |
643 | ipc_answer(&TASK->kernel_box, call); |
640 | return; |
644 | return; |
641 | } |
645 | } |
642 | 646 | ||
643 | IPC_SET_RETVAL(call->data, 0); |
647 | IPC_SET_RETVAL(call->data, 0); |
644 | 648 | ||
645 | free(call->buffer); |
649 | free(call->buffer); |
646 | call->buffer = NULL; |
650 | call->buffer = NULL; |
647 | 651 | ||
648 | ipc_answer(&TASK->kernel_box, call); |
652 | ipc_answer(&TASK->kernel_box, call); |
649 | } |
653 | } |
650 | 654 | ||
651 | 655 | ||
652 | /** |
656 | /** |
653 | * Handle a debug call received on the kernel answerbox. |
657 | * Handle a debug call received on the kernel answerbox. |
654 | * |
658 | * |
655 | * This is called by the kbox servicing thread. |
659 | * This is called by the kbox servicing thread. |
656 | */ |
660 | */ |
657 | void udebug_call_receive(call_t *call) |
661 | void udebug_call_receive(call_t *call) |
658 | { |
662 | { |
659 | int debug_method; |
663 | int debug_method; |
660 | 664 | ||
661 | debug_method = IPC_GET_ARG1(call->data); |
665 | debug_method = IPC_GET_ARG1(call->data); |
662 | 666 | ||
663 | switch (debug_method) { |
667 | switch (debug_method) { |
664 | case UDEBUG_M_MEM_READ: |
668 | case UDEBUG_M_MEM_READ: |
665 | udebug_receive_mem_read(call); |
669 | udebug_receive_mem_read(call); |
666 | break; |
670 | break; |
667 | case UDEBUG_M_MEM_WRITE: |
671 | case UDEBUG_M_MEM_WRITE: |
668 | udebug_receive_mem_write(call); |
672 | udebug_receive_mem_write(call); |
669 | break; |
673 | break; |
670 | } |
674 | } |
671 | } |
675 | } |
672 | 676 | ||
/** @}
 */
675 | 679 |