Rev 2851 | Rev 2870 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2851 | Rev 2866 | ||
---|---|---|---|
1 | /** @addtogroup generic |
1 | /** @addtogroup generic |
2 | * @{ |
2 | * @{ |
3 | */ |
3 | */ |
4 | 4 | ||
5 | /** |
5 | /** |
6 | * @file |
6 | * @file |
7 | * @brief Tdebug. |
7 | * @brief Tdebug. |
8 | */ |
8 | */ |
9 | 9 | ||
10 | #include <console/klog.h> |
10 | #include <console/klog.h> |
11 | #include <proc/task.h> |
11 | #include <proc/task.h> |
12 | #include <proc/thread.h> |
12 | #include <proc/thread.h> |
13 | #include <arch.h> |
13 | #include <arch.h> |
14 | #include <errno.h> |
14 | #include <errno.h> |
15 | #include <ipc/ipc.h> |
15 | #include <ipc/ipc.h> |
16 | #include <syscall/copy.h> |
16 | #include <syscall/copy.h> |
17 | #include <udebug/udebug.h> |
17 | #include <udebug/udebug.h> |
18 | #include <udebug/udebug_ipc.h> |
18 | #include <udebug/udebug_ipc.h> |
19 | 19 | ||
20 | /** |
20 | /** |
21 | * Get a phone's callee task id. |
21 | * Get a phone's callee task id. |
22 | * |
22 | * |
23 | * This will return the id of the task to which the phone |
23 | * This will return the id of the task to which the phone |
24 | * is connected. |
24 | * is connected. |
25 | * |
25 | * |
26 | * Interrupts must be already disabled. |
26 | * Interrupts must be already disabled. |
27 | */ |
27 | */ |
28 | static task_id_t get_callee_task_id(phone_t *phone) |
28 | static task_id_t get_callee_task_id(phone_t *phone) |
29 | { |
29 | { |
30 | answerbox_t *box; |
30 | answerbox_t *box; |
31 | task_id_t taskid; |
31 | task_id_t taskid; |
32 | 32 | ||
33 | spinlock_lock(&phone->lock); |
33 | spinlock_lock(&phone->lock); |
34 | if (phone->state != IPC_PHONE_CONNECTED) { |
34 | if (phone->state != IPC_PHONE_CONNECTED) { |
35 | spinlock_unlock(&phone->lock); |
35 | spinlock_unlock(&phone->lock); |
36 | return NULL; |
36 | return NULL; |
37 | } |
37 | } |
38 | 38 | ||
39 | box = phone->callee; |
39 | box = phone->callee; |
40 | 40 | ||
41 | spinlock_lock(&box->lock); |
41 | spinlock_lock(&box->lock); |
42 | taskid = box->task->taskid; |
42 | taskid = box->task->taskid; |
43 | spinlock_unlock(&box->lock); |
43 | spinlock_unlock(&box->lock); |
44 | spinlock_unlock(&phone->lock); |
44 | spinlock_unlock(&phone->lock); |
45 | 45 | ||
46 | return taskid; |
46 | return taskid; |
47 | } |
47 | } |
48 | 48 | ||
49 | /** |
49 | /** |
50 | * Get and lock a phone's callee task. |
50 | * Get and lock a phone's callee task. |
51 | * |
51 | * |
52 | * This will return a pointer to the task to which the phone |
52 | * This will return a pointer to the task to which the phone |
53 | * is connected. It will lock the task, making sure it exists. |
53 | * is connected. It will lock the task, making sure it exists. |
54 | * |
54 | * |
55 | * Interrupts must be already disabled. |
55 | * Interrupts must be already disabled. |
56 | * |
- | |
57 | * (TODO: make sure the udebug-cleanup of the task hasn't |
- | |
58 | * started yet) |
- | |
59 | */ |
56 | */ |
60 | static task_t *get_lock_callee_task(phone_t *phone) |
57 | static task_t *get_lock_callee_task(phone_t *phone) |
61 | { |
58 | { |
62 | task_t *ta; |
59 | task_t *ta; |
63 | task_id_t taskid; |
60 | task_id_t taskid; |
64 | 61 | ||
65 | taskid = get_callee_task_id(phone); |
62 | taskid = get_callee_task_id(phone); |
66 | 63 | ||
67 | spinlock_lock(&tasks_lock); |
64 | spinlock_lock(&tasks_lock); |
68 | ta = task_find_by_id(taskid); |
65 | ta = task_find_by_id(taskid); |
69 | if (ta == NULL) { |
66 | if (ta == NULL) { |
70 | spinlock_unlock(&tasks_lock); |
67 | spinlock_unlock(&tasks_lock); |
71 | return NULL; |
68 | return NULL; |
72 | } |
69 | } |
73 | 70 | ||
74 | spinlock_lock(&ta->lock); |
71 | spinlock_lock(&ta->lock); |
75 | spinlock_unlock(&tasks_lock); |
72 | spinlock_unlock(&tasks_lock); |
76 | 73 | ||
77 | return ta; |
74 | return ta; |
78 | } |
75 | } |
79 | 76 | ||
80 | /** |
77 | /** |
81 | * Prepare a thread for a debugging operation. |
78 | * Prepare a thread for a debugging operation. |
82 | * |
79 | * |
83 | * Simply put, return thread t with t->debug_lock held, |
80 | * Simply put, return thread t with t->debug_lock held, |
84 | * but only if it verifies all conditions. |
81 | * but only if it verifies all conditions. |
85 | * |
82 | * |
86 | * Specifically, verifies that thread t exists, is a userspace thread, |
83 | * Specifically, verifies that thread t exists, is a userspace thread, |
87 | * belongs to the callee of 'phone'. It also locks t->debug_lock, |
84 | * belongs to the callee of 'phone'. It also locks t->debug_lock, |
88 | * making sure that t->debug_active is true - that the thread is |
85 | * making sure that t->debug_active is true - that the thread is |
89 | * in a valid debugging session. |
86 | * in a valid debugging session. |
90 | * |
87 | * |
91 | * Returns EOK if all went well, or an error code otherwise. |
88 | * Returns EOK if all went well, or an error code otherwise. |
92 | * Interrupts must be already disabled when calling this function. |
89 | * Interrupts must be already disabled when calling this function. |
93 | * |
90 | * |
94 | * Note: This function sports complicated locking. |
91 | * Note: This function sports complicated locking. |
95 | */ |
92 | */ |
96 | static int _thread_op_begin(phone_t *phone, thread_t *t) |
93 | static int _thread_op_begin(phone_t *phone, thread_t *t) |
97 | { |
94 | { |
98 | int rc; |
95 | int rc; |
99 | task_id_t taskid; |
96 | task_id_t taskid; |
100 | int task_match; |
97 | int task_match; |
101 | DEADLOCK_PROBE_INIT(p_tasklock); |
98 | DEADLOCK_PROBE_INIT(p_tasklock); |
102 | 99 | ||
103 | taskid = get_callee_task_id(phone); |
100 | taskid = get_callee_task_id(phone); |
104 | 101 | ||
105 | /* Need to lock down the thread and than it's owner task */ |
102 | /* Need to lock down the thread and than it's owner task */ |
106 | grab_locks: |
103 | grab_locks: |
107 | spinlock_lock(&threads_lock); |
104 | spinlock_lock(&threads_lock); |
108 | 105 | ||
109 | if (!thread_exists(t)) { |
106 | if (!thread_exists(t)) { |
110 | spinlock_unlock(&threads_lock); |
107 | spinlock_unlock(&threads_lock); |
111 | return ENOENT; |
108 | return ENOENT; |
112 | } |
109 | } |
113 | 110 | ||
114 | spinlock_lock(&t->debug_lock); |
111 | spinlock_lock(&t->debug_lock); |
115 | spinlock_lock(&t->lock); |
112 | spinlock_lock(&t->lock); |
116 | 113 | ||
117 | if (!spinlock_trylock(&t->task->lock)) { |
114 | if (!spinlock_trylock(&t->task->lock)) { |
118 | spinlock_unlock(&t->lock); |
115 | spinlock_unlock(&t->lock); |
119 | spinlock_unlock(&t->debug_lock); |
116 | spinlock_unlock(&t->debug_lock); |
120 | DEADLOCK_PROBE(p_tasklock, DEADLOCK_THRESHOLD); |
117 | DEADLOCK_PROBE(p_tasklock, DEADLOCK_THRESHOLD); |
121 | goto grab_locks; /* avoid deadlock */ |
118 | goto grab_locks; /* avoid deadlock */ |
122 | } |
119 | } |
123 | 120 | ||
124 | /* Now verify that it's the callee */ |
121 | /* Now verify that it's the callee */ |
125 | task_match = (t->task->taskid == taskid); |
122 | task_match = (t->task->taskid == taskid); |
126 | 123 | ||
127 | spinlock_unlock(&t->task->lock); |
124 | spinlock_unlock(&t->task->lock); |
128 | 125 | ||
129 | if (!task_match) { |
126 | if (!task_match) { |
130 | /* No such thread belonging to callee */ |
127 | /* No such thread belonging to callee */ |
131 | rc = ENOENT; |
128 | rc = ENOENT; |
132 | goto error_exit; |
129 | goto error_exit; |
133 | } |
130 | } |
134 | 131 | ||
135 | /* Verify that 't' is a userspace thread */ |
132 | /* Verify that 't' is a userspace thread */ |
136 | if ((t->flags & THREAD_FLAG_USPACE) == 0) { |
133 | if ((t->flags & THREAD_FLAG_USPACE) == 0) { |
137 | /* It's not, deny its existence */ |
134 | /* It's not, deny its existence */ |
138 | rc = ENOENT; |
135 | rc = ENOENT; |
139 | goto error_exit; |
136 | goto error_exit; |
140 | } |
137 | } |
141 | 138 | ||
142 | if ((t->debug_active != true) || (t->debug_stop != true)) { |
139 | if ((t->debug_active != true) || (t->debug_stop != true)) { |
143 | /* Not in debugging session or already has GO */ |
140 | /* Not in debugging session or already has GO */ |
144 | rc = ENOENT; |
141 | rc = ENOENT; |
145 | goto error_exit; |
142 | goto error_exit; |
146 | } |
143 | } |
147 | 144 | ||
148 | spinlock_unlock(&threads_lock); |
145 | spinlock_unlock(&threads_lock); |
149 | spinlock_unlock(&t->lock); |
146 | spinlock_unlock(&t->lock); |
150 | 147 | ||
151 | /* Only t->debug_lock left */ |
148 | /* Only t->debug_lock left */ |
152 | 149 | ||
153 | return EOK; /* All went well */ |
150 | return EOK; /* All went well */ |
154 | 151 | ||
155 | 152 | ||
156 | /* Executed when a check on the thread fails */ |
153 | /* Executed when a check on the thread fails */ |
157 | error_exit: |
154 | error_exit: |
158 | spinlock_unlock(&t->lock); |
155 | spinlock_unlock(&t->lock); |
159 | spinlock_unlock(&t->debug_lock); |
156 | spinlock_unlock(&t->debug_lock); |
160 | spinlock_unlock(&threads_lock); |
157 | spinlock_unlock(&threads_lock); |
161 | 158 | ||
162 | /* No locks left here */ |
159 | /* No locks left here */ |
163 | return rc; /* Some errors occured */ |
160 | return rc; /* Some errors occured */ |
164 | } |
161 | } |
165 | 162 | ||
/** Counterpart to _thread_op_begin(): releases t->debug_lock. */
static void _thread_op_end(thread_t *t)
{
	spinlock_unlock(&t->debug_lock);
}
170 | 167 | ||
171 | static int udebug_rp_begin(call_t *call, phone_t *phone) |
168 | static int udebug_rp_begin(call_t *call, phone_t *phone) |
172 | { |
169 | { |
173 | task_t *ta; |
170 | task_t *ta; |
174 | ipl_t ipl; |
171 | ipl_t ipl; |
175 | int rc; |
172 | int rc; |
176 | 173 | ||
177 | thread_t *t; |
174 | thread_t *t; |
178 | link_t *cur; |
175 | link_t *cur; |
179 | 176 | ||
180 | klog_printf("debug_begin()"); |
177 | klog_printf("debug_begin()"); |
181 | 178 | ||
182 | ipl = interrupts_disable(); |
179 | ipl = interrupts_disable(); |
183 | ta = get_lock_callee_task(phone); |
180 | ta = get_lock_callee_task(phone); |
184 | klog_printf("debugging task %llu", ta->taskid); |
181 | klog_printf("debugging task %llu", ta->taskid); |
185 | 182 | ||
186 | if (ta->dt_state != UDEBUG_TS_INACTIVE) { |
183 | if (ta->dt_state != UDEBUG_TS_INACTIVE) { |
187 | spinlock_unlock(&ta->lock); |
184 | spinlock_unlock(&ta->lock); |
188 | interrupts_restore(ipl); |
185 | interrupts_restore(ipl); |
189 | klog_printf("debug_begin(): busy error"); |
186 | klog_printf("debug_begin(): busy error"); |
190 | return EBUSY; |
187 | return EBUSY; |
191 | } |
188 | } |
192 | 189 | ||
193 | ta->dt_state = UDEBUG_TS_BEGINNING; |
190 | ta->dt_state = UDEBUG_TS_BEGINNING; |
194 | ta->debug_begin_call = call; |
191 | ta->debug_begin_call = call; |
195 | 192 | ||
196 | if (ta->not_stoppable_count == 0) { |
193 | if (ta->not_stoppable_count == 0) { |
197 | ta->dt_state = UDEBUG_TS_ACTIVE; |
194 | ta->dt_state = UDEBUG_TS_ACTIVE; |
198 | ta->debug_begin_call = NULL; |
195 | ta->debug_begin_call = NULL; |
199 | rc = 1; /* actually we need backsend with 0 retval */ |
196 | rc = 1; /* actually we need backsend with 0 retval */ |
200 | } else { |
197 | } else { |
201 | rc = 0; /* no backsend */ |
198 | rc = 0; /* no backsend */ |
202 | } |
199 | } |
203 | 200 | ||
204 | /* Set debug_active on all of the task's userspace threads */ |
201 | /* Set debug_active on all of the task's userspace threads */ |
205 | 202 | ||
206 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
203 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
207 | t = list_get_instance(cur, thread_t, th_link); |
204 | t = list_get_instance(cur, thread_t, th_link); |
208 | 205 | ||
209 | spinlock_lock(&t->debug_lock); |
206 | spinlock_lock(&t->debug_lock); |
210 | if ((t->flags & THREAD_FLAG_USPACE) != 0) |
207 | if ((t->flags & THREAD_FLAG_USPACE) != 0) |
211 | t->debug_active = true; |
208 | t->debug_active = true; |
212 | spinlock_unlock(&t->debug_lock); |
209 | spinlock_unlock(&t->debug_lock); |
213 | } |
210 | } |
214 | 211 | ||
215 | spinlock_unlock(&ta->lock); |
212 | spinlock_unlock(&ta->lock); |
216 | interrupts_restore(ipl); |
213 | interrupts_restore(ipl); |
217 | 214 | ||
218 | klog_printf("debug_begin() done (%s)", |
215 | klog_printf("debug_begin() done (%s)", |
219 | rc ? "backsend" : "stoppability wait"); |
216 | rc ? "backsend" : "stoppability wait"); |
220 | 217 | ||
221 | return rc; |
218 | return rc; |
222 | } |
219 | } |
223 | 220 | ||
224 | static int udebug_rp_end(call_t *call, phone_t *phone) |
221 | static int udebug_rp_end(call_t *call, phone_t *phone) |
225 | { |
222 | { |
226 | task_t *ta; |
223 | task_t *ta; |
227 | ipl_t ipl; |
224 | ipl_t ipl; |
228 | 225 | ||
229 | thread_t *t; |
226 | thread_t *t; |
230 | link_t *cur; |
227 | link_t *cur; |
231 | int flags; |
228 | int flags; |
232 | 229 | ||
233 | klog_printf("udebug_rp_end()"); |
230 | klog_printf("udebug_rp_end()"); |
234 | 231 | ||
235 | ipl = interrupts_disable(); |
232 | ipl = interrupts_disable(); |
236 | ta = get_lock_callee_task(phone); |
233 | ta = get_lock_callee_task(phone); |
237 | klog_printf("task %llu", ta->taskid); |
234 | klog_printf("task %llu", ta->taskid); |
238 | 235 | ||
239 | if (ta->dt_state == UDEBUG_TS_BEGINNING && |
236 | if (ta->dt_state == UDEBUG_TS_BEGINNING && |
240 | ta->dt_state != UDEBUG_TS_ACTIVE) { |
237 | ta->dt_state != UDEBUG_TS_ACTIVE) { |
241 | spinlock_unlock(&ta->lock); |
238 | spinlock_unlock(&ta->lock); |
242 | interrupts_restore(ipl); |
239 | interrupts_restore(ipl); |
243 | klog_printf("udebug_rp_begin(): task not being debugged"); |
240 | klog_printf("udebug_rp_begin(): task not being debugged"); |
244 | return EINVAL; |
241 | return EINVAL; |
245 | } |
242 | } |
246 | 243 | ||
247 | /* Finish debugging of all userspace threads */ |
244 | /* Finish debugging of all userspace threads */ |
248 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
245 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
249 | t = list_get_instance(cur, thread_t, th_link); |
246 | t = list_get_instance(cur, thread_t, th_link); |
250 | 247 | ||
251 | spinlock_lock(&t->debug_lock); |
248 | spinlock_lock(&t->debug_lock); |
252 | spinlock_lock(&t->lock); |
249 | spinlock_lock(&t->lock); |
253 | 250 | ||
254 | flags = t->flags; |
251 | flags = t->flags; |
255 | 252 | ||
256 | spinlock_unlock(&t->lock); |
253 | spinlock_unlock(&t->lock); |
257 | 254 | ||
258 | /* Only process userspace threads */ |
255 | /* Only process userspace threads */ |
259 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
256 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
260 | /* Prevent any further debug activity in thread */ |
257 | /* Prevent any further debug activity in thread */ |
261 | t->debug_active = false; |
258 | t->debug_active = false; |
- | 259 | t->cur_event = 0; /* none */ |
|
262 | 260 | ||
263 | /* Still has go? */ |
261 | /* Still has go? */ |
264 | if (t->debug_stop == false) { |
262 | if (t->debug_stop == false) { |
265 | /* |
263 | /* |
266 | * Yes, so clear go. As debug_active == false, |
264 | * Yes, so clear go. As debug_active == false, |
267 | * this doesn't affect anything. |
265 | * this doesn't affect anything. |
268 | */ |
266 | */ |
269 | t->debug_stop = true; |
267 | t->debug_stop = true; |
270 | 268 | ||
271 | /* Answer GO call */ |
269 | /* Answer GO call */ |
272 | klog_printf("answer GO call with EVENT_FINISHED"); |
270 | klog_printf("answer GO call with EVENT_FINISHED"); |
273 | IPC_SET_RETVAL(t->debug_go_call->data, 0); |
271 | IPC_SET_RETVAL(t->debug_go_call->data, 0); |
274 | IPC_SET_ARG1(t->debug_go_call->data, UDEBUG_EVENT_FINISHED); |
272 | IPC_SET_ARG1(t->debug_go_call->data, UDEBUG_EVENT_FINISHED); |
275 | ipc_answer(&ta->answerbox, t->debug_go_call); |
273 | ipc_answer(&ta->answerbox, t->debug_go_call); |
276 | } else { |
274 | } else { |
277 | /* |
275 | /* |
278 | * Debug_stop is already at initial value. |
276 | * Debug_stop is already at initial value. |
279 | * Yet this means the thread needs waking up. |
277 | * Yet this means the thread needs waking up. |
280 | */ |
278 | */ |
281 | 279 | ||
282 | /* |
280 | /* |
283 | * t's lock must not be held when calling |
281 | * t's lock must not be held when calling |
284 | * waitq_wakeup. |
282 | * waitq_wakeup. |
285 | */ |
283 | */ |
286 | waitq_wakeup(&t->go_wq, WAKEUP_FIRST); |
284 | waitq_wakeup(&t->go_wq, WAKEUP_FIRST); |
287 | } |
285 | } |
288 | } |
286 | } |
289 | spinlock_unlock(&t->debug_lock); |
287 | spinlock_unlock(&t->debug_lock); |
290 | } |
288 | } |
291 | 289 | ||
292 | ta->dt_state = UDEBUG_TS_INACTIVE; |
290 | ta->dt_state = UDEBUG_TS_INACTIVE; |
293 | 291 | ||
294 | spinlock_unlock(&ta->lock); |
292 | spinlock_unlock(&ta->lock); |
295 | interrupts_restore(ipl); |
293 | interrupts_restore(ipl); |
296 | 294 | ||
297 | IPC_SET_RETVAL(call->data, 0); |
295 | IPC_SET_RETVAL(call->data, 0); |
298 | 296 | ||
299 | klog_printf("udebug_rp_end() done\n"); |
297 | klog_printf("udebug_rp_end() done\n"); |
300 | 298 | ||
301 | return 1; |
299 | return 1; |
302 | } |
300 | } |
303 | 301 | ||
304 | 302 | ||
305 | static int udebug_rp_go(call_t *call, phone_t *phone) |
303 | static int udebug_rp_go(call_t *call, phone_t *phone) |
306 | { |
304 | { |
307 | thread_t *t; |
305 | thread_t *t; |
308 | ipl_t ipl; |
306 | ipl_t ipl; |
309 | int rc; |
307 | int rc; |
310 | 308 | ||
311 | klog_printf("debug_go()"); |
309 | klog_printf("debug_go()"); |
312 | 310 | ||
313 | t = (thread_t *)IPC_GET_ARG2(call->data); |
311 | t = (thread_t *)IPC_GET_ARG2(call->data); |
314 | 312 | ||
315 | ipl = interrupts_disable(); |
313 | ipl = interrupts_disable(); |
316 | 314 | ||
317 | /* On success, this will lock t->debug_lock */ |
315 | /* On success, this will lock t->debug_lock */ |
318 | rc = _thread_op_begin(phone, t); |
316 | rc = _thread_op_begin(phone, t); |
319 | if (rc != EOK) { |
317 | if (rc != EOK) { |
320 | interrupts_restore(ipl); |
318 | interrupts_restore(ipl); |
321 | return rc; |
319 | return rc; |
322 | } |
320 | } |
323 | 321 | ||
324 | t->debug_go_call = call; |
322 | t->debug_go_call = call; |
325 | t->debug_stop = false; |
323 | t->debug_stop = false; |
- | 324 | t->cur_event = 0; /* none */ |
|
326 | 325 | ||
327 | /* |
326 | /* |
328 | * Neither t's lock nor threads_lock may be held during wakeup |
327 | * Neither t's lock nor threads_lock may be held during wakeup |
329 | */ |
328 | */ |
330 | waitq_wakeup(&t->go_wq, WAKEUP_FIRST); |
329 | waitq_wakeup(&t->go_wq, WAKEUP_FIRST); |
331 | 330 | ||
332 | _thread_op_end(t); |
331 | _thread_op_end(t); |
333 | interrupts_restore(ipl); |
332 | interrupts_restore(ipl); |
334 | 333 | ||
335 | return 0; /* no backsend */ |
334 | return 0; /* no backsend */ |
336 | } |
335 | } |
337 | 336 | ||
338 | static int udebug_rp_args_read(call_t *call, phone_t *phone) |
337 | static int udebug_rp_args_read(call_t *call, phone_t *phone) |
339 | { |
338 | { |
340 | thread_t *t; |
339 | thread_t *t; |
341 | void *uspace_buffer; |
340 | void *uspace_buffer; |
342 | int rc; |
341 | int rc; |
343 | ipl_t ipl; |
342 | ipl_t ipl; |
344 | unative_t buffer[6]; |
343 | unative_t buffer[6]; |
345 | 344 | ||
346 | klog_printf("debug_args_read()"); |
345 | klog_printf("debug_args_read()"); |
347 | 346 | ||
348 | t = (thread_t *)IPC_GET_ARG2(call->data); |
347 | t = (thread_t *)IPC_GET_ARG2(call->data); |
349 | 348 | ||
350 | ipl = interrupts_disable(); |
349 | ipl = interrupts_disable(); |
351 | 350 | ||
352 | /* On success, this will lock t->debug_lock */ |
351 | /* On success, this will lock t->debug_lock */ |
353 | rc = _thread_op_begin(phone, t); |
352 | rc = _thread_op_begin(phone, t); |
354 | if (rc != EOK) { |
353 | if (rc != EOK) { |
355 | interrupts_restore(ipl); |
354 | interrupts_restore(ipl); |
356 | return rc; |
355 | return rc; |
357 | } |
356 | } |
358 | 357 | ||
359 | //FIXME: additionally we need to verify that we are inside a syscall |
358 | /* Additionally we need to verify that we are inside a syscall */ |
- | 359 | if (t->cur_event != UDEBUG_EVENT_SYSCALL) { |
|
- | 360 | _thread_op_end(t); |
|
- | 361 | interrupts_restore(ipl); |
|
- | 362 | return EINVAL; |
|
- | 363 | } |
|
360 | 364 | ||
361 | /* Copy to a local buffer before releasing the lock */ |
365 | /* Copy to a local buffer before releasing the lock */ |
362 | memcpy(buffer, t->syscall_args, 6 * sizeof(unative_t)); |
366 | memcpy(buffer, t->syscall_args, 6 * sizeof(unative_t)); |
363 | 367 | ||
364 | _thread_op_end(t); |
368 | _thread_op_end(t); |
365 | interrupts_restore(ipl); |
369 | interrupts_restore(ipl); |
366 | 370 | ||
367 | /* Now copy to userspace */ |
371 | /* Now copy to userspace */ |
368 | 372 | ||
369 | uspace_buffer = (void *)IPC_GET_ARG3(call->data); |
373 | uspace_buffer = (void *)IPC_GET_ARG3(call->data); |
370 | 374 | ||
371 | rc = copy_to_uspace(uspace_buffer, buffer, 6 * sizeof(unative_t)); |
375 | rc = copy_to_uspace(uspace_buffer, buffer, 6 * sizeof(unative_t)); |
372 | if (rc != 0) { |
376 | if (rc != 0) { |
373 | klog_printf("debug_args_read() - copy failed"); |
377 | klog_printf("debug_args_read() - copy failed"); |
374 | return rc; |
378 | return rc; |
375 | } |
379 | } |
376 | 380 | ||
377 | klog_printf("debug_args_read() done"); |
381 | klog_printf("debug_args_read() done"); |
378 | return 1; /* actually need becksend with retval 0 */ |
382 | return 1; /* actually need becksend with retval 0 */ |
379 | } |
383 | } |
380 | 384 | ||
381 | static int udebug_rp_regs_read(call_t *call, phone_t *phone) |
385 | static int udebug_rp_regs_read(call_t *call, phone_t *phone) |
382 | { |
386 | { |
383 | thread_t *t; |
387 | thread_t *t; |
384 | void *uspace_buffer; |
388 | void *uspace_buffer; |
385 | unative_t to_copy; |
389 | unative_t to_copy; |
386 | int rc; |
390 | int rc; |
387 | istate_t *state; |
391 | istate_t *state; |
388 | istate_t state_copy; |
392 | istate_t state_copy; |
389 | ipl_t ipl; |
393 | ipl_t ipl; |
390 | 394 | ||
391 | klog_printf("debug_regs_read()"); |
395 | klog_printf("debug_regs_read()"); |
392 | 396 | ||
393 | t = (thread_t *) IPC_GET_ARG2(call->data); |
397 | t = (thread_t *) IPC_GET_ARG2(call->data); |
394 | 398 | ||
395 | ipl = interrupts_disable(); |
399 | ipl = interrupts_disable(); |
396 | 400 | ||
397 | /* On success, this will lock t->debug_lock */ |
401 | /* On success, this will lock t->debug_lock */ |
398 | rc = _thread_op_begin(phone, t); |
402 | rc = _thread_op_begin(phone, t); |
399 | if (rc != EOK) { |
403 | if (rc != EOK) { |
400 | interrupts_restore(ipl); |
404 | interrupts_restore(ipl); |
401 | return rc; |
405 | return rc; |
402 | } |
406 | } |
403 | 407 | ||
404 | state = t->uspace_state; |
408 | state = t->uspace_state; |
405 | if (state == NULL) { |
409 | if (state == NULL) { |
406 | _thread_op_end(t); |
410 | _thread_op_end(t); |
407 | interrupts_restore(ipl); |
411 | interrupts_restore(ipl); |
408 | klog_printf("debug_regs_read() - istate not available"); |
412 | klog_printf("debug_regs_read() - istate not available"); |
409 | return EBUSY; |
413 | return EBUSY; |
410 | } |
414 | } |
411 | 415 | ||
412 | /* Copy to a local buffer so that we can release the lock */ |
416 | /* Copy to a local buffer so that we can release the lock */ |
413 | memcpy(&state_copy, state, sizeof(state_copy)); |
417 | memcpy(&state_copy, state, sizeof(state_copy)); |
414 | _thread_op_end(t); |
418 | _thread_op_end(t); |
415 | interrupts_restore(ipl); |
419 | interrupts_restore(ipl); |
416 | 420 | ||
417 | uspace_buffer = (void *)IPC_GET_ARG3(call->data); |
421 | uspace_buffer = (void *)IPC_GET_ARG3(call->data); |
418 | to_copy = IPC_GET_ARG4(call->data); |
422 | to_copy = IPC_GET_ARG4(call->data); |
419 | if (to_copy > sizeof(istate_t)) to_copy = sizeof(istate_t); |
423 | if (to_copy > sizeof(istate_t)) to_copy = sizeof(istate_t); |
420 | 424 | ||
421 | rc = copy_to_uspace(uspace_buffer, &state_copy, to_copy); |
425 | rc = copy_to_uspace(uspace_buffer, &state_copy, to_copy); |
422 | if (rc != 0) { |
426 | if (rc != 0) { |
423 | klog_printf("debug_regs_read() - copy failed"); |
427 | klog_printf("debug_regs_read() - copy failed"); |
424 | return rc; |
428 | return rc; |
425 | } |
429 | } |
426 | 430 | ||
427 | IPC_SET_ARG1(call->data, to_copy); |
431 | IPC_SET_ARG1(call->data, to_copy); |
428 | IPC_SET_ARG2(call->data, sizeof(istate_t)); |
432 | IPC_SET_ARG2(call->data, sizeof(istate_t)); |
429 | 433 | ||
430 | klog_printf("debug_regs_read() done"); |
434 | klog_printf("debug_regs_read() done"); |
431 | return 1; /* actually need becksend with retval 0 */ |
435 | return 1; /* actually need becksend with retval 0 */ |
432 | } |
436 | } |
433 | 437 | ||
434 | 438 | ||
435 | 439 | ||
436 | static int udebug_rp_regs_write(call_t *call, phone_t *phone) |
440 | static int udebug_rp_regs_write(call_t *call, phone_t *phone) |
437 | { |
441 | { |
438 | thread_t *t; |
442 | thread_t *t; |
439 | void *uspace_data; |
443 | void *uspace_data; |
440 | unative_t to_copy; |
444 | unative_t to_copy; |
441 | int rc; |
445 | int rc; |
442 | istate_t *state; |
446 | istate_t *state; |
443 | istate_t data_copy; |
447 | istate_t data_copy; |
444 | ipl_t ipl; |
448 | ipl_t ipl; |
445 | 449 | ||
446 | klog_printf("debug_regs_write()"); |
450 | klog_printf("debug_regs_write()"); |
447 | 451 | ||
448 | /* First copy to a local buffer */ |
452 | /* First copy to a local buffer */ |
449 | 453 | ||
450 | uspace_data = (void *)IPC_GET_ARG3(call->data); |
454 | uspace_data = (void *)IPC_GET_ARG3(call->data); |
451 | to_copy = IPC_GET_ARG4(call->data); |
455 | to_copy = IPC_GET_ARG4(call->data); |
452 | if (to_copy > sizeof(istate_t)) to_copy = sizeof(istate_t); |
456 | if (to_copy > sizeof(istate_t)) to_copy = sizeof(istate_t); |
453 | 457 | ||
454 | rc = copy_from_uspace(&data_copy, uspace_data, to_copy); |
458 | rc = copy_from_uspace(&data_copy, uspace_data, to_copy); |
455 | if (rc != 0) { |
459 | if (rc != 0) { |
456 | klog_printf("debug_regs_write() - copy failed"); |
460 | klog_printf("debug_regs_write() - copy failed"); |
457 | return rc; |
461 | return rc; |
458 | } |
462 | } |
459 | 463 | ||
460 | /* Now try to change the thread's uspace_state */ |
464 | /* Now try to change the thread's uspace_state */ |
461 | 465 | ||
462 | ipl = interrupts_disable(); |
466 | ipl = interrupts_disable(); |
463 | t = (thread_t *) IPC_GET_ARG2(call->data); |
467 | t = (thread_t *) IPC_GET_ARG2(call->data); |
464 | 468 | ||
465 | /* On success, this will lock t->debug_lock */ |
469 | /* On success, this will lock t->debug_lock */ |
466 | rc = _thread_op_begin(phone, t); |
470 | rc = _thread_op_begin(phone, t); |
467 | if (rc != EOK) { |
471 | if (rc != EOK) { |
468 | interrupts_restore(ipl); |
472 | interrupts_restore(ipl); |
469 | return rc; |
473 | return rc; |
470 | } |
474 | } |
471 | 475 | ||
472 | state = t->uspace_state; |
476 | state = t->uspace_state; |
473 | if (state == NULL) { |
477 | if (state == NULL) { |
474 | _thread_op_end(t); |
478 | _thread_op_end(t); |
475 | interrupts_restore(ipl); |
479 | interrupts_restore(ipl); |
476 | klog_printf("debug_regs_write() - istate not available"); |
480 | klog_printf("debug_regs_write() - istate not available"); |
477 | return EBUSY; |
481 | return EBUSY; |
478 | } |
482 | } |
479 | 483 | ||
480 | memcpy(t->uspace_state, &data_copy, sizeof(t->uspace_state)); |
484 | memcpy(t->uspace_state, &data_copy, sizeof(t->uspace_state)); |
481 | 485 | ||
482 | _thread_op_end(t); |
486 | _thread_op_end(t); |
483 | interrupts_restore(ipl); |
487 | interrupts_restore(ipl); |
484 | 488 | ||
485 | /* Set answer values */ |
489 | /* Set answer values */ |
486 | 490 | ||
487 | IPC_SET_ARG1(call->data, to_copy); |
491 | IPC_SET_ARG1(call->data, to_copy); |
488 | IPC_SET_ARG2(call->data, sizeof(istate_t)); |
492 | IPC_SET_ARG2(call->data, sizeof(istate_t)); |
489 | 493 | ||
490 | klog_printf("debug_regs_write() done"); |
494 | klog_printf("debug_regs_write() done"); |
491 | return 1; /* actually need becksend with retval 0 */ |
495 | return 1; /* actually need becksend with retval 0 */ |
492 | } |
496 | } |
493 | 497 | ||
494 | static int udebug_rp_thread_read(call_t *call, phone_t *phone) |
498 | static int udebug_rp_thread_read(call_t *call, phone_t *phone) |
495 | { |
499 | { |
496 | thread_t *t; |
500 | thread_t *t; |
497 | link_t *cur; |
501 | link_t *cur; |
498 | task_t *ta; |
502 | task_t *ta; |
499 | unative_t *uspace_buffer; |
503 | unative_t *uspace_buffer; |
500 | unative_t to_copy; |
504 | unative_t to_copy; |
501 | int rc; |
505 | int rc; |
502 | unsigned total_bytes; |
506 | unsigned total_bytes; |
503 | unsigned buf_size; |
507 | unsigned buf_size; |
504 | unative_t tid; |
508 | unative_t tid; |
505 | unsigned num_threads, copied_ids; |
509 | unsigned num_threads, copied_ids; |
506 | ipl_t ipl; |
510 | ipl_t ipl; |
507 | unative_t *buffer; |
511 | unative_t *buffer; |
508 | int flags; |
512 | int flags; |
509 | 513 | ||
510 | klog_printf("debug_thread_read()"); |
514 | klog_printf("debug_thread_read()"); |
511 | 515 | ||
512 | ipl = interrupts_disable(); |
516 | ipl = interrupts_disable(); |
513 | ta = get_lock_callee_task(phone); |
517 | ta = get_lock_callee_task(phone); |
514 | 518 | ||
515 | /* Verify task state */ |
519 | /* Verify task state */ |
516 | if (ta->dt_state != UDEBUG_TS_ACTIVE) { |
520 | if (ta->dt_state != UDEBUG_TS_ACTIVE) { |
517 | spinlock_unlock(&ta->lock); |
521 | spinlock_unlock(&ta->lock); |
518 | interrupts_restore(ipl); |
522 | interrupts_restore(ipl); |
519 | return EBUSY; |
523 | return EBUSY; |
520 | } |
524 | } |
521 | 525 | ||
522 | /* Count the threads first */ |
526 | /* Count the threads first */ |
523 | 527 | ||
524 | num_threads = 0; |
528 | num_threads = 0; |
525 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
529 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
526 | /* Count all threads, to be on the safe side */ |
530 | /* Count all threads, to be on the safe side */ |
527 | ++num_threads; |
531 | ++num_threads; |
528 | } |
532 | } |
529 | 533 | ||
530 | /* Allocate a buffer and copy down the threads' ids */ |
534 | /* Allocate a buffer and copy down the threads' ids */ |
531 | buffer = malloc(num_threads * sizeof(unative_t), 0); // ??? |
535 | buffer = malloc(num_threads * sizeof(unative_t), 0); // ??? |
532 | 536 | ||
533 | copied_ids = 0; |
537 | copied_ids = 0; |
534 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
538 | for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
535 | t = list_get_instance(cur, thread_t, th_link); |
539 | t = list_get_instance(cur, thread_t, th_link); |
536 | 540 | ||
537 | spinlock_lock(&t->lock); |
541 | spinlock_lock(&t->lock); |
538 | flags = t->flags; |
542 | flags = t->flags; |
539 | spinlock_unlock(&t->lock); |
543 | spinlock_unlock(&t->lock); |
540 | 544 | ||
541 | /* Not interested in kernel threads */ |
545 | /* Not interested in kernel threads */ |
542 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
546 | if ((flags & THREAD_FLAG_USPACE) != 0) { |
543 | /* Using thread struct pointer for identification */ |
547 | /* Using thread struct pointer for identification */ |
544 | tid = (unative_t) t; |
548 | tid = (unative_t) t; |
545 | buffer[copied_ids++] = tid; |
549 | buffer[copied_ids++] = tid; |
546 | } |
550 | } |
547 | } |
551 | } |
548 | 552 | ||
549 | spinlock_unlock(&ta->lock); |
553 | spinlock_unlock(&ta->lock); |
550 | interrupts_restore(ipl); |
554 | interrupts_restore(ipl); |
551 | 555 | ||
552 | /* Now copy to userspace */ |
556 | /* Now copy to userspace */ |
553 | 557 | ||
554 | uspace_buffer = (void *)IPC_GET_ARG2(call->data); |
558 | uspace_buffer = (void *)IPC_GET_ARG2(call->data); |
555 | buf_size = IPC_GET_ARG3(call->data); |
559 | buf_size = IPC_GET_ARG3(call->data); |
556 | 560 | ||
557 | total_bytes = copied_ids * sizeof(unative_t); |
561 | total_bytes = copied_ids * sizeof(unative_t); |
558 | 562 | ||
559 | if (buf_size > total_bytes) |
563 | if (buf_size > total_bytes) |
560 | to_copy = total_bytes; |
564 | to_copy = total_bytes; |
561 | else |
565 | else |
562 | to_copy = buf_size; |
566 | to_copy = buf_size; |
563 | 567 | ||
564 | rc = copy_to_uspace(uspace_buffer, buffer, to_copy); |
568 | rc = copy_to_uspace(uspace_buffer, buffer, to_copy); |
565 | free(buffer); |
569 | free(buffer); |
566 | 570 | ||
567 | if (rc != 0) { |
571 | if (rc != 0) { |
568 | klog_printf("debug_thread_read() - copy failed"); |
572 | klog_printf("debug_thread_read() - copy failed"); |
569 | return rc; |
573 | return rc; |
570 | } |
574 | } |
571 | 575 | ||
572 | IPC_SET_ARG1(call->data, to_copy); |
576 | IPC_SET_ARG1(call->data, to_copy); |
573 | IPC_SET_ARG2(call->data, total_bytes); |
577 | IPC_SET_ARG2(call->data, total_bytes); |
574 | 578 | ||
575 | klog_printf("debug_thread_read() done"); |
579 | klog_printf("debug_thread_read() done"); |
576 | return 1; /* actually need becksend with retval 0 */ |
580 | return 1; /* actually need becksend with retval 0 */ |
577 | } |
581 | } |
578 | 582 | ||
579 | static int udebug_rp_mem_write(call_t *call, phone_t *phone) |
583 | static int udebug_rp_mem_write(call_t *call, phone_t *phone) |
580 | { |
584 | { |
581 | void *uspace_data; |
585 | void *uspace_data; |
582 | unative_t to_copy; |
586 | unative_t to_copy; |
583 | int rc; |
587 | int rc; |
584 | void *buffer; |
588 | void *buffer; |
585 | 589 | ||
586 | klog_printf("udebug_rp_mem_write()"); |
590 | klog_printf("udebug_rp_mem_write()"); |
587 | 591 | ||
588 | uspace_data = (void *)IPC_GET_ARG2(call->data); |
592 | uspace_data = (void *)IPC_GET_ARG2(call->data); |
589 | to_copy = IPC_GET_ARG4(call->data); |
593 | to_copy = IPC_GET_ARG4(call->data); |
590 | 594 | ||
591 | buffer = malloc(to_copy, 0); // ??? |
595 | buffer = malloc(to_copy, 0); // ??? |
592 | 596 | ||
593 | rc = copy_from_uspace(buffer, uspace_data, to_copy); |
597 | rc = copy_from_uspace(buffer, uspace_data, to_copy); |
594 | if (rc != 0) { |
598 | if (rc != 0) { |
595 | klog_printf(" - copy failed"); |
599 | klog_printf(" - copy failed"); |
596 | return rc; |
600 | return rc; |
597 | } |
601 | } |
598 | 602 | ||
599 | call->buffer = buffer; |
603 | call->buffer = buffer; |
600 | 604 | ||
601 | klog_printf(" - done"); |
605 | klog_printf(" - done"); |
602 | return 1; /* actually need becksend with retval 0 */ |
606 | return 1; /* actually need becksend with retval 0 */ |
603 | } |
607 | } |
604 | 608 | ||
605 | 609 | ||
606 | int udebug_request_preprocess(call_t *call, phone_t *phone) |
610 | int udebug_request_preprocess(call_t *call, phone_t *phone) |
607 | { |
611 | { |
608 | int rc; |
612 | int rc; |
609 | 613 | ||
610 | switch (IPC_GET_ARG1(call->data)) { |
614 | switch (IPC_GET_ARG1(call->data)) { |
611 | case UDEBUG_M_BEGIN: |
615 | case UDEBUG_M_BEGIN: |
612 | rc = udebug_rp_begin(call, phone); |
616 | rc = udebug_rp_begin(call, phone); |
613 | return rc; |
617 | return rc; |
614 | case UDEBUG_M_END: |
618 | case UDEBUG_M_END: |
615 | rc = udebug_rp_end(call, phone); |
619 | rc = udebug_rp_end(call, phone); |
616 | return rc; |
620 | return rc; |
617 | case UDEBUG_M_GO: |
621 | case UDEBUG_M_GO: |
618 | rc = udebug_rp_go(call, phone); |
622 | rc = udebug_rp_go(call, phone); |
619 | return rc; |
623 | return rc; |
620 | case UDEBUG_M_ARGS_READ: |
624 | case UDEBUG_M_ARGS_READ: |
621 | rc = udebug_rp_args_read(call, phone); |
625 | rc = udebug_rp_args_read(call, phone); |
622 | return rc; |
626 | return rc; |
623 | case UDEBUG_M_REGS_READ: |
627 | case UDEBUG_M_REGS_READ: |
624 | rc = udebug_rp_regs_read(call, phone); |
628 | rc = udebug_rp_regs_read(call, phone); |
625 | return rc; |
629 | return rc; |
626 | case UDEBUG_M_REGS_WRITE: |
630 | case UDEBUG_M_REGS_WRITE: |
627 | rc = udebug_rp_regs_write(call, phone); |
631 | rc = udebug_rp_regs_write(call, phone); |
628 | return rc; |
632 | return rc; |
629 | case UDEBUG_M_THREAD_READ: |
633 | case UDEBUG_M_THREAD_READ: |
630 | rc = udebug_rp_thread_read(call, phone); |
634 | rc = udebug_rp_thread_read(call, phone); |
631 | return rc; |
635 | return rc; |
632 | case UDEBUG_M_MEM_WRITE: |
636 | case UDEBUG_M_MEM_WRITE: |
633 | rc = udebug_rp_mem_write(call, phone); |
637 | rc = udebug_rp_mem_write(call, phone); |
634 | return rc; |
638 | return rc; |
635 | default: |
639 | default: |
636 | break; |
640 | break; |
637 | } |
641 | } |
638 | 642 | ||
639 | return 0; |
643 | return 0; |
640 | } |
644 | } |
641 | 645 | ||
642 | static void udebug_receive_mem_read(call_t *call) |
646 | static void udebug_receive_mem_read(call_t *call) |
643 | { |
647 | { |
644 | unative_t uspace_dst; |
648 | unative_t uspace_dst; |
645 | void *uspace_ptr; |
649 | void *uspace_ptr; |
646 | unsigned size; |
650 | unsigned size; |
647 | void *buffer; |
651 | void *buffer; |
648 | int rc; |
652 | int rc; |
649 | 653 | ||
650 | klog_printf("debug_mem_read()"); |
654 | klog_printf("debug_mem_read()"); |
651 | uspace_dst = IPC_GET_ARG2(call->data); |
655 | uspace_dst = IPC_GET_ARG2(call->data); |
652 | uspace_ptr = (void *)IPC_GET_ARG3(call->data); |
656 | uspace_ptr = (void *)IPC_GET_ARG3(call->data); |
653 | size = IPC_GET_ARG4(call->data); |
657 | size = IPC_GET_ARG4(call->data); |
654 | 658 | ||
655 | buffer = malloc(size, 0); // ??? |
659 | buffer = malloc(size, 0); // ??? |
656 | klog_printf("debug_mem_read: src=%u, size=%u", uspace_ptr, size); |
660 | klog_printf("debug_mem_read: src=%u, size=%u", uspace_ptr, size); |
657 | 661 | ||
658 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
662 | /* NOTE: this is not strictly from a syscall... but that shouldn't |
659 | * be a problem */ |
663 | * be a problem */ |
660 | rc = copy_from_uspace(buffer, uspace_ptr, size); |
664 | rc = copy_from_uspace(buffer, uspace_ptr, size); |
661 | if (rc) { |
665 | if (rc) { |
662 | IPC_SET_RETVAL(call->data, rc); |
666 | IPC_SET_RETVAL(call->data, rc); |
663 | return; |
667 | return; |
664 | } |
668 | } |
665 | 669 | ||
666 | klog_printf("first word: %u", *((unative_t *)buffer)); |
670 | klog_printf("first word: %u", *((unative_t *)buffer)); |
667 | 671 | ||
668 | IPC_SET_RETVAL(call->data, 0); |
672 | IPC_SET_RETVAL(call->data, 0); |
669 | /* Hack: ARG1=dest, ARG2=size as in IPC_M_DATA_READ so that |
673 | /* Hack: ARG1=dest, ARG2=size as in IPC_M_DATA_READ so that |
670 | same code in process_answer() can be used |
674 | same code in process_answer() can be used |
671 | (no way to distinguish method in answer) */ |
675 | (no way to distinguish method in answer) */ |
672 | IPC_SET_ARG1(call->data, uspace_dst); |
676 | IPC_SET_ARG1(call->data, uspace_dst); |
673 | IPC_SET_ARG2(call->data, size); |
677 | IPC_SET_ARG2(call->data, size); |
674 | call->buffer = buffer; |
678 | call->buffer = buffer; |
675 | 679 | ||
676 | ipc_answer(&TASK->kernel_box, call); |
680 | ipc_answer(&TASK->kernel_box, call); |
677 | } |
681 | } |
678 | 682 | ||
/** Handle a UDEBUG_M_MEM_WRITE call in the context of the debugged task.
 *
 * Writes the data staged in call->buffer (filled in earlier by
 * udebug_rp_mem_write()) into the current task's address space, then
 * answers the call with the outcome.
 *
 * @param call	Debug call. ARG3 is the destination address in this
 *		task, ARG4 the number of bytes; call->buffer holds the
 *		data to write.
 */
static void udebug_receive_mem_write(call_t *call)
{
	void *uspace_dst;
	unsigned size;
	void *buffer;
	int rc;
	udebug_task_state_t dts;

	klog_printf("udebug_receive_mem_write()");

	/* Verify task state */
	spinlock_lock(&TASK->lock);
	dts = TASK->dt_state;
	spinlock_unlock(&TASK->lock);

	/* Writes are only permitted while the task is being debugged. */
	if (dts != UDEBUG_TS_ACTIVE) {
		/* NOTE(review): call->buffer is not freed here — verify the
		 * IPC layer releases it when the call is destroyed, else
		 * this path leaks the staging buffer. */
		IPC_SET_RETVAL(call->data, EBUSY);
		ipc_answer(&TASK->kernel_box, call);
		return;
	}

	uspace_dst = (void *)IPC_GET_ARG3(call->data);
	size = IPC_GET_ARG4(call->data);

	buffer = call->buffer;
	klog_printf("dst=%u, size=%u", uspace_dst, size);

	/* NOTE: this is not strictly from a syscall... but that shouldn't
	 * be a problem */
	rc = copy_to_uspace(uspace_dst, buffer, size);
	if (rc) {
		/* NOTE(review): same as above — buffer ownership on this
		 * error path is unclear from this file; confirm it is
		 * freed elsewhere. */
		IPC_SET_RETVAL(call->data, rc);
		ipc_answer(&TASK->kernel_box, call);
		return;
	}

	IPC_SET_RETVAL(call->data, 0);

	/* Success: release the staging buffer before answering. */
	free(call->buffer);
	call->buffer = NULL;

	ipc_answer(&TASK->kernel_box, call);
}
722 | 726 | ||
723 | 727 | ||
724 | /** |
728 | /** |
725 | * Handle a debug call received on the kernel answerbox. |
729 | * Handle a debug call received on the kernel answerbox. |
726 | * |
730 | * |
727 | * This is called by the kbox servicing thread. |
731 | * This is called by the kbox servicing thread. |
728 | */ |
732 | */ |
729 | void udebug_call_receive(call_t *call) |
733 | void udebug_call_receive(call_t *call) |
730 | { |
734 | { |
731 | int debug_method; |
735 | int debug_method; |
732 | 736 | ||
733 | debug_method = IPC_GET_ARG1(call->data); |
737 | debug_method = IPC_GET_ARG1(call->data); |
734 | 738 | ||
735 | switch (debug_method) { |
739 | switch (debug_method) { |
736 | case UDEBUG_M_MEM_READ: |
740 | case UDEBUG_M_MEM_READ: |
737 | udebug_receive_mem_read(call); |
741 | udebug_receive_mem_read(call); |
738 | break; |
742 | break; |
739 | case UDEBUG_M_MEM_WRITE: |
743 | case UDEBUG_M_MEM_WRITE: |
740 | udebug_receive_mem_write(call); |
744 | udebug_receive_mem_write(call); |
741 | break; |
745 | break; |
742 | } |
746 | } |
743 | } |
747 | } |
744 | 748 | ||
745 | /** @} |
749 | /** @} |
746 | */ |
750 | */ |
747 | 751 |