/*
 * Copyright (c) 2008 Jiri Svoboda
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup generic
 * @{
 */

/**
 * @file
 * @brief   Udebug operations.
 *
 * Udebug operations on tasks and threads are implemented here. The
 * functions defined here are called from the udebug_ipc module
 * when servicing udebug IPC messages.
 */

#include <debug.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch.h>
#include <arch/asm.h>
#include <errno.h>
#include <syscall/copy.h>
#include <ipc/ipc.h>
#include <udebug/udebug.h>
#include <udebug/udebug_ops.h>
#include <print.h>

/**
 * Prepare a thread for a debugging operation.
 *
 * Simply put, return thread t with t->udebug.lock held,
 * but only if it passes all the checks.
 *
 * Specifically, verifies that thread t exists, is a userspace thread
 * and belongs to the current task (TASK). It also verifies that the
 * thread's GO state matches being_go (typically false), and locks
 * t->udebug.lock, making sure that t->udebug.active is true - that
 * the thread is in a valid debugging session.
 *
 * With this verified and the t->udebug.lock mutex held, it is ensured
 * that the thread cannot leave the debugging session, let alone cease
 * to exist.
 *
 * In this function, holding the TASK->udebug.lock mutex prevents the
 * thread from leaving the debugging session, while relaxing from
 * the t->lock spinlock to the t->udebug.lock mutex.
 *
 * @param t         Pointer to the thread; need not be valid at all.
 * @param being_go  Required thread GO state.
 *
 * @return EOK if all went well, or an error code otherwise.
 */
static int _thread_op_begin(thread_t *t, bool being_go)
{
    task_id_t taskid;
    ipl_t ipl;

    taskid = TASK->taskid;

    mutex_lock(&TASK->udebug.lock);

    /* thread_exists() must be called with threads_lock held */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);

    if (!thread_exists(t)) {
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    /* t->lock is enough to ensure the thread's existence */
    spinlock_lock(&t->lock);
    spinlock_unlock(&threads_lock);

    /* Verify that 't' is a userspace thread. */
    if ((t->flags & THREAD_FLAG_USPACE) == 0) {
        /* It's not, deny its existence */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    /* Verify debugging state. */
    if (t->udebug.active != true) {
        /* Not in a debugging session. */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    /*
     * Since the thread has active == true, TASK->udebug.lock
     * is enough to ensure its existence and that active remains
     * true.
     */
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);

    /* Only mutex TASK->udebug.lock left. */

    /* Now verify that the thread belongs to the current task. */
    if (t->task != TASK) {
        /* No such thread belonging to this task. */
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    /*
     * Now we need to grab the thread's debug lock for synchronization
     * of the thread's stoppability/stop state.
     */
    mutex_lock(&t->udebug.lock);

    /* The big task mutex is no longer needed. */
    mutex_unlock(&TASK->udebug.lock);

    if (t->udebug.go != being_go) {
        /* Not in debugging session or undesired GO state. */
        mutex_unlock(&t->udebug.lock);
        return EINVAL;
    }

    /* Only t->udebug.lock left. */

    return EOK; /* All went well. */
}

/** End debugging operation on a thread. */
static void _thread_op_end(thread_t *t)
{
    mutex_unlock(&t->udebug.lock);
}
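
/*
 * Illustrative sketch (hypothetical helper, not part of the udebug API):
 * the _thread_op_begin()/_thread_op_end() bracket as used by udebug_go(),
 * udebug_stop() and udebug_args_read() below. Kept under #if 0.
 */
#if 0
static int example_clear_cur_event(thread_t *t)
{
    int rc;

    /* On success, t is valid and t->udebug.lock is held. */
    rc = _thread_op_begin(t, false);
    if (rc != EOK)
        return rc;

    /* It is now safe to inspect or modify t's udebug state. */
    t->udebug.cur_event = 0;

    /* Release t->udebug.lock. */
    _thread_op_end(t);
    return EOK;
}
#endif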

/** Begin debugging the current task.
 *
 * Initiates a debugging session for the current task (and its threads).
 * When the debugging session has started, a reply will be sent to the
 * UDEBUG_BEGIN call. This may happen immediately in this function if
 * all the threads in this task are stoppable at the moment; in that
 * case the function returns 1.
 *
 * Otherwise the function returns 0 and the reply will be sent as soon as
 * all the threads become stoppable (i.e. they can be considered stopped).
 *
 * @param call  The BEGIN call we are servicing.
 * @return  0 (OK, but not done yet), 1 (done) or negative error code.
 */
int udebug_begin(call_t *call)
{
    int reply;

    thread_t *t;
    link_t *cur;

    LOG("udebug_begin()\n");

    mutex_lock(&TASK->udebug.lock);
    LOG("debugging task %" PRIu64 "\n", TASK->taskid);

    if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) {
        mutex_unlock(&TASK->udebug.lock);
        LOG("udebug_begin(): busy error\n");

        return EBUSY;
    }

    TASK->udebug.dt_state = UDEBUG_TS_BEGINNING;
    TASK->udebug.begin_call = call;
    TASK->udebug.debugger = call->sender;

    if (TASK->udebug.not_stoppable_count == 0) {
        TASK->udebug.dt_state = UDEBUG_TS_ACTIVE;
        TASK->udebug.begin_call = NULL;
        reply = 1; /* immediate reply */
    } else {
        reply = 0; /* no reply */
    }

    /* Set udebug.active on all of the task's userspace threads. */

    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
        t = list_get_instance(cur, thread_t, th_link);

        mutex_lock(&t->udebug.lock);
        if ((t->flags & THREAD_FLAG_USPACE) != 0)
            t->udebug.active = true;
        mutex_unlock(&t->udebug.lock);
    }

    mutex_unlock(&TASK->udebug.lock);

    LOG("udebug_begin() done (%s)\n",
        reply ? "reply" : "stoppability wait");

    return reply;
}
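
/*
 * Illustrative sketch (hypothetical; the real dispatching is done in the
 * udebug_ipc module): how a caller of udebug_begin() could answer the
 * BEGIN call immediately when all threads are already stoppable. The
 * function name example_service_begin is an assumption.
 */
#if 0
static void example_service_begin(call_t *call)
{
    int rc;

    rc = udebug_begin(call);
    if (rc < 0) {
        /* Error - report it to the debugger right away. */
        IPC_SET_RETVAL(call->data, rc);
        ipc_answer(&TASK->answerbox, call);
        return;
    }

    if (rc != 0) {
        /* Immediate reply - every thread was stoppable. */
        IPC_SET_RETVAL(call->data, 0);
        ipc_answer(&TASK->answerbox, call);
    }

    /* Otherwise the reply is sent once all threads become stoppable. */
}
#endif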

/** Finish debugging the current task.
 *
 * Closes the debugging session for the current task.
 * @return Zero on success or negative error code.
 */
int udebug_end(void)
{
    int rc;

    LOG("udebug_end()\n");

    mutex_lock(&TASK->udebug.lock);
    LOG("task %" PRIu64 "\n", TASK->taskid);

    rc = udebug_task_cleanup(TASK);

    mutex_unlock(&TASK->udebug.lock);

    return rc;
}

/** Set the event mask.
 *
 * Sets the event mask that determines which events are enabled.
 *
 * @param mask  OR combination of events that should be enabled.
 * @return  Zero on success or negative error code.
 */
int udebug_set_evmask(udebug_evmask_t mask)
{
    LOG("udebug_set_mask()\n");

    mutex_lock(&TASK->udebug.lock);

    if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
        mutex_unlock(&TASK->udebug.lock);
        LOG("udebug_set_mask(): no active debugging session\n");

        return EINVAL;
    }

    TASK->udebug.evmask = mask;

    mutex_unlock(&TASK->udebug.lock);

    return 0;
}
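
/*
 * Illustrative sketch (hypothetical caller; assumes that per-event mask
 * bits such as UDEBUG_EM_SYSCALL_B and UDEBUG_EM_SYSCALL_E are defined
 * next to udebug_evmask_t in <udebug/udebug.h>): enabling only syscall
 * entry and exit events by OR-ing their mask bits together.
 */
#if 0
static int example_enable_syscall_events(void)
{
    udebug_evmask_t mask;

    mask = UDEBUG_EM_SYSCALL_B | UDEBUG_EM_SYSCALL_E;
    return udebug_set_evmask(mask);
}
#endif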

/** Give thread GO.
 *
 * Upon receiving a go message, the thread is given GO. Being GO
 * means the thread is allowed to execute userspace code (until
 * a debugging event or STOP occurs, at which point the thread loses GO).
 *
 * @param t     The thread to operate on (unlocked and need not be valid).
 * @param call  The GO call that we are servicing.
 * @return  Zero on success or an error code from _thread_op_begin().
 */
int udebug_go(thread_t *t, call_t *call)
{
    int rc;

    /* On success, this will lock t->udebug.lock. */
    rc = _thread_op_begin(t, false);
    if (rc != EOK) {
        return rc;
    }

    t->udebug.go_call = call;
    t->udebug.go = true;
    t->udebug.cur_event = 0;    /* none */

    /*
     * Neither t's lock nor threads_lock may be held during wakeup.
     */
    waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST);

    _thread_op_end(t);

    return 0;
}

/** Stop a thread (i.e. take its GO away).
 *
 * Generates a STOP event as soon as the thread becomes stoppable (i.e.
 * can be considered stopped).
 *
 * @param t     The thread to operate on (unlocked and need not be valid).
 * @param call  The STOP call that we are servicing.
 * @return  Zero on success or an error code from _thread_op_begin().
 */
int udebug_stop(thread_t *t, call_t *call)
{
    int rc;

    LOG("udebug_stop()\n");

    /*
     * On success, this will lock t->udebug.lock. Note that this makes sure
     * the thread is not stopped.
     */
    rc = _thread_op_begin(t, true);
    if (rc != EOK) {
        return rc;
    }

    /* Take GO away from the thread. */
    t->udebug.go = false;

    if (t->udebug.stoppable != true) {
        /* Answer will be sent when the thread becomes stoppable. */
        _thread_op_end(t);
        return 0;
    }

    /*
     * Answer GO call.
     */
    LOG("udebug_stop - answering go call\n");

    /* Make sure nobody takes this call away from us. */
    call = t->udebug.go_call;
    t->udebug.go_call = NULL;

    IPC_SET_RETVAL(call->data, 0);
    IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP);
    LOG("udebug_stop/ipc_answer\n");

    t->udebug.cur_event = UDEBUG_EVENT_STOP;

    _thread_op_end(t);

    mutex_lock(&TASK->udebug.lock);
    ipc_answer(&TASK->answerbox, call);
    mutex_unlock(&TASK->udebug.lock);

    LOG("udebug_stop/done\n");
    return 0;
}

/** Read the list of userspace threads in the current task.
 *
 * The list takes the form of a sequence of thread hashes (i.e. the pointers
 * to thread structures). A buffer of size @a buf_size is allocated and
 * a pointer to it written to @a buffer. The sequence of hashes is written
 * into this buffer.
 *
 * If the sequence is longer than @a buf_size bytes, only as many hashes
 * as can fit are copied. The number of bytes copied is stored in @a n.
 *
 * The rationale for having @a buf_size is that this function is only
 * used for servicing the THREAD_READ message, which always specifies
 * a maximum size for the userspace buffer.
 *
 * @param buffer    The buffer for storing thread hashes.
 * @param buf_size  Buffer size in bytes.
 * @param n         The actual number of bytes copied will be stored here.
 */
int udebug_thread_read(void **buffer, size_t buf_size, size_t *n)
{
    thread_t *t;
    link_t *cur;
    unative_t tid;
    unsigned copied_ids;
    ipl_t ipl;
    unative_t *id_buffer;
    int flags;
    size_t max_ids;

    LOG("udebug_thread_read()\n");

    /* Allocate a buffer to hold thread IDs */
    id_buffer = malloc(buf_size, 0);

    mutex_lock(&TASK->udebug.lock);

    /* Verify task state */
    if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
        mutex_unlock(&TASK->udebug.lock);
        free(id_buffer);
        return EINVAL;
    }

    ipl = interrupts_disable();
    spinlock_lock(&TASK->lock);

    /* Copy down the thread IDs */

    max_ids = buf_size / sizeof(unative_t);
    copied_ids = 0;

    /* FIXME: make sure the thread isn't past debug shutdown... */
    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
        /* Do not write past end of buffer */
        if (copied_ids >= max_ids)
            break;

        t = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&t->lock);
        flags = t->flags;
        spinlock_unlock(&t->lock);

        /* Not interested in kernel threads. */
        if ((flags & THREAD_FLAG_USPACE) != 0) {
            /* Using thread struct pointer as identification hash */
            tid = (unative_t) t;
            id_buffer[copied_ids++] = tid;
        }
    }

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);

    mutex_unlock(&TASK->udebug.lock);

    *buffer = id_buffer;
    *n = copied_ids * sizeof(unative_t);

    return 0;
}
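
/*
 * Illustrative sketch (hypothetical diagnostic helper): consuming the
 * buffer produced by udebug_thread_read(). The 64-byte cap is arbitrary;
 * @a n comes back in bytes, so it is divided by sizeof(unative_t) to get
 * the number of hashes.
 */
#if 0
static void example_list_threads(void)
{
    void *buffer;
    unative_t *hash;
    size_t bytes;
    size_t i;

    if (udebug_thread_read(&buffer, 64, &bytes) != 0)
        return;

    hash = (unative_t *) buffer;
    for (i = 0; i < bytes / sizeof(unative_t); i++)
        printf("thread hash: %p\n", (void *) hash[i]);

    free(buffer);
}
#endif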

/** Read the arguments of a system call.
 *
 * The arguments of the system call being executed are copied
 * to an allocated buffer and a pointer to it is written to @a buffer.
 * The size of the buffer is exactly such that it can hold the maximum number
 * of system-call arguments.
 *
 * Unless the thread is currently blocked in a SYSCALL_B or SYSCALL_E event,
 * this function will fail with an EINVAL error code.
 *
 * @param t         The thread whose syscall arguments are to be read
 *                  (unlocked and need not be valid).
 * @param buffer    For storing a pointer to the allocated argument buffer.
 */
int udebug_args_read(thread_t *t, void **buffer)
{
    int rc;
    unative_t *arg_buffer;

    /* Prepare a buffer to hold the arguments. */
    arg_buffer = malloc(6 * sizeof(unative_t), 0);

    /* On success, this will lock t->udebug.lock. */
    rc = _thread_op_begin(t, false);
    if (rc != EOK) {
        free(arg_buffer);
        return rc;
    }

    /* Additionally we need to verify that we are inside a syscall. */
    if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B &&
        t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) {
        _thread_op_end(t);
        free(arg_buffer);
        return EINVAL;
    }

    /* Copy to a local buffer before releasing the lock. */
    memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t));

    _thread_op_end(t);

    *buffer = arg_buffer;
    return 0;
}

/** Read the memory of the debugged task.
 *
 * Reads @a n bytes from the address space of the debugged task, starting
 * from @a uspace_addr. The bytes are copied into an allocated buffer
 * and a pointer to it is written into @a buffer.
 *
 * @param uspace_addr   Address from where to start reading.
 * @param n             Number of bytes to read.
 * @param buffer        For storing a pointer to the allocated buffer.
 */
int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer)
{
    void *data_buffer;
    int rc;

    /* Verify task state */
    mutex_lock(&TASK->udebug.lock);

    if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
        mutex_unlock(&TASK->udebug.lock);
        return EBUSY;
    }

    data_buffer = malloc(n, 0);

    /*
     * NOTE: this is not strictly from a syscall... but that shouldn't
     * be a problem.
     */
    rc = copy_from_uspace(data_buffer, (void *) uspace_addr, n);
    mutex_unlock(&TASK->udebug.lock);

    if (rc != 0) {
        free(data_buffer);
        return rc;
    }

    *buffer = data_buffer;
    return 0;
}
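
/*
 * Illustrative sketch (hypothetical diagnostic helper): reading a single
 * byte of the debugged task's memory with udebug_mem_read() and printing
 * it. The helper name and its use of printf() for output are assumptions.
 */
#if 0
static void example_dump_byte(unative_t uspace_addr)
{
    void *buffer;
    int rc;

    rc = udebug_mem_read(uspace_addr, 1, &buffer);
    if (rc != 0)
        return;

    printf("byte at %p: 0x%02x\n", (void *) uspace_addr,
        *((uint8_t *) buffer));
    free(buffer);
}
#endif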

/** @}
 */