Subversion Repositories HelenOS

Rev 3474 → Rev 3674

/*
 * Copyright (c) 2008 Jiri Svoboda
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup generic
 * @{
 */

/**
 * @file
 * @brief   Udebug operations.
 *
 * Udebug operations on tasks and threads are implemented here. The
 * functions defined here are called from the udebug_ipc module
 * when servicing udebug IPC messages.
 */

#include <debug.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch.h>
#include <errno.h>
#include <syscall/copy.h>
#include <ipc/ipc.h>
#include <udebug/udebug.h>
#include <udebug/udebug_ops.h>

/**
 * Prepare a thread for a debugging operation.
 *
 * Simply put, return thread t with t->udebug.lock held,
 * but only if all the conditions below are verified.
 *
 * Specifically, verifies that thread t exists, is a userspace thread
 * and belongs to the current task (TASK). Also verifies that the thread
 * is (or is not) GO according to being_go (typically false).
 * It also locks t->udebug.lock, making sure that t->udebug.debug_active
 * is true, i.e. that the thread is in a valid debugging session.
 *
 * With this verified and the t->udebug.lock mutex held, it is ensured
 * that the thread cannot leave the debugging session, let alone cease
 * to exist.
 *
 * In this function, holding the TASK->udebug.lock mutex prevents the
 * thread from leaving the debugging session, while relaxing from
 * the t->lock spinlock to the t->udebug.lock mutex.
 *
 * @param t         Pointer to a thread; need not be valid at all.
 * @param being_go  Required thread GO state.
 *
 * @return EOK if all went well, or an error code otherwise.
 */
static int _thread_op_begin(thread_t *t, bool being_go)
{
    task_id_t taskid;
    ipl_t ipl;

    taskid = TASK->taskid;

    mutex_lock(&TASK->udebug.lock);

    /* thread_exists() must be called with threads_lock held */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);

    if (!thread_exists(t)) {
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    /* t->lock is enough to ensure the thread's existence */
    spinlock_lock(&t->lock);
    spinlock_unlock(&threads_lock);

    /* Verify that 't' is a userspace thread. */
    if ((t->flags & THREAD_FLAG_USPACE) == 0) {
        /* It's not, deny its existence */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    /* Verify debugging state. */
    if (t->udebug.debug_active != true) {
        /* Not in a valid debugging session. */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    /*
     * Since the thread has debug_active == true, TASK->udebug.lock
     * is enough to ensure its existence and that debug_active remains
     * true.
     */
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);

    /* Only mutex TASK->udebug.lock left. */

    /* Now verify that the thread belongs to the current task. */
    if (t->task != TASK) {
        /* No such thread belonging to this task. */
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    /*
     * Now we need to grab the thread's debug lock for synchronization
     * of the thread's stoppability/stop state.
     */
    mutex_lock(&t->udebug.lock);

    /* The big task mutex is no longer needed. */
    mutex_unlock(&TASK->udebug.lock);

    if (t->udebug.go != being_go) {
        /* Not in debugging session or undesired GO state. */
        mutex_unlock(&t->udebug.lock);
        return EINVAL;
    }

    /* Only t->udebug.lock left. */

    return EOK; /* All went well. */
}

/** End debugging operation on a thread. */
static void _thread_op_end(thread_t *t)
{
    mutex_unlock(&t->udebug.lock);
}

/** Begin debugging the current task.
 *
 * Initiates a debugging session for the current task (and its threads).
 * When the debugging session has started, a reply is sent to the
 * UDEBUG_BEGIN call. This may happen immediately in this function if
 * all the threads in this task are stoppable at the moment; in that
 * case the function returns 1.
 *
 * Otherwise the function returns 0 and the reply will be sent as soon as
 * all the threads become stoppable (i.e. they can be considered stopped).
 *
 * @param call  The BEGIN call we are servicing.
 * @return  0 (OK, but not done yet), 1 (done) or negative error code.
 */
int udebug_begin(call_t *call)
{
    int reply;

    thread_t *t;
    link_t *cur;

    LOG("udebug_begin()\n");

    mutex_lock(&TASK->udebug.lock);
    LOG("debugging task %llu\n", TASK->taskid);

    if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) {
        mutex_unlock(&TASK->udebug.lock);
        LOG("udebug_begin(): busy error\n");

        return EBUSY;
    }

    TASK->udebug.dt_state = UDEBUG_TS_BEGINNING;
    TASK->udebug.begin_call = call;
    TASK->udebug.debugger = call->sender;

    if (TASK->udebug.not_stoppable_count == 0) {
        TASK->udebug.dt_state = UDEBUG_TS_ACTIVE;
        TASK->udebug.begin_call = NULL;
        reply = 1; /* immediate reply */
    } else {
        reply = 0; /* no reply */
    }

    /* Set udebug.debug_active on all of the task's userspace threads. */

    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
        t = list_get_instance(cur, thread_t, th_link);

        mutex_lock(&t->udebug.lock);
        if ((t->flags & THREAD_FLAG_USPACE) != 0)
            t->udebug.debug_active = true;
        mutex_unlock(&t->udebug.lock);
    }

    mutex_unlock(&TASK->udebug.lock);

    LOG("udebug_begin() done (%s)\n",
        reply ? "reply" : "stoppability wait");

    return reply;
}

/** Finish debugging the current task.
 *
 * Closes the debugging session for the current task.
 * @return Zero on success or negative error code.
 */
int udebug_end(void)
{
    int rc;

    LOG("udebug_end()\n");

    mutex_lock(&TASK->udebug.lock);
    LOG("task %" PRIu64 "\n", TASK->taskid);

    rc = udebug_task_cleanup(TASK);

    mutex_unlock(&TASK->udebug.lock);

    return rc;
}

/** Set the event mask.
 *
 * Sets the event mask that determines which events are enabled.
 *
 * @param mask  OR combination of events that should be enabled.
 * @return  Zero on success or negative error code.
 */
int udebug_set_evmask(udebug_evmask_t mask)
{
    LOG("udebug_set_mask()\n");

    mutex_lock(&TASK->udebug.lock);

    if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
        mutex_unlock(&TASK->udebug.lock);
        LOG("udebug_set_mask(): no active debugging session\n");

        return EINVAL;
    }

    TASK->udebug.evmask = mask;

    mutex_unlock(&TASK->udebug.lock);

    return 0;
}

/** Give thread GO.
 *
 * Upon receiving a go message, the thread is given GO. Being GO
 * means the thread is allowed to execute userspace code (until
 * a debugging event or STOP occurs, at which point the thread loses GO).
 *
 * @param t     The thread to operate on (unlocked and need not be valid).
 * @param call  The GO call that we are servicing.
 *
 * @return Zero on success or an error code from _thread_op_begin().
 */
int udebug_go(thread_t *t, call_t *call)
{
    int rc;

    /* On success, this will lock t->udebug.lock. */
    rc = _thread_op_begin(t, false);
    if (rc != EOK) {
        return rc;
    }

    t->udebug.go_call = call;
    t->udebug.go = true;
    t->udebug.cur_event = 0;    /* none */

    /*
     * Neither t's lock nor threads_lock may be held during wakeup.
     */
    waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST);

    _thread_op_end(t);

    return 0;
}

/** Stop a thread (i.e. take its GO away).
 *
 * Generates a STOP event as soon as the thread becomes stoppable (i.e.
 * can be considered stopped).
 *
 * @param t     The thread to operate on (unlocked and need not be valid).
 * @param call  The STOP call that we are servicing.
 *
 * @return Zero on success or an error code otherwise.
 */
int udebug_stop(thread_t *t, call_t *call)
{
    int rc;

    LOG("udebug_stop()\n");

    /*
     * On success, this will lock t->udebug.lock. Note that this makes sure
     * the thread is not stopped.
     */
    rc = _thread_op_begin(t, true);
    if (rc != EOK) {
        return rc;
    }

    /* Take GO away from the thread. */
    t->udebug.go = false;

    if (t->udebug.stoppable != true) {
        /* Answer will be sent when the thread becomes stoppable. */
        _thread_op_end(t);
        return 0;
    }

    /*
     * Answer GO call.
     */
    LOG("udebug_stop - answering go call\n");

    /* Make sure nobody takes this call away from us. */
    call = t->udebug.go_call;
    t->udebug.go_call = NULL;

    IPC_SET_RETVAL(call->data, 0);
    IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP);
    LOG("udebug_stop/ipc_answer\n");

    THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;

    _thread_op_end(t);

    mutex_lock(&TASK->udebug.lock);
    ipc_answer(&TASK->answerbox, call);
    mutex_unlock(&TASK->udebug.lock);

    LOG("udebug_stop/done\n");
    return 0;
}

/** Read the list of userspace threads in the current task.
 *
 * The list takes the form of a sequence of thread hashes (i.e. the pointers
 * to thread structures). A buffer of size @a buf_size is allocated and
 * a pointer to it written to @a buffer. The sequence of hashes is written
 * into this buffer.
 *
 * If the sequence is longer than @a buf_size bytes, only as many hashes
 * as can fit are copied. The number of bytes copied is stored in @a n.
 *
 * The rationale for having @a buf_size is that this function is only
 * used for servicing the THREAD_READ message, which always specifies
 * a maximum size for the userspace buffer.
 *
 * @param buffer    The buffer for storing thread hashes.
 * @param buf_size  Buffer size in bytes.
 * @param n         The actual number of bytes copied will be stored here.
 * @return Zero on success or an error code otherwise.
 */
int udebug_thread_read(void **buffer, size_t buf_size, size_t *n)
{
    thread_t *t;
    link_t *cur;
    unative_t tid;
    unsigned copied_ids;
    ipl_t ipl;
    unative_t *id_buffer;
    int flags;
    size_t max_ids;

    LOG("udebug_thread_read()\n");

    /* Allocate a buffer to hold thread IDs */
    id_buffer = malloc(buf_size, 0);

    mutex_lock(&TASK->udebug.lock);

    /* Verify task state */
    if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
        mutex_unlock(&TASK->udebug.lock);
        return EINVAL;
    }

    ipl = interrupts_disable();
    spinlock_lock(&TASK->lock);
    /* Copy down the thread IDs */

    max_ids = buf_size / sizeof(unative_t);
    copied_ids = 0;

    /* FIXME: make sure the thread isn't past debug shutdown... */
    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
        /* Do not write past end of buffer */
        if (copied_ids >= max_ids) break;

        t = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&t->lock);
        flags = t->flags;
        spinlock_unlock(&t->lock);

        /* Not interested in kernel threads. */
        if ((flags & THREAD_FLAG_USPACE) != 0) {
            /* Using thread struct pointer as identification hash */
            tid = (unative_t) t;
            id_buffer[copied_ids++] = tid;
        }
    }

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);

    mutex_unlock(&TASK->udebug.lock);

    *buffer = id_buffer;
    *n = copied_ids * sizeof(unative_t);

    return 0;
}

/** Read the arguments of a system call.
 *
 * The arguments of the system call being executed are copied
 * to an allocated buffer and a pointer to it is written to @a buffer.
 * The size of the buffer is exactly such that it can hold the maximum number
 * of system-call arguments.
 *
 * Unless the thread is currently blocked in a SYSCALL_B or SYSCALL_E event,
 * this function will fail with an EINVAL error code.
 *
 * @param t         The thread to operate on (unlocked and need not be valid).
 * @param buffer    For storing a pointer to the allocated argument buffer.
 * @return Zero on success or an error code otherwise.
 */
int udebug_args_read(thread_t *t, void **buffer)
{
    int rc;
    unative_t *arg_buffer;

    /* Prepare a buffer to hold the arguments. */
    arg_buffer = malloc(6 * sizeof(unative_t), 0);

    /* On success, this will lock t->udebug.lock. */
    rc = _thread_op_begin(t, false);
    if (rc != EOK) {
        return rc;
    }

    /* Additionally we need to verify that we are inside a syscall. */
    if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B &&
        t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) {
        _thread_op_end(t);
        return EINVAL;
    }

    /* Copy to a local buffer before releasing the lock. */
    memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t));

    _thread_op_end(t);

    *buffer = arg_buffer;
    return 0;
}

/** Read the memory of the debugged task.
 *
 * Reads @a n bytes from the address space of the debugged task, starting
 * from @a uspace_addr. The bytes are copied into an allocated buffer
 * and a pointer to it is written into @a buffer.
 *
 * @param uspace_addr   Address from where to start reading.
 * @param n             Number of bytes to read.
 * @param buffer        For storing a pointer to the allocated buffer.
 * @return Zero on success or an error code otherwise.
 */
int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer)
{
    void *data_buffer;
    int rc;

    /* Verify task state */
    mutex_lock(&TASK->udebug.lock);

    if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
        mutex_unlock(&TASK->udebug.lock);
        return EBUSY;
    }

    data_buffer = malloc(n, 0);

    /*
     * NOTE: This is not strictly from a syscall... but that shouldn't
     * be a problem.
     */
    rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n);
    mutex_unlock(&TASK->udebug.lock);

    if (rc != 0) return rc;

    *buffer = data_buffer;
    return 0;
}

/** @}
 */