Subversion Repositories HelenOS

Diff of Rev 3018 against Rev 3026. In the hunks below, lines present only in Rev 3018 are prefixed with '-', lines present only in Rev 3026 with '+', and unchanged lines with a space.

Line 52 (Rev 3018) ... Line 52 (Rev 3026)

  * but only if it verifies all conditions.
  *
  * Specifically, verifies that thread t exists, is a userspace thread,
  * and belongs to the current task (TASK). Verifies, that the thread
  * has (or hasn't) go according to having_go (typically false).
- * It also locks t->udebug.lock, making sure that t->udebug.debug_active is true
- * - that the thread is in a valid debugging session.
+ * It also locks t->udebug.lock, making sure that t->udebug.debug_active
+ * is true - that the thread is in a valid debugging session.
  *
- * Returns EOK if all went well, or an error code otherwise.
- * Interrupts must be already disabled when calling this function.
+ * With this verified and the t->udebug.lock mutex held, it is ensured
+ * that the thread cannot leave the debugging session, let alone cease
+ * to exist.
+ *
+ * In this function, holding the TASK->udebug.lock mutex prevents the
+ * thread from leaving the debugging session, while relaxing from
+ * the t->lock spinlock to the t->udebug.lock mutex.
  *
- * Note: This function sports complicated locking.
+ * Returns EOK if all went well, or an error code otherwise.
  */
 static int _thread_op_begin(thread_t *t, bool having_go)
 {
-    int rc;
     task_id_t taskid;
+    ipl_t ipl;
 
     taskid = TASK->taskid;
 
-    /* Must lock threads_lock to ensure continued existence of the thread */
+    mutex_lock(&TASK->udebug.lock);
+
+    /* thread_exists() must be called with threads_lock held */
+    ipl = interrupts_disable();
     spinlock_lock(&threads_lock);
 
     if (!thread_exists(t)) {
         spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
+        mutex_unlock(&TASK->udebug.lock);
         return ENOENT;
     }
 
-    spinlock_lock(&t->udebug.lock);
+    /* t->lock is enough to ensure the thread's existence */
     spinlock_lock(&t->lock);
-
-    /* Now verify that it's the current task */
-    if (t->task != TASK) {
-        /* No such thread belonging to callee */
-        rc = ENOENT;
-        goto error_exit;
-    }
+    spinlock_unlock(&threads_lock);
 
     /* Verify that 't' is a userspace thread */
     if ((t->flags & THREAD_FLAG_USPACE) == 0) {
         /* It's not, deny its existence */
-        rc = ENOENT;
-        goto error_exit;
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
+        mutex_unlock(&TASK->udebug.lock);
+        return ENOENT;
     }
 
-    if ((t->udebug.debug_active != true) || (!t->udebug.stop != having_go)) {
+    /* Verify debugging state */
+    if (t->udebug.debug_active != true) {
         /* Not in debugging session or undesired GO state */
-        rc = EINVAL;
-        goto error_exit;
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
+        mutex_unlock(&TASK->udebug.lock);
+        return ENOENT;
     }
 
-    spinlock_unlock(&threads_lock);
+    /*
+     * Since the thread has debug_active == true, TASK->udebug.lock
+     * is enough to ensure its existence and that debug_active remains
+     * true.
+     */
     spinlock_unlock(&t->lock);
+    interrupts_restore(ipl);
 
-    /* Only t->udebug.lock left */
+    /* Only mutex TASK->udebug.lock left */
+
+    /* Now verify that the thread belongs to the current task */
+    if (t->task != TASK) {
+        /* No such thread belonging this task*/
+        mutex_unlock(&TASK->udebug.lock);
+        return ENOENT;
+    }
 
-    return EOK; /* All went well */
+    /*
+     * Now we need to grab the thread's debug lock for synchronization
+     * of the threads stoppability/stop state.
+     */
+    mutex_lock(&t->udebug.lock);
 
+    /* The big task mutex is no longer needed */
+    mutex_unlock(&TASK->udebug.lock);
 
-    /* Executed when a check on the thread fails */
-error_exit:
-    spinlock_unlock(&t->lock);
-    spinlock_unlock(&t->udebug.lock);
-    spinlock_unlock(&threads_lock);
+    if (!t->udebug.stop != having_go) {
+        /* Not in debugging session or undesired GO state */
+        mutex_unlock(&t->udebug.lock);
+        return EINVAL;
+    }
 
-    /* No locks left here */
-    return rc;  /* Some errors occured */
+    /* Only t->udebug.lock left */
+
+    return EOK; /* All went well */
 }
 
 
 static void _thread_op_end(thread_t *t)
 {
-    spinlock_unlock(&t->udebug.lock);
+    mutex_unlock(&t->udebug.lock);
 }
 
 /**
  * \return 0 (ok, but not done yet), 1 (done) or negative error code.
  */
 int udebug_begin(call_t *call)
 {
-    ipl_t ipl;
     int reply;
 
     thread_t *t;
     link_t *cur;
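
Editor's aside, not part of either revision: in the Rev 3026 variant above, _thread_op_begin() returns with only the t->udebug.lock mutex held on success, so a caller brackets its per-thread work between _thread_op_begin() and _thread_op_end(), as udebug_go() and udebug_args_read() do further down. A minimal caller sketch (example_thread_op is a hypothetical name):

    static int example_thread_op(thread_t *t)
    {
        int rc;

        /* On success this returns with t->udebug.lock (a mutex in Rev 3026) held. */
        rc = _thread_op_begin(t, false);
        if (rc != EOK)
            return rc;

        /* ... inspect or modify t->udebug state here ... */

        /* Releases t->udebug.lock. */
        _thread_op_end(t);

        return EOK;
    }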
 

Line 162 (Rev 3018) ... Line 189 (Rev 3026)

     /* Set udebug.debug_active on all of the task's userspace threads */
 
     for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
         t = list_get_instance(cur, thread_t, th_link);
 
-        ipl = interrupts_disable();
-        spinlock_lock(&t->udebug.lock);
+        mutex_lock(&t->udebug.lock);
         if ((t->flags & THREAD_FLAG_USPACE) != 0)
             t->udebug.debug_active = true;
-        spinlock_unlock(&t->udebug.lock);
-        interrupts_restore(ipl);
+        mutex_unlock(&t->udebug.lock);
     }
 
     mutex_unlock(&TASK->udebug.lock);
 
     klog_printf("udebug_begin() done (%s)",

Line 219 (Rev 3018) ... Line 244 (Rev 3026)

 }
 
 
 int udebug_go(thread_t *t, call_t *call)
 {
-    ipl_t ipl;
     int rc;
 
 //  klog_printf("udebug_go()");
 
-    ipl = interrupts_disable();
-
     /* On success, this will lock t->udebug.lock */
     rc = _thread_op_begin(t, false);
     if (rc != EOK) {
-        interrupts_restore(ipl);
         return rc;
     }
 
     t->udebug.go_call = call;
     t->udebug.stop = false;

Line 243 (Rev 3018) ... Line 264 (Rev 3026)

      * Neither t's lock nor threads_lock may be held during wakeup
      */
     waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST);
 
     _thread_op_end(t);
-    interrupts_restore(ipl);
 
     return 0;
 }
 
 int udebug_stop(thread_t *t, call_t *call)
 {
-    ipl_t ipl;
     int rc;
 
     klog_printf("udebug_stop()");
     mutex_lock(&TASK->udebug.lock);
 
-    ipl = interrupts_disable();
-
     /*
      * On success, this will lock t->udebug.lock. Note that this makes sure
      * the thread is not stopped.
      */
     rc = _thread_op_begin(t, true);
     if (rc != EOK) {
-        interrupts_restore(ipl);
         return rc;
     }
 
     /* Take GO away from the thread */
     t->udebug.stop = true;
 
     if (!t->udebug.stoppable) {
         /* Answer will be sent when the thread becomes stoppable */
         _thread_op_end(t);
-        interrupts_restore(ipl);
         return 0;
     }
 
     /*
      * Answer GO call

Line 294 (Rev 3018) ... Line 309 (Rev 3026)

     klog_printf("udebug_stop/ipc_answer");
 
     THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;
 
     _thread_op_end(t);
-    interrupts_restore(ipl);
 
     ipc_answer(&TASK->answerbox, call);
     mutex_unlock(&TASK->udebug.lock);
 
     klog_printf("udebog_stop/done");

Line 334 (Rev 3018) ... Line 348 (Rev 3026)

     /* Copy down the thread IDs */
 
     max_ids = buf_size / sizeof(unative_t);
     copied_ids = 0;
 
+    /* FIXME: make sure the thread isn't past debug shutdown... */
     for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
         /* Do not write past end of buffer */
         if (copied_ids >= max_ids) break;
 
         t = list_get_instance(cur, thread_t, th_link);

Line 366 (Rev 3018) ... Line 381 (Rev 3026)

 }
 
 int udebug_args_read(thread_t *t, void **buffer)
 {
     int rc;
-    ipl_t ipl;
     unative_t *arg_buffer;
 
-    klog_printf("udebug_args_read()");
+//  klog_printf("udebug_args_read()");
 
     /* Prepare a buffer to hold the arguments */
     arg_buffer = malloc(6 * sizeof(unative_t), 0);
 
-    ipl = interrupts_disable();
-
     /* On success, this will lock t->udebug.lock */
     rc = _thread_op_begin(t, false);
     if (rc != EOK) {
-        interrupts_restore(ipl);
         return rc;
     }
 
     /* Additionally we need to verify that we are inside a syscall */
     if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B &&
         t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) {
         _thread_op_end(t);
-        interrupts_restore(ipl);
-
         return EINVAL;
     }
 
     /* Copy to a local buffer before releasing the lock */
     memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t));
 
     _thread_op_end(t);
-    interrupts_restore(ipl);
 
     *buffer = arg_buffer;
     return 0;
 }
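
Editor's aside, not part of either revision: udebug_args_read() above returns a heap-allocated copy of the six syscall arguments through *buffer, and only succeeds while the thread is in a SYSCALL_B or SYSCALL_E event. A minimal consumer sketch, assuming the caller is responsible for freeing the returned buffer (that convention is not shown in this hunk); example_first_syscall_arg is a hypothetical name:

    static int example_first_syscall_arg(thread_t *t, unative_t *arg0)
    {
        void *buf;
        int rc;

        rc = udebug_args_read(t, &buf);
        if (rc != 0)
            return rc;      /* thread not usable or not inside a syscall event */

        /* The buffer holds six unative_t values; take the first one. */
        *arg0 = ((unative_t *) buf)[0];

        free(buf);          /* assumption: the caller releases the buffer */

        return 0;
    }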
 
 int udebug_regs_read(thread_t *t, void *buffer)
 {
     istate_t *state;
     int rc;
-    ipl_t ipl;
 
-    klog_printf("udebug_regs_read()");
-
-    ipl = interrupts_disable();
+//  klog_printf("udebug_regs_read()");
 
     /* On success, this will lock t->udebug.lock */
     rc = _thread_op_begin(t, false);
     if (rc != EOK) {
-        interrupts_restore(ipl);
         return rc;
     }
 
     state = t->udebug.uspace_state;
     if (state == NULL) {
         _thread_op_end(t);
-        interrupts_restore(ipl);
         klog_printf("udebug_regs_read() - istate not available");
         return EBUSY;
     }
 
     /* Copy to the allocated buffer */
     memcpy(buffer, state, sizeof(istate_t));
 
     _thread_op_end(t);
-    interrupts_restore(ipl);
 
     return 0;
 }
 
 int udebug_regs_write(thread_t *t, void *buffer)
 {
     int rc;
     istate_t *state;
-    ipl_t ipl;
 
     klog_printf("udebug_regs_write()");
 
     /* Try to change the thread's uspace_state */
 
-    ipl = interrupts_disable();
-
     /* On success, this will lock t->udebug.lock */
     rc = _thread_op_begin(t, false);
     if (rc != EOK) {
         klog_printf("error locking thread");
-        interrupts_restore(ipl);
         return rc;
     }
 
     state = t->udebug.uspace_state;
     if (state == NULL) {
         _thread_op_end(t);
-        interrupts_restore(ipl);
         klog_printf("udebug_regs_write() - istate not available");
-
         return EBUSY;
     }
 
     memcpy(t->udebug.uspace_state, buffer, sizeof(istate_t));
 
     _thread_op_end(t);
-    interrupts_restore(ipl);
 
     return 0;
 }
 
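
Editor's aside, not part of either revision: udebug_regs_read() and udebug_regs_write() above both operate on a whole istate_t snapshot and fail with EBUSY when the thread has no saved userspace state. A minimal round-trip sketch, assuming the caller supplies its own istate_t storage and without touching any architecture-specific fields (example_regs_roundtrip is a hypothetical name):

    static int example_regs_roundtrip(thread_t *t)
    {
        istate_t state;
        int rc;

        /* Snapshot the thread's saved userspace register state. */
        rc = udebug_regs_read(t, (void *) &state);
        if (rc != 0)
            return rc;      /* e.g. EBUSY when no istate is available */

        /* A debugger would adjust selected (architecture-specific) fields here. */

        /* Write the (possibly modified) state back. */
        return udebug_regs_write(t, (void *) &state);
    }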