Subversion Repositories HelenOS


Diff between Rev 2842 and Rev 2848. Lines prefixed with "-" exist only in Rev 2842, lines prefixed with "+" only in Rev 2848; unprefixed lines are common context.
Line 20 (rev 2842) ... Line 20 (rev 2848)

 /**
  * Get and lock a phone's callee task.
  *
  * This will return a pointer to the task to which the phone
  * is connected. It will lock the task, making sure it exists.
+ *
+ * Interrupts must be already disabled.
+ *
  * (TODO: make sure the udebug-cleanup of the task hasn't
  * started yet)
  */
 static task_t *get_lock_callee_task(phone_t *phone)
 {
     answerbox_t *box;
     task_t *ta;
     task_id_t taskid;
-    ipl_t ipl;
 
-    ipl = interrupts_disable();
     spinlock_lock(&phone->lock);
     if (phone->state != IPC_PHONE_CONNECTED) {
         spinlock_unlock(&phone->lock);
-        interrupts_restore(ipl);
         return NULL;
     }
 
     box = phone->callee;
 
Line 52 (rev 2842) ... Line 52 (rev 2848)

 
     spinlock_lock(&tasks_lock);
     ta = task_find_by_id(taskid);
     if (ta == NULL) {
         spinlock_unlock(&tasks_lock);
-        interrupts_restore(ipl);
         return NULL;
     }
 
     spinlock_lock(&ta->lock);
     spinlock_unlock(&tasks_lock);
-    interrupts_restore(ipl);
 
     return ta;
 }
 
 /**
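
Note on the two hunks above: interrupt handling has been lifted out of get_lock_callee_task() entirely; the function now documents that interrupts must already be disabled and no longer saves or restores the interrupt level itself. A minimal sketch of the resulting calling convention (a hypothetical caller, not part of this revision; the actual call sites appear in the hunks below and currently omit the NULL check):

static int example_use_callee_task(phone_t *phone)
{
    ipl_t ipl;
    task_t *ta;

    ipl = interrupts_disable();         /* caller disables interrupts first */
    ta = get_lock_callee_task(phone);   /* returns with ta->lock held, or NULL */
    if (ta == NULL) {
        interrupts_restore(ipl);        /* caller restores on failure, too */
        return ENOENT;
    }

    /* ... inspect the locked task ... */

    spinlock_unlock(&ta->lock);
    interrupts_restore(ipl);
    return EOK;
}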
Line 130 (rev 2842) ... Line 128 (rev 2848)

     /* Set debug_active on all of the task's userspace threads */
 
     for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
         t = list_get_instance(cur, thread_t, th_link);
 
-        spinlock_lock(&t->lock);
+        spinlock_lock(&t->debug_lock);
         if ((t->flags & THREAD_FLAG_USPACE) != 0)
             t->debug_active = true;
-        spinlock_unlock(&t->lock);
+        spinlock_unlock(&t->debug_lock);
     }
 
     spinlock_unlock(&ta->lock);
     interrupts_restore(ipl);
 
Line 152 (rev 2842) ... Line 150 (rev 2848)

     task_t *ta;
     ipl_t ipl;
 
     thread_t *t;
     link_t *cur;
+    int flags;
 
     klog_printf("udebug_rp_end()");
 
     ipl = interrupts_disable();
     ta = get_lock_callee_task(phone);
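
The hunks that follow manipulate several per-thread debug fields. For orientation, a sketch of those fields as this diff uses them; the grouping into one struct is illustrative only (in the kernel they are plain thread_t members) and the comments are inferred from the surrounding code rather than taken from headers:

typedef struct {
    spinlock_t debug_lock;      /* serializes the debug fields below */
    bool debug_active;          /* thread participates in udebug operations */
    bool debug_stop;            /* thread is stopped, waiting for a "go" */
    call_t *debug_go_call;      /* IPC call stored by the "go" request */
    waitq_t go_wq;              /* wait queue the stopped thread sleeps on */
    istate_t *uspace_state;     /* saved userspace register state, if any */
    unative_t syscall_args[6];  /* syscall arguments read by debug_args_read() */
} udebug_thread_fields_sketch_t;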
Line 173 (rev 2842) ... Line 172 (rev 2848)

     for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
         t = list_get_instance(cur, thread_t, th_link);
 
         spinlock_lock(&t->lock);
 
+        flags = t->flags;
+
+        spinlock_lock(&t->debug_lock);
+        spinlock_unlock(&t->lock);
+
         /* Only process userspace threads */
-        if ((t->flags & THREAD_FLAG_USPACE) != 0) {
+        if ((flags & THREAD_FLAG_USPACE) != 0) {
             /* Prevent any further debug activity in thread */
             t->debug_active = false;
 
             /* Still has go? */
             if (t->debug_stop == false) {
Line 196 (rev 2842) ... Line 200 (rev 2848)

             } else {
                 /*
                  * Debug_stop is already at initial value.
                  * Yet this means the thread needs waking up.
                  */
+
+                /*
+                 * t's lock must not be held when calling
+                 * waitq_wakeup.
+                 */
                 waitq_wakeup(&t->go_wq, WAKEUP_FIRST);
             }
         }
-
-        spinlock_unlock(&t->lock);
+        spinlock_unlock(&t->debug_lock);
     }
 
     ta->dt_state = UDEBUG_TS_INACTIVE;
 
     spinlock_unlock(&ta->lock);
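
The udebug_rp_end() hunks above follow a locking discipline that is easy to miss when reading line by line. Roughly, and inferred from the code and its comments rather than stated anywhere as a rule in this revision:

    spinlock_lock(&t->lock);          /* held only long enough to ...       */
    flags = t->flags;                 /* ... snapshot the thread flags      */
    spinlock_lock(&t->debug_lock);    /* t->lock is taken before debug_lock */
    spinlock_unlock(&t->lock);        /* and dropped before any real work   */

    /* ... clear debug_active / inspect debug_stop under debug_lock ... */

    waitq_wakeup(&t->go_wq, WAKEUP_FIRST);  /* neither t->lock nor threads_lock
                                               is held here; debug_lock is */
    spinlock_unlock(&t->debug_lock);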
Line 221 (rev 2842) ... Line 229 (rev 2848)

 static int udebug_rp_go(call_t *call, phone_t *phone)
 {
     thread_t *t;
     task_t *ta;
     ipl_t ipl;
+    int rc;
 
     klog_printf("debug_go()");
+
+    ipl = interrupts_disable();
+
     ta = get_lock_callee_task(phone);
     spinlock_unlock(&ta->lock);
     // TODO: don't lock ta
 
     t = (thread_t *) IPC_GET_ARG2(call->data);
 
-    ipl = interrupts_disable();
     spinlock_lock(&threads_lock);
+    if (!thread_exists(t)) {
+        spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
+        return ENOENT;
+    }
 
+    spinlock_lock(&t->debug_lock);
+
+    /* Verify that thread t may be operated on */
+    rc = verify_thread(t, ta);
+    if (rc != EOK) {
+        spinlock_unlock(&t->debug_lock);
+        spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
+        return rc;
+    }
+
+    /*
+     * Since t->debug_active == true and t->debug_lock is held,
+     * we can safely release threads_lock and t will continue
+     * to exist (and will stay in debug_active state)
+     */
+    spinlock_unlock(&threads_lock);
 
     t->debug_go_call = call;
     t->debug_stop = false;
+
+    /*
+     * Neither t's lock nor threads_lock may be held during wakeup
+     */
     waitq_wakeup(&t->go_wq, WAKEUP_FIRST);
 
-    spinlock_unlock(&threads_lock);
+    spinlock_unlock(&t->debug_lock);
     interrupts_restore(ipl);
 
     return 0; /* no backsend */
 }
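
udebug_rp_go() introduces a thread_exists() / debug_lock / verify_thread() sequence that the remaining hunks repeat almost verbatim in debug_args_read(), debug_regs_read() and debug_regs_write(). A hypothetical helper capturing the shared part (not present in this revision, shown only to make the repeated pattern explicit) might look like:

/* Caller holds threads_lock with interrupts disabled. On EOK the thread's
 * debug_lock is held; on error no additional locks are held. */
static int lock_verified_thread(thread_t *t, task_t *ta)
{
    int rc;

    if (!thread_exists(t))
        return ENOENT;

    spinlock_lock(&t->debug_lock);

    /* Verify that thread t may be operated on */
    rc = verify_thread(t, ta);
    if (rc != EOK) {
        spinlock_unlock(&t->debug_lock);
        return rc;
    }

    return EOK;
}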
Line 254 (rev 2842) ... Line 291 (rev 2848)

     ipl_t ipl;
     unative_t buffer[6];
 
     klog_printf("debug_args_read()");
 
+    ipl = interrupts_disable();
     ta = get_lock_callee_task(phone);
     klog_printf("task %llu", ta->taskid);
     spinlock_unlock(&ta->lock);
 
     t = (thread_t *) IPC_GET_ARG2(call->data);
 
-    ipl = interrupts_disable();
     spinlock_lock(&threads_lock);
 
+    if (!thread_exists(t)) {
+        spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
+        return ENOENT;
+    }
+
+    spinlock_lock(&t->debug_lock);
+
-    /* Verify that thread t exists and may be operated on */
+    /* Verify that thread t may be operated on */
     rc = verify_thread(t, ta);
     if (rc != EOK) {
+        spinlock_unlock(&t->debug_lock);
         spinlock_unlock(&threads_lock);
         interrupts_restore(ipl);
         return rc;
     }
 
+    /*
+     * We can now safely release threads_lock as debug_active == true
+     * and t->debug_lock is held.
+     */
+    spinlock_unlock(&threads_lock);
+
     //FIXME: additionally we need to verify that we are inside a syscall
 
     /* Copy to a local buffer before releasing the lock */
     memcpy(buffer, t->syscall_args, 6 * sizeof(unative_t));
 
-    spinlock_unlock(&threads_lock);
+    spinlock_unlock(&t->debug_lock);
     interrupts_restore(ipl);
 
     /* Now copy to userspace */
 
     uspace_buffer = (void *)IPC_GET_ARG3(call->data);
Line 316 (rev 2842) ... Line 368 (rev 2848)

     ipl = interrupts_disable();
     spinlock_lock(&threads_lock);
 
     t = (thread_t *) IPC_GET_ARG2(call->data);
 
+    if (!thread_exists(t)) {
+        spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
+        return ENOENT;
+    }
+
+    spinlock_lock(&t->debug_lock);
+
-    /* Verify that thread t exists and may be operated on */
+    /* Verify that thread t may be operated on */
     rc = verify_thread(t, ta);
     if (rc != EOK) {
+        spinlock_unlock(&t->debug_lock);
         spinlock_unlock(&threads_lock);
         interrupts_restore(ipl);
         return rc;
     }
 
+    /*
+     * We can now safely release threads_lock as debug_active == true
+     * and t->debug_lock is held.
+     */
+    spinlock_unlock(&threads_lock);
+
     state = t->uspace_state;
     if (state == NULL) {
         spinlock_unlock(&threads_lock);
         interrupts_restore(ipl);
         klog_printf("debug_regs_read() - istate not available");
         return EBUSY;
     }
 
     /* Copy to a local buffer so that we can release the lock */
     memcpy(&state_copy, state, sizeof(state_copy));
-    spinlock_unlock(&threads_lock);
+    spinlock_unlock(&t->debug_lock);
     interrupts_restore(ipl);
 
     uspace_buffer = (void *)IPC_GET_ARG3(call->data);
     to_copy = IPC_GET_ARG4(call->data);
     if (to_copy > sizeof(istate_t)) to_copy = sizeof(istate_t);
 
     rc = copy_to_uspace(uspace_buffer, &state_copy, to_copy);
     if (rc != 0) {
-        spinlock_unlock(&ta->lock);
         klog_printf("debug_regs_read() - copy failed");
         return rc;
     }
 
     IPC_SET_ARG1(call->data, to_copy);
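
One detail in the debug_regs_read() hunk above looks like a leftover: threads_lock is released right after verify_thread() succeeds, yet the state == NULL branch still unlocks threads_lock (already released at that point) instead of t->debug_lock, which is held. The matching branch in debug_regs_write() below was updated to unlock t->debug_lock. If this branch is meant to mirror debug_regs_write(), it would presumably read:

    if (state == NULL) {
        spinlock_unlock(&t->debug_lock);    /* not threads_lock, released earlier */
        interrupts_restore(ipl);
        klog_printf("debug_regs_read() - istate not available");
        return EBUSY;
    }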
Line 391 (rev 2842) ... Line 457 (rev 2848)

     ipl = interrupts_disable();
     spinlock_lock(&threads_lock);
 
     t = (thread_t *) IPC_GET_ARG2(call->data);
 
+    if (!thread_exists(t)) {
+        spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
+        return ENOENT;
+    }
+
+    spinlock_lock(&t->debug_lock);
+
-    /* Verify that thread t exists and may be operated on */
+    /* Verify that thread t may be operated on */
     rc = verify_thread(t, ta);
     if (rc != EOK) {
+        spinlock_unlock(&t->debug_lock);
         spinlock_unlock(&threads_lock);
         interrupts_restore(ipl);
         return rc;
     }
 
     state = t->uspace_state;
     if (state == NULL) {
-        spinlock_unlock(&threads_lock);
+        spinlock_unlock(&t->debug_lock);
         interrupts_restore(ipl);
         klog_printf("debug_regs_write() - istate not available");
         return EBUSY;
     }
 
     memcpy(t->uspace_state, &data_copy, sizeof(t->uspace_state));
 
-    spinlock_unlock(&threads_lock);
+    spinlock_unlock(&t->debug_lock);
     interrupts_restore(ipl);
 
     /* Set answer values */
 
     IPC_SET_ARG1(call->data, to_copy);