--- Rev 2440
+++ Rev 2446
@@ -65 +65 @@
 #include <mm/slab.h>
 #include <debug.h>
 #include <main/uinit.h>
 #include <syscall/copy.h>
 #include <errno.h>
+#include <console/klog.h>
 
 
 /** Thread states */
 char *thread_states[] = {
 	"Invalid",
 	"Running",
 	"Sleeping",
 	"Ready",
 	"Entering",
 	"Exiting",
-	"Undead"
+	"JoinMe"
 };
 
 /** Lock protecting the threads_btree B+tree.
  *
  * For locking rules, see declaration thereof.
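
Note: the last state string changes from "Undead" to "JoinMe". This table must stay index-aligned with the thread state enumeration, so a matching rename in the thread header is implied. A minimal sketch of what that enumeration presumably looks like after this revision; only the state names are taken from the table above, the exact declaration and the per-state comments are assumptions:

    /* Sketch, assuming the usual one-to-one mapping onto thread_states[].
     * The real enum lives in the thread header, not in this file. */
    typedef enum {
    	Invalid,	/**< It is an error to see a thread in this state. */
    	Running,	/**< Thread is currently executing on a CPU. */
    	Sleeping,	/**< Thread is blocked in a wait queue. */
    	Ready,		/**< Thread is queued for execution. */
    	Entering,	/**< Thread is being created or attached. */
    	Exiting,	/**< Thread has called thread_exit(). */
    	JoinMe		/**< Thread has exited and waits to be reaped. */
    } state_t;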
@@ -326 +327 @@
 
 	t->in_copy_from_uspace = false;
 	t->in_copy_to_uspace = false;
 
 	t->interrupted = false;
-	t->join_type = None;
 	t->detached = false;
 	waitq_initialize(&t->join_wq);
 
 	t->rwlock_holder_type = RWLOCK_NONE;
 
@@ -340 +340 @@
 	t->fpu_context_engaged = 0;
 
 	/* might depend on previous initialization */
 	thread_create_arch(t);
 
-	ipl = interrupts_disable();
-	spinlock_lock(&task->lock);
-	if (!task->accept_new_threads) {
-		spinlock_unlock(&task->lock);
-		slab_free(thread_slab, t);
-		interrupts_restore(ipl);
-		return NULL;
-	} else {
-		/*
-		 * Bump the reference count so that this task cannot be
-		 * destroyed while the new thread is being attached to it.
-		 */
-		task->refcount++;
-	}
-	spinlock_unlock(&task->lock);
-	interrupts_restore(ipl);
-
 	if (!(flags & THREAD_FLAG_NOATTACH))
 		thread_attach(t, task);
 
 	return t;
 }
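
Note: thread_create() no longer touches the task's reference count or the accept_new_threads gate, so it can no longer fail with NULL for a dying task; taking the task references is now entirely thread_attach()'s job. A hedged sketch of the deferred-attach sequence that THREAD_FLAG_NOATTACH enables, mirroring sys_thread_create() further down; the full thread_create() argument list is abridged and namebuf is a hypothetical local:

    /* Sketch: create now, attach later, as sys_thread_create() does.
     * Arguments abridged; error handling elided. */
    thread_t *t = thread_create(uinit, kernel_uarg, TASK,
        THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf /* ... */);
    if (t != NULL) {
    	/* ... report the new TID to the caller first ... */
    	thread_attach(t, TASK);	/* takes the task references */
    	thread_ready(t);
    }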
@@ -371 +354 @@
  *
  * Assume thread->lock is held!!
  */
 void thread_destroy(thread_t *t)
 {
-	bool destroy_task = false;
-
-	ASSERT(t->state == Exiting || t->state == Undead);
+	ASSERT(t->state == Exiting || t->state == JoinMe);
 	ASSERT(t->task);
 	ASSERT(t->cpu);
 
 	spinlock_lock(&t->cpu->lock);
 	if (t->cpu->fpu_owner == t)
@@ -393 +374 @@
 	/*
 	 * Detach from the containing task.
 	 */
 	spinlock_lock(&t->task->lock);
 	list_remove(&t->th_link);
-	if (--t->task->refcount == 0) {
-		t->task->accept_new_threads = false;
-		destroy_task = true;
-	}
 	spinlock_unlock(&t->task->lock);
 
-	if (destroy_task)
+	/*
+	 * t is guaranteed to be the very last thread of its task.
+	 * It is safe to destroy the task.
+	 */
+	if (atomic_predec(&t->task->refcount) == 0)
 		task_destroy(t->task);
 
 	/*
 	 * If the thread had a userspace context, free up its kernel_uarg
 	 * structure.
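
Note: the rewritten test relies on atomic_predec() returning the value after the decrement, so exactly one caller ever observes zero. A minimal sketch of the intended semantics with hypothetical stand-ins; the real atomic_t and atomic_predec() are defined per-architecture in the kernel's atomic header:

    /* Hypothetical stand-ins for illustration only. */
    typedef struct {
    	volatile long count;
    } atomic_t;
    
    /* Atomically decrement and return the NEW value; the caller that
     * gets 0 back is the last holder and may free the shared object. */
    static inline long atomic_predec(atomic_t *val)
    {
    	return __sync_sub_and_fetch(&val->count, 1);
    }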
@@ -429 +410 @@
 	ipl_t ipl;
 
 	/*
 	 * Attach to the current task.
 	 */
 	ipl = interrupts_disable();
 	spinlock_lock(&task->lock);
-	ASSERT(task->refcount);
+	atomic_inc(&task->refcount);
+	atomic_inc(&task->lifecount);
 	list_append(&t->th_link, &task->th_head);
-	if (task->refcount == 1)
-		task->main_thread = t;
 	spinlock_unlock(&task->lock);
 
 	/*
 	 * Register this thread in the system-wide list.
 	 */
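
Note: this hunk splits the task bookkeeping across two counters: refcount keeps the task_t structure itself alive, while lifecount, new in this revision, tracks threads that have not yet exited. A sketch of the presumed task_t additions; only the two fields visible in this diff are shown, everything else is elided:

    /* Assumed shape of the counters added to task_t by this revision. */
    typedef struct task {
    	atomic_t refcount;	/* attached threads keeping task_t alive;
    				 * the last thread_destroy() frees it */
    	atomic_t lifecount;	/* threads that have not run thread_exit()
    				 * yet; the last one performs cleanup */
    	/* ... remaining fields elided ... */
    } task_t;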
@@ -457 +437 @@
  */
 void thread_exit(void)
 {
 	ipl_t ipl;
 
+	if (atomic_predec(&TASK->lifecount) == 0) {
+		/*
+		 * We are the last thread in the task that still has not exited.
+		 * With the exception of the moment the task was created, new
+		 * threads can only be created by threads of the same task.
+		 * We are safe to perform cleanup.
+		 */
+		if (THREAD->flags & THREAD_FLAG_USPACE) {
+			ipc_cleanup();
+			futex_cleanup();
+			klog_printf("Cleanup of task %llu completed.",
+			    TASK->taskid);
+		}
+	}
+
 restart:
 	ipl = interrupts_disable();
 	spinlock_lock(&THREAD->lock);
 	if (THREAD->timeout_pending) {
 		/* busy waiting for timeouts in progress */
 		spinlock_unlock(&THREAD->lock);
 		interrupts_restore(ipl);
 		goto restart;
 	}
+
 	THREAD->state = Exiting;
 	spinlock_unlock(&THREAD->lock);
 	scheduler();
 
 	/* Not reached */
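
Note: the new prologue is the classic "last one out turns off the lights" idiom: every exiting thread decrements lifecount, and only the thread that takes it to zero runs the task-wide cleanup, exactly once. This use of klog_printf() is also what the new <console/klog.h> include in the first hunk is for. A user-space analogue for illustration only, in standard C11/pthreads rather than kernel code:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    
    static atomic_int lifecount = 4;	/* one slot per worker below */
    
    /* atomic_fetch_sub() returns the PREVIOUS value, so seeing 1 here
     * means this thread just brought the counter to zero. */
    static void *worker(void *arg)
    {
    	(void) arg;
    	if (atomic_fetch_sub(&lifecount, 1) == 1)
    		puts("last thread out: running task-wide cleanup");
    	return NULL;
    }
    
    int main(void)
    {
    	pthread_t t[4];
    	for (int i = 0; i < 4; i++)
    		pthread_create(&t[i], NULL, worker, NULL);
    	for (int i = 0; i < 4; i++)
    		pthread_join(t[i], NULL);
    	return 0;
    }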
@@ -522 +518 @@
 	return rc;
 }
 
 /** Detach thread.
  *
- * Mark the thread as detached, if the thread is already in the Undead state,
+ * Mark the thread as detached, if the thread is already in the JoinMe state,
  * deallocate its resources.
  *
  * @param t Thread to be detached.
  */
 void thread_detach(thread_t *t)
@@ -538 +534 @@
 	 * pointer to it must be still valid.
 	 */
 	ipl = interrupts_disable();
 	spinlock_lock(&t->lock);
 	ASSERT(!t->detached);
-	if (t->state == Undead) {
+	if (t->state == JoinMe) {
 		thread_destroy(t);	/* unlocks &t->lock */
 		interrupts_restore(ipl);
 		return;
 	} else {
 		t->detached = true;
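
Note: the same rename applies on the reaping path: a detached thread that has already reached JoinMe is destroyed on the spot. A creator-side usage sketch; worker_fn is a hypothetical worker routine and thread_create()'s full parameter list is not shown in this diff, so it is abridged here:

    /* Sketch: fire-and-forget thread; nobody will join it. */
    thread_t *t = thread_create(worker_fn, NULL, TASK, 0, "worker" /* ... */);
    if (t != NULL) {
    	thread_detach(t);	/* auto-reap once it enters JoinMe */
    	thread_ready(t);
    }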
@@ -700 +696 @@
 	int rc;
 
 	rc = copy_to_uspace(uspace_thread_id, &t->tid,
 	    sizeof(t->tid));
 	if (rc != 0) {
-		ipl_t ipl;
-
 		/*
 		 * We have encountered a failure, but the thread
 		 * has already been created. We need to undo its
 		 * creation now.
 		 */
 
 		/*
-		 * The new thread structure is initialized,
-		 * but is still not visible to the system.
+		 * The new thread structure is initialized, but
+		 * is still not visible to the system.
 		 * We can safely deallocate it.
 		 */
 		slab_free(thread_slab, t);
 		free(kernel_uarg);
 
-		/*
-		 * Now we need to decrement the task reference
-		 * counter. Because we are running within the
-		 * same task, thread t is not the last thread
-		 * in the task, so it is safe to merely
-		 * decrement the counter.
-		 */
-		ipl = interrupts_disable();
-		spinlock_lock(&TASK->lock);
-		TASK->refcount--;
-		spinlock_unlock(&TASK->lock);
-		interrupts_restore(ipl);
-
 		return (unative_t) rc;
 	}
 }
 thread_attach(t, TASK);
 thread_ready(t);
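
Note: with thread_create() no longer taking a task reference (see the third hunk), the failure path here shrinks to freeing the never-published thread; there is no counter left to roll back. The reworked lifecycle, summarized informally from the hunks above:

    /*
     * Lifecycle after this revision (informal summary of the diff):
     *
     *   thread_create()  - allocate and initialize only; takes no task
     *                      reference, so a failed sys_thread_create()
     *                      merely slab_free()s the thread.
     *   thread_attach()  - atomic_inc(refcount) + atomic_inc(lifecount),
     *                      then link the thread into the task.
     *   thread_exit()    - atomic_predec(lifecount); the last exiting
     *                      thread runs ipc/futex cleanup.
     *   thread_destroy() - atomic_predec(refcount); the last reaped
     *                      thread triggers task_destroy().
     */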