Diff between Rev 2870 and Rev 2902 (kernel IPC code):

```diff
@@ -47,10 +47,11 @@
 #include <proc/task.h>
 #include <memstr.h>
 #include <debug.h>
 
 #include <print.h>
+#include <console/klog.h>
 #include <proc/thread.h>
 #include <arch/interrupt.h>
 #include <ipc/irq.h>
 
 /** Open channel that is assigned automatically to new tasks */
```
```diff
@@ -422,50 +423,142 @@
 		IPC_SET_RETVAL(call->data, EHANGUP);
 		_ipc_answer_free_call(call);
 	}
 }
 
-/** Cleans up all IPC communication of the current task.
+/** Disconnects all phones connected to an answerbox.
  *
- * Note: ipc_hangup sets returning answerbox to TASK->answerbox, you
- * have to change it as well if you want to cleanup other tasks than TASK.
+ * @param box		Answerbox to disconnect phones from.
+ * @param notify_box	If true, the answerbox will get a hangup message for
+ *			each disconnected phone.
  */
-void ipc_cleanup(void)
+static void ipc_answerbox_slam_phones(answerbox_t *box, bool notify_box)
 {
-	int i;
-	call_t *call;
 	phone_t *phone;
 	DEADLOCK_PROBE_INIT(p_phonelck);
+	ipl_t ipl;
+	call_t *call;
 
-	/* Disconnect all our phones ('ipc_phone_hangup') */
-	for (i = 0; i < IPC_MAX_PHONES; i++)
-		ipc_phone_hangup(&TASK->phones[i]);
-
-	/* Disconnect all connected irqs */
-	ipc_irq_cleanup(&TASK->answerbox);
+	call = ipc_call_alloc(0);
 
 	/* Disconnect all phones connected to our answerbox */
 restart_phones:
-	spinlock_lock(&TASK->answerbox.lock);
-	while (!list_empty(&TASK->answerbox.connected_phones)) {
-		phone = list_get_instance(TASK->answerbox.connected_phones.next,
+	ipl = interrupts_disable();
+	spinlock_lock(&box->lock);
+	while (!list_empty(&box->connected_phones)) {
+		phone = list_get_instance(box->connected_phones.next,
 		    phone_t, link);
 		if (!spinlock_trylock(&phone->lock)) {
-			spinlock_unlock(&TASK->answerbox.lock);
+			spinlock_unlock(&box->lock);
+			interrupts_restore(ipl);
 			DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD);
 			goto restart_phones;
 		}
 
 		/* Disconnect phone */
 		ASSERT(phone->state == IPC_PHONE_CONNECTED);
-		phone->state = IPC_PHONE_SLAMMED;
+
 		list_remove(&phone->link);
+		phone->state = IPC_PHONE_SLAMMED;
+
+		if (notify_box) {
+			spinlock_unlock(&phone->lock);
+			spinlock_unlock(&box->lock);
+			interrupts_restore(ipl);
+
+			/*
+			 * Send one message to the answerbox for each
+			 * phone. Used to make sure the kbox thread
+			 * wakes up after the last phone has been
+			 * disconnected.
+			 */
+			IPC_SET_METHOD(call->data, IPC_M_PHONE_HUNGUP);
+			call->flags |= IPC_CALL_DISCARD_ANSWER;
+			_ipc_call(phone, box, call);
+
+			/* Allocate another call in advance */
+			call = ipc_call_alloc(0);
+
+			/* Must start again */
+			goto restart_phones;
+		}
 
 		spinlock_unlock(&phone->lock);
 	}
 
+	spinlock_unlock(&box->lock);
+	interrupts_restore(ipl);
+
+	/* Free unused call */
+	if (call) ipc_call_free(call);
+}
+
+static void ipc_kbox_cleanup()
+{
+	ipl_t ipl;
+	bool have_kb_thread;
+
+	/* Only hold kb_cleanup_lock while setting kb_finished - this is enough */
+	ipl = interrupts_disable();
+	spinlock_lock(&TASK->kb_cleanup_lock);
+
+	TASK->kb_finished = true;
+
+	spinlock_unlock(&TASK->kb_cleanup_lock);
+	interrupts_restore(ipl);
+
+	have_kb_thread = (TASK->kb_thread != NULL);
+
+	/* From now on nobody will try to connect phones or attach kbox threads */
+
+	/*
+	 * Disconnect all phones connected to our kbox. Passing true for
+	 * notify_box causes a HANGUP message to be inserted for each
+	 * disconnected phone. This ensures the kbox thread is going to
+	 * wake up and terminate.
+	 */
+	ipc_answerbox_slam_phones(&TASK->kernel_box, have_kb_thread);
+
+	/* TODO: Wait for kbox thread to terminate */
+	if (have_kb_thread) {
+		klog_printf("ipc_kbox_cleanup - wait for kbox thread to finish");
+		waitq_sleep(&TASK->kb_thread_shutdown_wq);
+	}
+
+	/* Answer all messages in 'calls' and 'dispatched_calls' queues */
+	spinlock_lock(&TASK->kernel_box.lock);
+	ipc_cleanup_call_list(&TASK->kernel_box.dispatched_calls);
+	ipc_cleanup_call_list(&TASK->kernel_box.calls);
+	spinlock_unlock(&TASK->kernel_box.lock);
+}
+
+
+/** Cleans up all IPC communication of the current task.
+ *
+ * Note: ipc_hangup sets returning answerbox to TASK->answerbox, you
+ * have to change it as well if you want to clean up tasks other than TASK.
+ */
+void ipc_cleanup(void)
+{
+	int i;
+	call_t *call;
+
+	/* Disconnect all our phones ('ipc_phone_hangup') */
+	for (i = 0; i < IPC_MAX_PHONES; i++)
+		ipc_phone_hangup(&TASK->phones[i]);
+
+	/* Disconnect all connected irqs */
+	ipc_irq_cleanup(&TASK->answerbox);
+
+	/* Disconnect all phones connected to our regular answerbox */
+	ipc_answerbox_slam_phones(&TASK->answerbox, false);
+
+	/* Clean up kbox thread and communications */
+	ipc_kbox_cleanup();
+
 	/* Answer all messages in 'calls' and 'dispatched_calls' queues */
+	spinlock_lock(&TASK->answerbox.lock);
 	ipc_cleanup_call_list(&TASK->answerbox.dispatched_calls);
 	ipc_cleanup_call_list(&TASK->answerbox.calls);
 	spinlock_unlock(&TASK->answerbox.lock);
 
 	/* Wait for all async answers to arrive */
```
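The `spinlock_trylock()` / `goto restart_phones` pattern above is worth spelling out: `ipc_answerbox_slam_phones()` has to take the answerbox lock before each phone lock, while other IPC paths acquire a phone lock first, so blocking on the inner lock could deadlock. Trying the inner lock and, on failure, dropping everything and rescanning avoids the inversion. Below is a minimal userspace sketch of the same back-off idea using POSIX mutexes; the names (`box_lock`, `phone_lock`, `slam_phones`) are illustrative stand-ins, not HelenOS APIs.

```c
/* Build with: cc -pthread slam.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t box_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t phone_lock = PTHREAD_MUTEX_INITIALIZER;
static int connected = 1;	/* stands in for the connected_phones list */

static void slam_phones(void)
{
restart:
	pthread_mutex_lock(&box_lock);
	while (connected) {
		/*
		 * Another thread may hold phone_lock while waiting for
		 * box_lock (the opposite order). Never block here: try,
		 * and on failure release everything and start over.
		 */
		if (pthread_mutex_trylock(&phone_lock) != 0) {
			pthread_mutex_unlock(&box_lock);
			goto restart;
		}
		connected = 0;			/* "disconnect" the phone */
		pthread_mutex_unlock(&phone_lock);
	}
	pthread_mutex_unlock(&box_lock);
}

int main(void)
{
	slam_phones();
	printf("all phones slammed\n");
	return 0;
}
```

The same reasoning explains the restart after `_ipc_call()` in the `notify_box` branch: once all locks are dropped to send the hangup message, the phone list may have changed, so the scan starts over from the beginning.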
```diff
@@ -632,62 +725,68 @@
 			if (method == IPC_M_DEBUG_ALL) {
 				udebug_call_receive(call);
 			}
 
 			if (method == IPC_M_PHONE_HUNGUP) {
-				klog_printf("kbox: handle hangup message\n");
+				klog_printf("kbox: handle hangup message");
 
 				/* Was it our debugger who hung up? */
 				if (call->sender == TASK->debugger) {
 					/* Terminate debugging session (if any) */
-					klog_printf("kbox: terminate debug session\n");
+					klog_printf("kbox: terminate debug session");
 					ipl = interrupts_disable();
 					spinlock_lock(&TASK->lock);
 					udebug_task_cleanup(TASK);
 					spinlock_unlock(&TASK->lock);
 					interrupts_restore(ipl);
 				} else {
-					klog_printf("kbox: was not debugger\n");
+					klog_printf("kbox: was not debugger");
 				}
 
-				klog_printf("kbox: continue with hangup message\n");
+				klog_printf("kbox: continue with hangup message");
 				IPC_SET_RETVAL(call->data, 0);
 				ipc_answer(&TASK->kernel_box, call);
 
 				ipl = interrupts_disable();
 				spinlock_lock(&TASK->lock);
 				spinlock_lock(&TASK->answerbox.lock);
 				if (list_empty(&TASK->answerbox.connected_phones)) {
 					/* Last phone has been disconnected */
-					TASK->kb_thread_at_hand = false;
 					TASK->kb_thread = NULL;
 					done = true;
-					printf("phone list is empty\n");
+					klog_printf("phone list is empty");
 				}
 				spinlock_unlock(&TASK->answerbox.lock);
 				spinlock_unlock(&TASK->lock);
 				interrupts_restore(ipl);
 			}
 		}
 	}
 
+	klog_printf("kbox: done, waking up possible shutdown routine");
+	waitq_wakeup(&TASK->kb_thread_shutdown_wq, WAKEUP_ALL);
 	klog_printf("kbox: finished");
 }
 
 
 /**
  * Connect phone to a task kernel-box specified by id.
  *
+ * Note that this is not completely atomic. For optimisation reasons,
+ * the task might start cleaning up kbox after the phone has been connected
+ * and before a kbox thread has been created. This must be taken into account
+ * in the cleanup code.
+ *
  * @return Phone id on success, or negative error code.
  */
 int ipc_connect_kbox(task_id_t taskid)
 {
 	int newphid;
 	task_t *ta;
 	thread_t *kb_thread;
-	int rc;
 	ipl_t ipl;
+	bool had_kb_thread;
 
 	newphid = phone_alloc();
 	if (newphid < 0)
 		return ELIMIT;
 
```
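The two lines added before `klog_printf("kbox: finished")` complete the shutdown handshake introduced in this revision: `ipc_kbox_cleanup()` posts one `IPC_M_PHONE_HUNGUP` message per slammed phone and then sleeps on `kb_thread_shutdown_wq`, while the kbox thread drains those messages, sets `done` once the phone list is empty, and wakes any sleeper on its way out. A rough pthread analogue of that rendezvous, with a condition variable standing in for the kernel wait queue (all names invented for illustration, not HelenOS APIs):

```c
/* Build with: cc -pthread handshake.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t shutdown_wq = PTHREAD_COND_INITIALIZER;
static int phones = 3;		/* phones still connected to the kbox */

static void *kbox_thread(void *arg)
{
	(void) arg;
	pthread_mutex_lock(&lock);
	while (phones > 0) {
		/* Each hangup message corresponds to one disconnected phone. */
		phones--;
		printf("kbox: handle hangup message (%d left)\n", phones);
	}
	pthread_mutex_unlock(&lock);
	/* Last phone gone: wake up a possible shutdown routine. */
	pthread_cond_signal(&shutdown_wq);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, kbox_thread, NULL);

	/* Cleanup side: wait until the worker reports all phones gone. */
	pthread_mutex_lock(&lock);
	while (phones > 0)
		pthread_cond_wait(&shutdown_wq, &lock);	/* waitq_sleep() analogue */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("cleanup: kbox thread finished\n");
	return 0;
}
```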
```diff
@@ -695,48 +794,90 @@
 	spinlock_lock(&tasks_lock);
 
 	ta = task_find_by_id(taskid);
 	if (ta == NULL) {
 		spinlock_unlock(&tasks_lock);
+		interrupts_restore(ipl);
 		return ENOENT;
 	}
 
-	spinlock_lock(&ta->lock);
+	spinlock_lock(&ta->kb_cleanup_lock);
 	spinlock_unlock(&tasks_lock);
 
-	ipc_phone_connect(&TASK->phones[newphid], &ta->kernel_box);
-
-	if (ta->kb_thread_at_hand == false) {
-		ta->kb_thread_at_hand = true;
-		spinlock_unlock(&ta->lock);
-		interrupts_restore(ipl);
-
-		kb_thread = thread_create(kbox_thread_proc,
-		    NULL, ta, THREAD_FLAG_NOATTACH, "kbox", false);
-		if (!kb_thread)
-			return ENOMEM;
-
-		rc = thread_attach_by_id(kb_thread, taskid);
-
-		if (rc == EOK) {
-			ipl = interrupts_disable();
-			spinlock_lock(&ta->lock);
-			ta->kb_thread = kb_thread;
-			spinlock_unlock(&ta->lock);
-			interrupts_restore(ipl);
-
-			thread_detach(kb_thread);
-			thread_ready(kb_thread);
-		} else {
-			/* Return the allocated thread struct */
-			thread_unattached_free(kb_thread);
-		}
-	} else {
-		spinlock_unlock(&ta->lock);
-		interrupts_restore(ipl);
-	}
-
+	/*
+	 * Only ta->kb_cleanup_lock left. Since we checked the value
+	 * of ta->kb_finished, this suffices to ensure the task's existence.
+	 * (And that it didn't start kbox cleanup yet.) It also ensures
+	 * mutual exclusion with other threads running this function.
+	 */
+
+	if (ta->kb_finished != false) {
+		spinlock_unlock(&ta->kb_cleanup_lock);
+		interrupts_restore(ipl);
+		return EINVAL;
+	}
+
+	/* Connect the newly allocated phone to the kbox */
+	ipc_phone_connect(&TASK->phones[newphid], &ta->kernel_box);
+
+	had_kb_thread = (ta->kb_thread != NULL);
+
+	/*
+	 * Release all locks. This is an optimisation that makes
+	 * unnecessary thread creation very unlikely.
+	 */
+	spinlock_unlock(&ta->kb_cleanup_lock);
+	interrupts_restore(ipl);
+
+	/* Create a kbox thread */
+
+	kb_thread = thread_create(kbox_thread_proc,
+	    NULL, ta, THREAD_FLAG_NOATTACH, "kbox", false);
+	if (!kb_thread)
+		return ENOMEM;
+
+	/*
+	 * It might happen that someone else has attached a kbox thread
+	 * in the meantime. Also, the task might have gone or shut down.
+	 * Let's try from the beginning.
+	 */
+
+	ipl = interrupts_disable();
+	spinlock_lock(&tasks_lock);
+
+	ta = task_find_by_id(taskid);
+	if (ta == NULL) {
+		spinlock_unlock(&tasks_lock);
+		return ENOENT;
+	}
+
+	spinlock_lock(&ta->kb_cleanup_lock);
+	spinlock_unlock(&tasks_lock);
+
+	if (ta->kb_finished != false || ta->kb_thread != NULL) {
+		spinlock_unlock(&ta->kb_cleanup_lock);
+		interrupts_restore(ipl);
+
+		/*
+		 * Release the allocated thread struct. This won't
+		 * happen too often, only if two CPUs raced for
+		 * connecting to the kbox.
+		 */
+		thread_unattached_free(kb_thread);
+		return EINVAL;
+	}
+
+	/* Attach thread */
+	ta->kb_thread = kb_thread;
+	thread_attach(kb_thread, ta);
+
+	thread_detach(kb_thread);
+	thread_ready(kb_thread);
+
+	spinlock_unlock(&ta->kb_cleanup_lock);
+	interrupts_restore(ipl);
+
 	return newphid;
 }
 
 /** @}
  */
```