--- Rev 2131
+++ Rev 2292
@@ -93,14 +93,17 @@
  */
 static slab_cache_t *as_slab;
 #endif
 
 /**
- * This lock protects inactive_as_with_asid_head list. It must be acquired
- * before as_t mutex.
+ * This lock serializes access to the ASID subsystem.
+ * It protects:
+ * - inactive_as_with_asid_head list
+ * - as->asid for each as of the as_t type
+ * - asids_allocated counter
  */
-SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
+SPINLOCK_INITIALIZE(asidlock);
 
 /**
  * This list contains address spaces that are not active on any
  * processor and that have valid ASID.
  */
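The rewritten comment widens the lock's coverage from one list to the whole ASID subsystem. A minimal sketch of the invariant it describes (illustrative only; asids_allocated is named by the comment itself, the surrounding statements are assumed):

	/* Every touch of ASID state happens under the one asidlock. */
	spinlock_lock(&asidlock);
	as->asid = asid_get();	/* as->asid is covered...               */
	asids_allocated++;	/* ...and so is the allocation counter. */
	spinlock_unlock(&asidlock);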
@@ -204,17 +207,18 @@
 	/*
 	 * Since there is no reference to this area,
 	 * it is safe not to lock its mutex.
 	 */
+
 	ipl = interrupts_disable();
-	spinlock_lock(&inactive_as_with_asid_lock);
+	spinlock_lock(&asidlock);
 	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
 		if (as != AS && as->cpu_refcount == 0)
 			list_remove(&as->inactive_as_with_asid_link);
 		asid_put(as->asid);
 	}
-	spinlock_unlock(&inactive_as_with_asid_lock);
+	spinlock_unlock(&asidlock);
 
 	/*
 	 * Destroy address space areas of the address space.
 	 * The B+tree must be walked carefully because it is
 	 * also being destroyed.
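The guard around list_remove() encodes an invariant worth spelling out: an address space sits on the inactive list only if it holds a valid ASID, is not the current one, and no CPU runs it. A hypothetical helper expressing that condition (not part of the source):

	/* Hypothetical predicate mirroring the condition that guards
	 * list_remove() above. */
	static bool as_is_on_inactive_list(as_t *as)
	{
		return as->asid != ASID_INVALID && as != AS &&
		    as->cpu_refcount == 0;
	}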
@@ -409,11 +413,11 @@
 				count_t c =
 				    (count_t) node->value[node->keys - 1];
 				int i = 0;
 
 				if (overlaps(b, c * PAGE_SIZE, area->base,
-				    pages*PAGE_SIZE)) {
+				    pages * PAGE_SIZE)) {
 
 					if (b + c * PAGE_SIZE <= start_free) {
 						/*
 						 * The whole interval fits
 						 * completely in the resized
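Only whitespace changes here, but the call is a good place to recall what overlaps() computes. A common shape for such a predicate over half-open intervals (a sketch, not necessarily the kernel's exact definition):

	/* [s1, s1 + sz1) and [s2, s2 + sz2) intersect iff each interval
	 * starts before the other one ends. */
	static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
	{
		return s1 < s2 + sz2 && s2 < s1 + sz1;
	}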
@@ -551,11 +555,11 @@
 				ASSERT(pte && PTE_VALID(pte) &&
 				    PTE_PRESENT(pte));
 				if (area->backend &&
 				    area->backend->frame_free) {
 					area->backend->frame_free(area, b +
 					    j * PAGE_SIZE, PTE_GET_FRAME(pte));
 				}
 				page_mapping_remove(as, b + j * PAGE_SIZE);
 				page_table_unlock(as, false);
 			}
 		}
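The frame_free test-and-call above is the usual function-pointer dispatch into an area's backend. A sketch of the shape involved (field names other than frame_free are assumptions for illustration):

	/* Each area type supplies callbacks; the generic code calls
	 * through them only when they are present. */
	typedef struct mem_backend {
		void (*frame_free)(as_area_t *area, uintptr_t page,
		    uintptr_t frame);
		/* other operations, e.g. a page fault handler, follow */
	} mem_backend_t;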
@@ -611,12 +615,11 @@
  *
  * @return Zero on success or ENOENT if there is no such task or if there is no
  * such address space area, EPERM if there was a problem in accepting the area
  * or ENOMEM if there was a problem in allocating destination address space
  * area. ENOTSUP is returned if the address space area backend does not support
- * sharing or if the kernel detects an attempt to create an illegal address
- * alias.
+ * sharing.
  */
 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
     as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
 {
 	ipl_t ipl;
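With the alias case gone, ENOTSUP is left meaning only that the backend cannot share. A hypothetical call site handling the documented codes (argument values invented for illustration):

	int rc = as_area_share(src_as, src_base, acc_size, dst_as,
	    dst_base, dst_flags_mask);
	switch (rc) {
	case 0:
		break;		/* the area is now shared */
	case ENOENT:		/* no such task or no such area */
	case EPERM:		/* the area refused the requested access */
	case ENOMEM:		/* destination area could not be allocated */
	case ENOTSUP:		/* the backend does not support sharing */
		/* propagate the error to the requester */
		break;
	}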
@@ -665,24 +668,10 @@
 		mutex_unlock(&src_as->lock);
 		interrupts_restore(ipl);
 		return EPERM;
 	}
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-	if (!(dst_flags_mask & AS_AREA_EXEC)) {
-		if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
-			/*
-			 * Refuse to create an illegal address alias.
-			 */
-			mutex_unlock(&src_area->lock);
-			mutex_unlock(&src_as->lock);
-			interrupts_restore(ipl);
-			return ENOTSUP;
-		}
-	}
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-
 	/*
 	 * Now we are committed to sharing the area.
 	 * First, prepare the area for sharing.
 	 * Then it will be safe to unlock it.
 	 */
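The deleted block was the illegal-alias guard for virtually indexed data caches: two virtual mappings of one physical frame must agree in the bits that index the cache, or writes through one mapping can sit in a cache line the other never sees. A typical page-color definition has this shape (sketch; the real constants are architecture-specific):

	/* Assumed 2-bit color purely for illustration. */
	#define PAGE_COLOR_BITS	2
	#define PAGE_COLOR(va) \
	    (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1))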
@@ -873,41 +862,36 @@
 }
 
 /** Switch address spaces.
  *
  * Note that this function cannot sleep as it is essentially a part of
- * scheduling. Sleeping here would lead to deadlock on wakeup.
+ * scheduling. Sleeping here would lead to deadlock on wakeup. Another
+ * thing which is forbidden in this context is locking the address space.
  *
  * @param old Old address space or NULL.
  * @param new New address space.
  */
 void as_switch(as_t *old_as, as_t *new_as)
 {
-	ipl_t ipl;
-	bool needs_asid = false;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&inactive_as_with_asid_lock);
+	spinlock_lock(&asidlock);
 
 	/*
 	 * First, take care of the old address space.
 	 */
 	if (old_as) {
-		mutex_lock_active(&old_as->lock);
 		ASSERT(old_as->cpu_refcount);
 		if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
 			/*
 			 * The old address space is no longer active on
 			 * any processor. It can be appended to the
 			 * list of inactive address spaces with assigned
 			 * ASID.
 			 */
 			ASSERT(old_as->asid != ASID_INVALID);
 			list_append(&old_as->inactive_as_with_asid_link,
 			    &inactive_as_with_asid_head);
 		}
-		mutex_unlock(&old_as->lock);
 
 		/*
 		 * Perform architecture-specific tasks when the address space
 		 * is being removed from the CPU.
 		 */
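The dropped mutex_lock_active()/mutex_unlock() pair was the old workaround for the no-sleep rule: a mutex taken by spinning instead of blocking. Something like the following shape, assuming a trylock primitive that returns nonzero on success (a sketch, not the kernel's definition):

	/* Sketch only: acquire by spinning, never by blocking. */
	#define mutex_lock_active(mtx) \
		while (!mutex_trylock((mtx))) \
			; /* spin */

With asidlock now held for the whole switch, that spinning becomes redundant for protecting the refcount and the inactive list.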
@@ -915,46 +899,27 @@
 	}
 
 	/*
 	 * Second, prepare the new address space.
 	 */
-	mutex_lock_active(&new_as->lock);
 	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
-		if (new_as->asid != ASID_INVALID) {
+		if (new_as->asid != ASID_INVALID)
 			list_remove(&new_as->inactive_as_with_asid_link);
-		} else {
-			/*
-			 * Defer call to asid_get() until new_as->lock is released.
-			 */
-			needs_asid = true;
-		}
+		else
+			new_as->asid = asid_get();
 	}
 #ifdef AS_PAGE_TABLE
 	SET_PTL0_ADDRESS(new_as->genarch.page_table);
 #endif
-	mutex_unlock(&new_as->lock);
-
-	if (needs_asid) {
-		/*
-		 * Allocation of new ASID was deferred
-		 * until now in order to avoid deadlock.
-		 */
-		asid_t asid;
-
-		asid = asid_get();
-		mutex_lock_active(&new_as->lock);
-		new_as->asid = asid;
-		mutex_unlock(&new_as->lock);
-	}
-	spinlock_unlock(&inactive_as_with_asid_lock);
-	interrupts_restore(ipl);
 
 	/*
 	 * Perform architecture-specific steps.
 	 * (e.g. write ASID to hardware register etc.)
 	 */
 	as_install_arch(new_as);
+
+	spinlock_unlock(&asidlock);
 
 	AS = new_as;
 }
 
 /** Convert address space area flags to page flags.
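Taken together, this hunk removes the deferred-ASID machinery: asid_get() used to be unsafe under new_as->lock, so the allocation was postponed until that mutex was released; with the single asidlock held across the whole switch there is nothing to defer, and the lock is dropped only after as_install_arch() has installed the new ASID. The interrupt disable/restore pair also left the function, presumably becoming the caller's duty. A hypothetical call site under those rules (task names invented for illustration):

	/* Scheduler-side sketch: interrupts off, no sleeping, and no
	 * taking of as_t locks around the switch. */
	ipl_t ipl = interrupts_disable();
	as_switch(TASK ? TASK->as : NULL, new_task->as);
	interrupts_restore(ipl);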