--- Rev 2141
+++ Rev 2170
@@ -93,14 +93,17 @@
  */
 static slab_cache_t *as_slab;
 #endif
 
 /**
- * This lock protects inactive_as_with_asid_head list. It must be acquired
- * before as_t mutex.
+ * This lock serializes access to the ASID subsystem.
+ * It protects:
+ * - inactive_as_with_asid_head list
+ * - as->asid for each as of the as_t type
+ * - asids_allocated counter
  */
-SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
+SPINLOCK_INITIALIZE(asidlock);
 
 /**
  * This list contains address spaces that are not active on any
  * processor and that have valid ASID.
  */
@@ -203,18 +206,19 @@
 
     /*
      * Since there is no reference to this area,
      * it is safe not to lock its mutex.
      */
+
     ipl = interrupts_disable();
-    spinlock_lock(&inactive_as_with_asid_lock);
+    spinlock_lock(&asidlock);
     if (as->asid != ASID_INVALID && as != AS_KERNEL) {
         if (as != AS && as->cpu_refcount == 0)
             list_remove(&as->inactive_as_with_asid_link);
         asid_put(as->asid);
     }
-    spinlock_unlock(&inactive_as_with_asid_lock);
+    spinlock_unlock(&asidlock);
 
     /*
      * Destroy address space areas of the address space.
      * The B+tree must be walked carefully because it is
      * also being destroyed.
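In as_destroy() above, the renamed lock's critical section now also covers asid_put(). The list_remove() stays conditional because only an address space that is inactive (as != AS, cpu_refcount == 0) yet still owns a valid ASID sits on the inactive list, and the kernel address space never returns its ASID at all. Since the lock's comment names the asids_allocated counter, asid_put() presumably updates that counter under the caller's asidlock. A sketch of that shape, assuming an asid_put_arch() recycling hook (an assumption about asid.c, not part of this diff):

    void asid_put(asid_t asid)
    {
        /* caller already holds asidlock with interrupts disabled */
        asids_allocated--;
        asid_put_arch(asid);    /* presumed arch-specific recycling hook */
    }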
Line 858... | Line 862... | ||
858 | } |
862 | } |
859 | 863 | ||
860 | /** Switch address spaces. |
864 | /** Switch address spaces. |
861 | * |
865 | * |
862 | * Note that this function cannot sleep as it is essentially a part of |
866 | * Note that this function cannot sleep as it is essentially a part of |
863 | * scheduling. Sleeping here would lead to deadlock on wakeup. |
867 | * scheduling. Sleeping here would lead to deadlock on wakeup. Another |
- | 868 | * thing which is forbidden in this context is locking the address space. |
|
864 | * |
869 | * |
865 | * @param old Old address space or NULL. |
870 | * @param old Old address space or NULL. |
866 | * @param new New address space. |
871 | * @param new New address space. |
867 | */ |
872 | */ |
868 | void as_switch(as_t *old_as, as_t *new_as) |
873 | void as_switch(as_t *old_as, as_t *new_as) |
869 | { |
874 | { |
870 | ipl_t ipl; |
- | |
871 | bool needs_asid = false; |
- | |
872 | - | ||
873 | ipl = interrupts_disable(); |
- | |
874 | spinlock_lock(&inactive_as_with_asid_lock); |
875 | spinlock_lock(&asidlock); |
875 | 876 | ||
876 | /* |
877 | /* |
877 | * First, take care of the old address space. |
878 | * First, take care of the old address space. |
878 | */ |
879 | */ |
879 | if (old_as) { |
880 | if (old_as) { |
880 | mutex_lock_active(&old_as->lock); |
- | |
881 | ASSERT(old_as->cpu_refcount); |
881 | ASSERT(old_as->cpu_refcount); |
882 | if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { |
882 | if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { |
883 | /* |
883 | /* |
884 | * The old address space is no longer active on |
884 | * The old address space is no longer active on |
885 | * any processor. It can be appended to the |
885 | * any processor. It can be appended to the |
Line 888... | Line 888... | ||
888 | */ |
888 | */ |
889 | ASSERT(old_as->asid != ASID_INVALID); |
889 | ASSERT(old_as->asid != ASID_INVALID); |
890 | list_append(&old_as->inactive_as_with_asid_link, |
890 | list_append(&old_as->inactive_as_with_asid_link, |
891 | &inactive_as_with_asid_head); |
891 | &inactive_as_with_asid_head); |
892 | } |
892 | } |
893 | mutex_unlock(&old_as->lock); |
- | |
894 | 893 | ||
895 | /* |
894 | /* |
896 | * Perform architecture-specific tasks when the address space |
895 | * Perform architecture-specific tasks when the address space |
897 | * is being removed from the CPU. |
896 | * is being removed from the CPU. |
898 | */ |
897 | */ |
Line 900... | Line 899... | ||
900 | } |
899 | } |
901 | 900 | ||
902 | /* |
901 | /* |
903 | * Second, prepare the new address space. |
902 | * Second, prepare the new address space. |
904 | */ |
903 | */ |
905 | mutex_lock_active(&new_as->lock); |
- | |
906 | if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) { |
904 | if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) { |
907 | if (new_as->asid != ASID_INVALID) { |
905 | if (new_as->asid != ASID_INVALID) |
908 | list_remove(&new_as->inactive_as_with_asid_link); |
906 | list_remove(&new_as->inactive_as_with_asid_link); |
909 | } else { |
907 | else |
910 | /* |
- | |
911 | * Defer call to asid_get() until new_as->lock is released. |
- | |
912 | */ |
- | |
913 | needs_asid = true; |
908 | new_as->asid = asid_get(); |
914 | } |
- | |
915 | } |
909 | } |
916 | #ifdef AS_PAGE_TABLE |
910 | #ifdef AS_PAGE_TABLE |
917 | SET_PTL0_ADDRESS(new_as->genarch.page_table); |
911 | SET_PTL0_ADDRESS(new_as->genarch.page_table); |
918 | #endif |
912 | #endif |
919 | mutex_unlock(&new_as->lock); |
- | |
920 | - | ||
921 | if (needs_asid) { |
- | |
922 | /* |
- | |
923 | * Allocation of new ASID was deferred |
- | |
924 | * until now in order to avoid deadlock. |
- | |
925 | */ |
- | |
926 | asid_t asid; |
- | |
927 | - | ||
928 | asid = asid_get(); |
- | |
929 | mutex_lock_active(&new_as->lock); |
- | |
930 | new_as->asid = asid; |
- | |
931 | mutex_unlock(&new_as->lock); |
- | |
932 | } |
- | |
933 | spinlock_unlock(&inactive_as_with_asid_lock); |
- | |
934 | interrupts_restore(ipl); |
- | |
935 | 913 | ||
936 | /* |
914 | /* |
937 | * Perform architecture-specific steps. |
915 | * Perform architecture-specific steps. |
938 | * (e.g. write ASID to hardware register etc.) |
916 | * (e.g. write ASID to hardware register etc.) |
939 | */ |
917 | */ |
940 | as_install_arch(new_as); |
918 | as_install_arch(new_as); |
- | 919 | ||
- | 920 | spinlock_unlock(&asidlock); |
|
941 | 921 | ||
942 | AS = new_as; |
922 | AS = new_as; |
943 | } |
923 | } |
944 | 924 | ||
945 | /** Convert address space area flags to page flags. |
925 | /** Convert address space area flags to page flags. |
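The as_switch() rewrite drops every mutex_lock_active()/mutex_unlock() pair along with the needs_asid deferral. Under the old locking, asid_get() could not be called with new_as->lock held, because allocating may require reclaiming an ASID from some inactive address space (which would mean touching another as_t), hence the deferred assignment; with one asidlock covering the inactive list and all as->asid fields, the else branch can call asid_get() directly. The sentence added to the doc comment records the other consequence: this path must never lock the address space. Note also that spinlock_unlock(&asidlock) moved below as_install_arch(new_as), so the hardware ASID is installed while the assignment is still serialized against concurrent reclamation. A rough sketch of an asid_get() compatible with this discipline, assuming the caller holds asidlock (ASIDS_ALLOCABLE and asid_find_free() are assumptions about asid.c, not shown in this diff):

    asid_t asid_get(void)
    {
        as_t *as;
        asid_t asid;

        if (asids_allocated == ASIDS_ALLOCABLE) {
            /*
             * All ASIDs are in use: reclaim one from the head of the
             * inactive list. The victim is marked ASID_INVALID and will
             * be given a fresh ASID when it is switched to again.
             */
            as = list_get_instance(inactive_as_with_asid_head.next,
                as_t, inactive_as_with_asid_link);
            list_remove(&as->inactive_as_with_asid_link);
            asid = as->asid;
            as->asid = ASID_INVALID;
        } else {
            asid = asid_find_free();    /* assumed allocator helper */
            asids_allocated++;
        }

        return asid;
    }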