Diff between Rev 2170 and Rev 2183
@@ -55,10 +55,11 @@
 #include <arch/mm/page.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
 #include <mm/asid.h>
 #include <arch/mm/asid.h>
+#include <preemption.h>
 #include <synch/spinlock.h>
 #include <synch/mutex.h>
 #include <adt/list.h>
 #include <adt/btree.h>
 #include <proc/task.h>
@@ -179,11 +180,11 @@
     if (flags & FLAG_AS_KERNEL)
         as->asid = ASID_KERNEL;
     else
         as->asid = ASID_INVALID;

-    as->refcount = 0;
+    atomic_set(&as->refcount, 0);
     as->cpu_refcount = 0;
 #ifdef AS_PAGE_TABLE
     as->genarch.page_table = page_table_create(flags);
 #else
     page_table_create(flags);
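The refcount initialisation now goes through atomic_set() (with a matching atomic_get() in the as_destroy() assertion below), so the task reference count can be read and updated without holding the address space lock. Below is a minimal user-space sketch of the same idea using C11 <stdatomic.h>; the object_t type and the object_hold()/object_release() helpers are hypothetical names used only to illustrate the pattern, not part of the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical object with an atomic reference count, mirroring as->refcount. */
typedef struct {
    atomic_int refcount;
} object_t;

static void object_init(object_t *obj)
{
    atomic_store(&obj->refcount, 0);        /* analogue of atomic_set() */
}

static void object_hold(object_t *obj)
{
    atomic_fetch_add(&obj->refcount, 1);
}

/* Returns true when the caller has just dropped the last reference. */
static bool object_release(object_t *obj)
{
    return atomic_fetch_sub(&obj->refcount, 1) == 1;
}

int main(void)
{
    object_t obj;
    object_init(&obj);
    object_hold(&obj);
    if (object_release(&obj))
        printf("last reference dropped, safe to destroy\n");
    return 0;
}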
@@ -194,25 +195,43 @@

 /** Destroy address space.
  *
  * When there are no tasks referencing this address space (i.e. its refcount is
  * zero), the address space can be destroyed.
+ *
+ * We know that we don't hold any spinlock.
  */
 void as_destroy(as_t *as)
 {
     ipl_t ipl;
     bool cond;
+    DEADLOCK_PROBE_INIT(p_asidlock);

-    ASSERT(as->refcount == 0);
+    ASSERT(atomic_get(&as->refcount) == 0);

     /*
      * Since there is no reference to this area,
      * it is safe not to lock its mutex.
      */

-    ipl = interrupts_disable();
-    spinlock_lock(&asidlock);
+    /*
+     * We need to avoid deadlock between TLB shootdown and asidlock.
+     * We therefore try to take asid conditionally and if we don't succeed,
+     * we enable interrupts and try again. This is done while preemption is
+     * disabled to prevent nested context switches. We also depend on the
+     * fact that so far no spinlocks are held.
+     */
+    preemption_disable();
+    ipl = interrupts_read();
+retry:
+    interrupts_disable();
+    if (!spinlock_trylock(&asidlock)) {
+        interrupts_enable();
+        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
+        goto retry;
+    }
+    preemption_enable();    /* Interrupts disabled, enable preemption */
     if (as->asid != ASID_INVALID && as != AS_KERNEL) {
         if (as != AS && as->cpu_refcount == 0)
             list_remove(&as->inactive_as_with_asid_link);
         asid_put(as->asid);
     }
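The rewritten locking prologue of as_destroy() is what the new comment describes: instead of spinning on asidlock with interrupts disabled (which could deadlock against a concurrent TLB shootdown), the code disables preemption, tries the lock with spinlock_trylock(), and on failure briefly re-enables interrupts so pending shootdown work can complete before retrying. The following is a minimal user-space sketch of that try/back-off/retry pattern using POSIX spinlocks; service_pending_work() is a hypothetical stand-in for the "re-enable interrupts briefly" step and simply yields the CPU.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_spinlock_t asidlock_analogue;

/* Hypothetical stand-in for "re-enable interrupts briefly so pending
 * TLB-shootdown work can run"; in user space we simply yield the CPU. */
static void service_pending_work(void)
{
    sched_yield();
}

/* Mirror of the kernel's retry loop: try the lock, and if we cannot get
 * it (the holder may be waiting for us), step aside and try again. */
static void lock_with_backoff(void)
{
    while (pthread_spin_trylock(&asidlock_analogue) != 0)
        service_pending_work();
}

int main(void)
{
    pthread_spin_init(&asidlock_analogue, PTHREAD_PROCESS_PRIVATE);
    lock_with_backoff();
    printf("lock acquired without risking deadlock\n");
    pthread_spin_unlock(&asidlock_analogue);
    pthread_spin_destroy(&asidlock_analogue);
    return 0;
}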
@@ -470,19 +489,20 @@
         }

         /*
          * Finish TLB shootdown sequence.
          */
+
         tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
             area->pages - pages);
-        tlb_shootdown_finalize();
-
         /*
          * Invalidate software translation caches (e.g. TSB on sparc64).
          */
         as_invalidate_translation_cache(as, area->base +
             pages * PAGE_SIZE, area->pages - pages);
+        tlb_shootdown_finalize();
+
     } else {
         /*
          * Growing the area.
          * Check for overlaps with other address space areas.
          */
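The change in this hunk (and the analogous one in the next hunk) moves tlb_shootdown_finalize() so that the other CPUs are released from the shootdown barrier only after both the hardware TLB entries and any software translation caches (e.g. the TSB on sparc64) have been invalidated. A rough sketch of that begin/invalidate/finalize ordering follows; the functions are stubs with simplified signatures standing in for the kernel primitives and only demonstrate the ordering, not the real IPI machinery.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef int asid_t;   /* simplified stand-in for the kernel's asid_t */

/* Stub shootdown primitives with simplified signatures; in the kernel
 * these coordinate all CPUs via IPIs. */
static void tlb_shootdown_start(void)
{
    puts("shootdown start: other CPUs parked");
}

static void tlb_invalidate_pages(asid_t asid, uintptr_t base, size_t pages)
{
    printf("invalidate HW TLB: asid=%d base=%#lx pages=%zu\n",
        asid, (unsigned long) base, pages);
}

static void as_invalidate_translation_cache(uintptr_t base, size_t pages)
{
    printf("invalidate SW cache (e.g. TSB): base=%#lx pages=%zu\n",
        (unsigned long) base, pages);
}

static void tlb_shootdown_finalize(void)
{
    puts("shootdown finalize: other CPUs released");
}

int main(void)
{
    /* Order matters: finalize (release the other CPUs) only after every
     * stale translation, hardware and software, has been invalidated. */
    tlb_shootdown_start();
    tlb_invalidate_pages(1, 0x40000000UL, 4);
    as_invalidate_translation_cache(0x40000000UL, 4);
    tlb_shootdown_finalize();
    return 0;
}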
@@ -566,18 +586,18 @@
         }

         /*
          * Finish TLB shootdown sequence.
          */
+
         tlb_invalidate_pages(as->asid, area->base, area->pages);
-        tlb_shootdown_finalize();
-
         /*
          * Invalidate potential software translation caches (e.g. TSB on
          * sparc64).
          */
         as_invalidate_translation_cache(as, area->base, area->pages);
+        tlb_shootdown_finalize();

         btree_destroy(&area->used_space);

         area->attributes |= AS_AREA_ATTR_PARTIAL;

@@ -865,16 +885,33 @@
  *
  * Note that this function cannot sleep as it is essentially a part of
  * scheduling. Sleeping here would lead to deadlock on wakeup. Another
  * thing which is forbidden in this context is locking the address space.
  *
+ * When this function is entered, no spinlocks may be held.
+ *
  * @param old Old address space or NULL.
  * @param new New address space.
  */
 void as_switch(as_t *old_as, as_t *new_as)
 {
-    spinlock_lock(&asidlock);
+    DEADLOCK_PROBE_INIT(p_asidlock);
+    preemption_disable();
+retry:
+    (void) interrupts_disable();
+    if (!spinlock_trylock(&asidlock)) {
+        /*
+         * Avoid deadlock with TLB shootdown.
+         * We can enable interrupts here because
+         * preemption is disabled. We should not be
+         * holding any other lock.
+         */
+        (void) interrupts_enable();
+        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
+        goto retry;
+    }
+    preemption_enable();

     /*
      * First, take care of the old address space.
      */
     if (old_as) {