--- Rev 2131
+++ Rev 2292
@@ -40,11 +40,10 @@
 #include <config.h>
 
 #ifdef CONFIG_TSB
 #include <arch/mm/tsb.h>
 #include <arch/memstr.h>
-#include <synch/mutex.h>
 #include <arch/asm.h>
 #include <mm/frame.h>
 #include <bitops.h>
 #include <macros.h>
 #endif /* CONFIG_TSB */
@@ -59,48 +58,50 @@
 }
 
 int as_constructor_arch(as_t *as, int flags)
 {
 #ifdef CONFIG_TSB
+	/*
+	 * The order must be calculated with respect to the emulated
+	 * 16K page size.
+	 */
 	int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
 	    sizeof(tsb_entry_t)) >> FRAME_WIDTH);
 	uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
 
 	if (!tsb)
 		return -1;
 
 	as->arch.itsb = (tsb_entry_t *) tsb;
 	as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
 	    sizeof(tsb_entry_t));
-	memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
-	    * sizeof(tsb_entry_t), 0);
+	memsetb((uintptr_t) as->arch.itsb,
+	    (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
 #endif
 	return 0;
 }
 
 int as_destructor_arch(as_t *as)
 {
 #ifdef CONFIG_TSB
+	/*
+	 * The count must be calculated with respect to the emulated 16K page
+	 * size.
+	 */
 	count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
 	    sizeof(tsb_entry_t)) >> FRAME_WIDTH;
 	frame_free(KA2PA((uintptr_t) as->arch.itsb));
 	return cnt;
 #else
 	return 0;
 #endif
 }
 
 int as_create_arch(as_t *as, int flags)
 {
 #ifdef CONFIG_TSB
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	mutex_lock_active(&as->lock);	/* completely unnecessary, but polite */
 	tsb_invalidate(as, 0, (count_t) -1);
-	mutex_unlock(&as->lock);
-	interrupts_restore(ipl);
 #endif
 	return 0;
 }
 
 /** Perform sparc64-specific tasks when an address space becomes active on the
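
The comments added to as_constructor_arch() and as_destructor_arch() deserve a concrete illustration: the two TSBs are sized in native 8K MMU frames (FRAME_WIDTH is 13 on this port) even though the kernel emulates 16K pages on top of them. Below is a minimal standalone sketch of the order arithmetic, assuming 2048 entries per TSB and 16-byte TSB entries (both values are assumptions chosen for illustration, not stated by this diff), and assuming fnzb32() returns the index of the most significant non-zero bit:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed values for illustration; the real ones live in arch headers. */
    #define ITSB_ENTRY_COUNT  2048
    #define DTSB_ENTRY_COUNT  2048
    #define TSB_ENTRY_SIZE    16    /* stand-in for sizeof(tsb_entry_t) */
    #define FRAME_WIDTH       13    /* native 8K frames */

    /* Index of the most significant non-zero bit, i.e. floor(log2(v)). */
    static int fnzb32(uint32_t v)
    {
        int n = -1;

        while (v) {
            v >>= 1;
            n++;
        }
        return n;
    }

    int main(void)
    {
        uint32_t bytes = (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * TSB_ENTRY_SIZE;
        int order = fnzb32(bytes >> FRAME_WIDTH);

        /* 4096 entries * 16 B = 64 KiB = 8 frames, hence order 3 (2^3 frames). */
        printf("%u bytes -> order %d\n", (unsigned) bytes, order);
        return 0;
    }

The destructor's count is the same quantity without the logarithm: the number of 8K frames occupied, returned so the generic layer can account for the freed frames.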
@@ -113,22 +114,21 @@
 void as_install_arch(as_t *as)
 {
 	tlb_context_reg_t ctx;
 
 	/*
-	 * Note that we don't lock the address space.
-	 * That's correct - we can afford it here
-	 * because we only read members that are
-	 * currently read-only.
+	 * Note that we don't and may not lock the address space. That's ok
+	 * since we only read members that are currently read-only.
+	 *
+	 * Moreover, the as->asid is protected by asidlock, which is being held.
 	 */
 
 	/*
-	 * Write ASID to secondary context register.
-	 * The primary context register has to be set
-	 * from TL>0 so it will be filled from the
-	 * secondary context register from the TL=1
-	 * code just before switch to userspace.
+	 * Write ASID to secondary context register. The primary context
+	 * register has to be set from TL>0 so it will be filled from the
+	 * secondary context register from the TL=1 code just before switch to
+	 * userspace.
 	 */
 	ctx.v = 0;
 	ctx.context = as->asid;
 	mmu_secondary_context_write(ctx.v);
 
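
The rewritten locking comment now also documents that as->asid is protected by asidlock, held by the caller. As for the register write itself, ctx is a union over the 64-bit MMU context register. A hypothetical sketch of such a wrapper (the UltraSPARC context registers keep a 13-bit context number in the low bits; the exact HelenOS declaration of tlb_context_reg_t may differ):

    #include <stdint.h>

    /*
     * Hypothetical stand-in for tlb_context_reg_t: a 64-bit MMU context
     * register with the 13-bit context number (ASID) in bits 12:0.
     * Bitfield order assumes a big-endian target such as sparc64.
     */
    typedef union {
        uint64_t v;
        struct {
            uint64_t : 51;          /* reserved */
            unsigned context : 13;  /* context number / ASID */
        } __attribute__((packed));
    } ctx_reg_sketch_t;

Clearing ctx.v first zeroes the reserved bits, so the subsequent mmu_secondary_context_write() stores nothing but the ASID.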
@@ -137,11 +137,11 @@
 
 	ASSERT(as->arch.itsb && as->arch.dtsb);
 
 	uintptr_t tsb = (uintptr_t) as->arch.itsb;
 
-	if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
 		/*
 		 * TSBs were allocated from memory not covered
 		 * by the locked 4M kernel DTLB entry. We need
 		 * to map both TSBs explicitly.
 		 */
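
The PAGE_SIZE to MMU_PAGE_SIZE switch changes the tested interval to eight native 8K pages (64 KiB), which matches the combined size of both TSBs under the entry counts assumed earlier. overlaps() itself is an ordinary half-open interval intersection test; rendered as a function for clarity (in the kernel it is a macro from <macros.h>, so treat this as an equivalent sketch rather than the literal definition):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* True iff [s1, s1 + sz1) and [s2, s2 + sz2) intersect. */
    static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
    {
        return (s1 < s2 + sz2) && (s2 < s1 + sz1);
    }

When the TSBs fall outside the interval covered by the locked 4M kernel DTLB entry, the negated test succeeds and both TSBs are mapped explicitly.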
@@ -156,13 +156,13 @@
 
 	tsb_base.value = 0;
 	tsb_base.size = TSB_SIZE;
 	tsb_base.split = 0;
 
-	tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
+	tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
 	itsb_base_write(tsb_base.value);
-	tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
+	tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
 	dtsb_base_write(tsb_base.value);
 #endif
 }
 
 /** Perform sparc64-specific tasks when an address space is removed from the
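
The same rename matters here because the TSB base registers take the base address in units of the native 8K page, hence the shift by MMU_PAGE_WIDTH (13) rather than the emulated page width. A hypothetical sketch of the register layout, following the UltraSPARC User's Manual description (field names mirror the tsb_base usage above; the precise HelenOS declaration may differ):

    #include <stdint.h>

    /*
     * Hypothetical sketch of an UltraSPARC I/DTSB Base register:
     * bits 63:13 hold the TSB base address, bit 12 selects split mode,
     * bits 2:0 encode the size (number of entries = 512 * 2^size).
     * Bitfield order assumes a big-endian target such as sparc64.
     */
    typedef union {
        uint64_t value;
        struct {
            uint64_t base : 51;  /* TSB base address >> 13 */
            unsigned split : 1;  /* split 8K/64K halves */
            unsigned : 9;        /* reserved */
            unsigned size : 3;   /* 512 * 2^size entries */
        } __attribute__((packed));
    } tsb_base_reg_sketch_t;

Under that encoding, TSB_SIZE == 2 would give 2048 entries per TSB, consistent with the 8 * MMU_PAGE_SIZE overlap check above; again an assumption, not something this diff states.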
@@ -174,24 +174,24 @@
  */
 void as_deinstall_arch(as_t *as)
 {
 
 	/*
-	 * Note that we don't lock the address space.
-	 * That's correct - we can afford it here
-	 * because we only read members that are
-	 * currently read-only.
+	 * Note that we don't and may not lock the address space. That's ok
+	 * since we only read members that are currently read-only.
+	 *
+	 * Moreover, the as->asid is protected by asidlock, which is being held.
	 */
 
 #ifdef CONFIG_TSB
 	uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
 
 	ASSERT(as->arch.itsb && as->arch.dtsb);
 
 	uintptr_t tsb = (uintptr_t) as->arch.itsb;
 
-	if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+	if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
 		/*
 		 * TSBs were allocated from memory not covered
 		 * by the locked 4M kernel DTLB entry. We need
 		 * to demap the entry installed by as_install_arch().
 		 */