/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/as.h>
#include <arch/mm/tlb.h>
#include <genarch/mm/as_ht.h>
#include <genarch/mm/asid_fifo.h>
#include <debug.h>
#include <config.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#include <arch/memstr.h>
#include <synch/mutex.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <bitops.h>
#include <macros.h>
#endif /* CONFIG_TSB */

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

/** Architecture dependent address space init. */
void as_arch_init(void)
{
    if (config.cpu_active == 1) {
        as_operations = &as_ht_operations;
        asid_fifo_init();
    }
}

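/** Allocate and zero out the instruction and data TSBs of an address space.
 *
 * @param as    Address space being constructed.
 * @param flags Frame allocation flags.
 *
 * @return 0 on success, -1 if the TSB frames could not be allocated.
 */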
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
        sizeof(tsb_entry_t)) >> FRAME_WIDTH);
    uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);

    if (!tsb)
        return -1;

    as->arch.itsb = (tsb_entry_t *) tsb;
    as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
        sizeof(tsb_entry_t));
    memsetb((uintptr_t) as->arch.itsb,
        (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
    return 0;
}

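/** Release the TSBs of an address space that is being destroyed.
 *
 * @param as Address space being destroyed.
 *
 * @return Number of frames freed.
 */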
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
    count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
        sizeof(tsb_entry_t)) >> FRAME_WIDTH;
    frame_free(KA2PA((uintptr_t) as->arch.itsb));
    return cnt;
#else
    return 0;
#endif
}

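/** Perform sparc64-specific tasks upon address space creation.
 *
 * Invalidate all entries of the newly allocated TSBs.
 *
 * @param as    Address space being created.
 * @param flags Address space flags (not used here).
 *
 * @return Always 0.
 */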
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock_active(&as->lock);    /* completely unnecessary, but polite */
    tsb_invalidate(as, 0, (count_t) -1);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
#endif
    return 0;
}

/** Perform sparc64-specific tasks when an address space becomes active
 * on the processor.
 *
 * Install ASID and map TSBs.
 *
 * @param as Address space.
 */
void as_install_arch(as_t *as)
{
    tlb_context_reg_t ctx;

    /*
     * Note that we don't lock the address space.
     * That's correct - we can afford it here
     * because we only read members that are
     * currently read-only.
     */

    /*
     * Write ASID to secondary context register.
     * The primary context register has to be set
     * from TL>0 so it will be filled from the
     * secondary context register from the TL=1
     * code just before switch to userspace.
     */
    ctx.v = 0;
    ctx.context = as->asid;
    mmu_secondary_context_write(ctx.v);

#ifdef CONFIG_TSB
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

    ASSERT(as->arch.itsb && as->arch.dtsb);

    uintptr_t tsb = (uintptr_t) as->arch.itsb;

    if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
        /*
         * TSBs were allocated from memory not covered
         * by the locked 4M kernel DTLB entry. We need
         * to map both TSBs explicitly.
         */
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
    }

    /*
     * Setup TSB Base registers.
     */
    tsb_base_reg_t tsb_base;

    tsb_base.value = 0;
    tsb_base.size = TSB_SIZE;
    tsb_base.split = 0;

    tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
    itsb_base_write(tsb_base.value);
    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
    dtsb_base_write(tsb_base.value);
#endif
#ifdef CONFIG_VIRT_IDX_DCACHE
    if (as->dcache_flush_on_install) {
        /*
         * Some mappings in this address space are illegal address
         * aliases. Upon their creation, the dcache_flush_on_install
         * flag was set.
         *
         * We are now obliged to flush the D-cache in order to guarantee
         * that there will be at most one cache line for each address
         * alias.
         *
         * This flush performs a cleanup after another address space in
         * which the alias might have existed.
         */
        dcache_flush();
    }
#endif /* CONFIG_VIRT_IDX_DCACHE */
}

/** Perform sparc64-specific tasks when an address space is removed
 * from the processor.
 *
 * Demap TSBs.
 *
 * @param as Address space.
 */
void as_deinstall_arch(as_t *as)
{
    /*
     * Note that we don't lock the address space.
     * That's correct - we can afford it here
     * because we only read members that are
     * currently read-only.
     */

#ifdef CONFIG_TSB
    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

    ASSERT(as->arch.itsb && as->arch.dtsb);

    uintptr_t tsb = (uintptr_t) as->arch.itsb;

    if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
        /*
         * TSBs were allocated from memory not covered
         * by the locked 4M kernel DTLB entry. We need
         * to demap the entry installed by as_install_arch().
         */
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
    }
#endif
#ifdef CONFIG_VIRT_IDX_DCACHE
    if (as->dcache_flush_on_deinstall) {
        /*
         * Some mappings in this address space are illegal address
         * aliases. Upon their creation, the dcache_flush_on_deinstall
         * flag was set.
         *
         * We are now obliged to flush the D-cache in order to guarantee
         * that there will be at most one cache line for each address
         * alias.
         *
         * This flush performs a cleanup after this address space. It is
         * necessary because other address spaces that contain the same
         * alias are not necessarily aware of the need to carry out the
         * cache flush. The only address spaces that are aware of it are
         * those that created the illegal alias.
         */
        dcache_flush();
    }
#endif /* CONFIG_VIRT_IDX_DCACHE */
}

/** @}
 */