/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *t, index_t index, bool ro);
static void itlb_pte_copy(pte_t *t, index_t index);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str);

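/*
 * Descriptive note (added): human-readable names for the two-bit MMU context
 * selection (cf. the ct field of the SFSR): primary, secondary and nucleus;
 * the fourth encoding is reserved.
 */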
char *context_encoding[] = {
	"Primary",
	"Secondary",
	"Nucleus",
	"Reserved"
};

void tlb_arch_init(void)
{
	/*
	 * Invalidate all non-locked DTLB and ITLB entries.
	 */
	tlb_invalidate_all();

	/*
	 * Clear both SFSRs.
	 */
	dtlb_sfsr_write(0);
	itlb_sfsr_write(0);
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param pagesize Page size.
 * @param locked True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = page;
	fr.address = frame;

	tag.value = ASID_KERNEL;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = pagesize;
	data.pfn = fr.pfn;
	data.l = locked;
	data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = true;
	data.w = true;
	data.g = false;

	dtlb_data_in_write(data.value);
}

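/*
 * Illustrative use of dtlb_insert_mapping() (an assumed example, not a call
 * taken from this file): pin an 8K kernel mapping so that it survives
 * tlb_invalidate_all(), which skips locked entries:
 *
 *	dtlb_insert_mapping(page, frame, PAGESIZE_8K, true, true);
 *
 * The pte_copy() helpers below cope with the kernel's base page size
 * (PAGE_SIZE) being larger than the hardware MMU page size (MMU_PAGE_SIZE,
 * 8K): each PTE covers MMU_PAGES_PER_PAGE consecutive 8K MMU pages and the
 * index argument selects which 8K subpage is loaded into the TLB
 * (16K-page emulation).
 */
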
/** Copy PTE to DTLB.
 *
 * @param t Page Table Entry to be copied.
 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
 * @param ro If true, the entry will be created read-only, regardless of its
 *	w field.
 */
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	data.p = t->k;		/* p like privileged */
	data.w = ro ? false : t->w;
	data.g = t->g;

	dtlb_data_in_write(data.value);
}

/** Copy PTE to ITLB.
 *
 * @param t Page Table Entry to be copied.
 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
 */
void itlb_pte_copy(pte_t *t, index_t index)
{
	tlb_tag_access_reg_t tag;
	tlb_data_t data;
	page_address_t pg;
	frame_address_t fr;

	pg.address = t->page + (index << MMU_PAGE_WIDTH);
	fr.address = t->frame + (index << MMU_PAGE_WIDTH);

	tag.value = 0;
	tag.context = t->as->asid;
	tag.vpn = pg.vpn;

	itlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;
	data.size = PAGESIZE_8K;
	data.pfn = fr.pfn;
	data.l = false;
	data.cp = t->c;
	data.p = t->k;		/* p like privileged */
	data.w = false;
	data.g = t->g;

	itlb_data_in_write(data.value);
}

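/*
 * Descriptive note (added): the fast_* handlers below share a common pattern.
 * The faulting page is looked up in the software page hash table
 * (page_mapping_find() under page_table_lock()); if a suitable mapping
 * exists, it is marked accessed and loaded into the respective TLB (and TSB
 * when CONFIG_TSB is enabled). Otherwise the fault is forwarded to the
 * generic as_page_fault() handler, and only if that fails is the fault
 * treated as fatal via the do_fast_*_fault() helpers.
 */
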
/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
	uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
	index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
	pte_t *t;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t && PTE_EXECUTABLE(t)) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into ITLB.
		 */
		t->a = true;
		itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
		itsb_pte_copy(t, index);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			do_fast_instruction_access_mmu_miss_fault(istate,
			    __func__);
		}
	}
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag Content of the TLB Tag Access register as it existed when the
 *	trap happened. This is to prevent confusion created by a clobbered
 *	Tag Access register during a nested DTLB miss.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t va;
	index_t index;
	pte_t *t;

	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;

	if (tag.context == ASID_KERNEL) {
		if (!tag.vpn) {
			/* NULL access in kernel */
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __func__);
		}
		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
		    "kernel page fault.");
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into DTLB.
		 */
		t->a = true;
		dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, true);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			do_fast_data_access_mmu_miss_fault(istate, tag,
			    __func__);
		}
	}
}

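/*
 * Descriptive note (added): the DTLB miss handler above inserts the mapping
 * read-only (ro == true) even for writable pages. The first write through
 * such an entry therefore raises fast_data_access_protection(), which
 * records the modification in the dirty bit (t->d) and replaces the entry
 * with a writable one.
 */
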
/** DTLB protection fault handler.
 *
 * @param tag Content of the TLB Tag Access register as it existed when the
 *	trap happened. This is to prevent confusion created by a clobbered
 *	Tag Access register during a nested DTLB miss.
 * @param istate Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
	uintptr_t va;
	index_t index;
	pte_t *t;

	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t && PTE_WRITABLE(t)) {
		/*
		 * The mapping was found in the software page hash table and is
		 * writable. Demap the old mapping and insert an updated mapping
		 * into DTLB.
		 */
		t->a = true;
		t->d = true;
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
		    va + index * MMU_PAGE_SIZE);
		dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
		dtsb_pte_copy(t, index, false);
#endif
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
			do_fast_data_access_protection_fault(istate, tag,
			    __func__);
		}
	}
}

/** Print contents of both TLBs. */
void tlb_print(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	printf("I-TLB contents:\n");
	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
		d.value = itlb_data_access_read(i);
		t.value = itlb_tag_read_read(i);

		printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
		    "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
		    "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
		    t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
		    d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
	}

	printf("D-TLB contents:\n");
	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
		d.value = dtlb_data_access_read(i);
		t.value = dtlb_tag_read_read(i);

		printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
		    "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
		    "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
		    t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
		    d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
	}
}

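/*
 * Descriptive note (added): the do_fast_*_fault() helpers below handle the
 * unrecoverable cases. If the trap originated in userspace,
 * fault_if_from_uspace() deals with the offending task; faults originating
 * in the kernel fall through to a dump of the interrupted state and panic().
 */
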
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
	fault_if_from_uspace(istate, "%s\n", str);
	dump_istate(istate);
	panic("%s\n", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
	uintptr_t va;

	va = tag.vpn << MMU_PAGE_WIDTH;
	if (tag.context) {
		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
		    tag.context);
	}
	dump_istate(istate);
	printf("Faulting page: %p, ASID=%d\n", va, tag.context);
	panic("%s\n", str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
	uintptr_t va;

	va = tag.vpn << MMU_PAGE_WIDTH;

	if (tag.context) {
		fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
		    tag.context);
	}
	printf("Faulting page: %p, ASID=%d\n", va, tag.context);
	dump_istate(istate);
	panic("%s\n", str);
}

void dump_sfsr_and_sfar(void)
{
	tlb_sfsr_reg_t sfsr;
	uintptr_t sfar;

	sfsr.value = dtlb_sfsr_read();
	sfar = dtlb_sfar_read();

	printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
	    "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
	    sfsr.ow, sfsr.fv);
	printf("DTLB SFAR: address=%p\n", sfar);

	dtlb_sfsr_write(0);
}

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
	int i;
	tlb_data_t d;
	tlb_tag_read_reg_t t;

	/*
	 * Walk all ITLB and DTLB entries and remove all unlocked mappings.
	 *
	 * The kernel doesn't use global mappings so any locked global mappings
	 * found must have been created by someone else. Their only purpose now
	 * is to collide with proper mappings. Invalidate immediately. It should
	 * be safe to invalidate them as late as now.
	 */

	for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
		d.value = itlb_data_access_read(i);
		if (!d.l || d.g) {
			t.value = itlb_tag_read_read(i);
			d.v = false;
			itlb_tag_access_write(t.value);
			itlb_data_access_write(i, d.value);
		}
	}

	for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
		d.value = dtlb_data_access_read(i);
		if (!d.l || d.g) {
			t.value = dtlb_tag_read_read(i);
			d.v = false;
			dtlb_tag_access_write(t.value);
			dtlb_data_access_write(i, d.value);
		}
	}
}

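/*
 * Descriptive note (added): both invalidation routines below rely on the
 * same trick. The demap operations target the MMU primary context, so the
 * ASID to be flushed is temporarily installed in the primary context
 * register. Because the kernel itself is mapped through the primary
 * context, the code first switches to the nucleus context and restores the
 * original primary context before leaving it again.
 */
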
/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_context_reg_t pc_save, ctx;

	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_primary_context_write(ctx.v);

	itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

	mmu_primary_context_write(pc_save.v);

	nucleus_leave();
}

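/*
 * Descriptive note (added): the page/cnt arguments below are in terms of the
 * kernel's PAGE_SIZE pages; every such page expands to MMU_PAGES_PER_PAGE
 * demap operations on 8K MMU pages.
 */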
/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out from ITLB and DTLB.
 * @param cnt Number of pages to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
	unsigned int i;
	tlb_context_reg_t pc_save, ctx;

	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();

	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_primary_context_write(ctx.v);

	for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
		itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
		    page + i * MMU_PAGE_SIZE);
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
		    page + i * MMU_PAGE_SIZE);
	}

	mmu_primary_context_write(pc_save.v);

	nucleus_leave();
}

/** @}
 */