/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/tsb.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <mm/as.h>
#include <arch/types.h>
#include <macros.h>
#include <debug.h>

#define TSB_INDEX_MASK	((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
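
/*
 * Assuming 8K MMU pages (MMU_PAGE_WIDTH == 13), the mask above reduces to
 * (512 << TSB_SIZE) - 1, i.e. the highest valid index into a TSB with
 * 512 << TSB_SIZE entries; this is consistent with the ITSB_ENTRY_COUNT and
 * DTSB_ENTRY_COUNT assertions below.
 */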

/** Invalidate portion of TSB.
 *
 * We assume that the address space is already locked. Note that the
 * respective portions of both the ITSB and the DTSB are invalidated at the
 * same time.
 *
 * @param as	Address space.
 * @param page	First page to invalidate in TSB.
 * @param pages	Number of pages to invalidate. Value of (size_t) -1 means the
 *		whole TSB.
 */
void tsb_invalidate(as_t *as, uintptr_t page, size_t pages)
{
	size_t i0;
	size_t i;
	size_t cnt;

	ASSERT(as->arch.itsb && as->arch.dtsb);

	i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
	ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);

	if (pages == (size_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
		cnt = ITSB_ENTRY_COUNT;
	else
		cnt = pages * 2;

	for (i = 0; i < cnt; i++) {
		as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
		    true;
		as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
		    true;
	}
}
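
/*
 * Usage sketch (hypothetical caller, assuming a software page consists of
 * two 8K MMU subpages, which is why the loop above touches pages * 2
 * entries): with the address space lock held, an unmap path could drop the
 * TSB entries covering a single software page with
 *
 *	tsb_invalidate(as, page, 1);
 *
 * whereas passing (size_t) -1 as the page count invalidates both TSBs in
 * their entirety.
 */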

/** Copy software PTE to ITSB.
 *
 * @param t	Software PTE.
 * @param index	Zero for the lower 8K subpage, one for the upper 8K subpage.
 */
void itsb_pte_copy(pte_t *t, size_t index)
{
	as_t *as;
	tsb_entry_t *tsb;
	size_t entry;

	ASSERT(index <= 1);

	as = t->as;
	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
	ASSERT(entry < ITSB_ENTRY_COUNT);
	tsb = &as->arch.itsb[entry];

	/*
	 * We use write barriers to make sure that the TSB load either sees
	 * consistent data or the fault is repeated.
	 */

	tsb->tag.invalid = true;	/* invalidate the entry
					 * (tag target has this
					 * set to 0) */

	write_barrier();

	tsb->tag.context = as->asid;
	/* the shift is bigger than PAGE_WIDTH, do not bother with index */
	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
	tsb->data.value = 0;
	tsb->data.size = PAGESIZE_8K;
	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
	tsb->data.cp = t->c;	/* cp as cacheable (phys.-indexed), c as cacheable */
	tsb->data.p = t->k;	/* p as privileged, k as kernel */
	tsb->data.v = t->p;	/* v as valid, p as present */

	write_barrier();

	tsb->tag.invalid = false;	/* mark the entry as valid */
}
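
/*
 * Usage sketch (hypothetical caller, not taken from this file): assuming a
 * software page is made of two 8K MMU subpages, an ITLB miss handler that
 * has looked up the software PTE t for a faulting virtual address va could
 * prime the ITSB for the subpage that actually missed:
 *
 *	size_t index = (va >> MMU_PAGE_WIDTH) & 1;
 *	itsb_pte_copy(t, index);
 *
 * Masking with 1 mirrors the ASSERT(index <= 1) above.
 */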

/** Copy software PTE to DTSB.
 *
 * @param t	Software PTE.
 * @param index	Zero for the lower 8K subpage, one for the upper 8K subpage.
 * @param ro	If true, the mapping is copied read-only.
 */
void dtsb_pte_copy(pte_t *t, size_t index, bool ro)
{
	as_t *as;
	tsb_entry_t *tsb;
	size_t entry;

	ASSERT(index <= 1);

	as = t->as;
	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
	ASSERT(entry < DTSB_ENTRY_COUNT);
	tsb = &as->arch.dtsb[entry];

	/*
	 * We use write barriers to make sure that the TSB load either sees
	 * consistent data or the fault is repeated.
	 */

	tsb->tag.invalid = true;	/* invalidate the entry
					 * (tag target has this
					 * set to 0) */

	write_barrier();

	tsb->tag.context = as->asid;
	/* the shift is bigger than PAGE_WIDTH, do not bother with index */
	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
	tsb->data.value = 0;
	tsb->data.size = PAGESIZE_8K;
	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
	tsb->data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
	tsb->data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
	tsb->data.p = t->k;	/* p as privileged */
	tsb->data.w = ro ? false : t->w;
	tsb->data.v = t->p;

	write_barrier();

	tsb->tag.invalid = false;	/* mark the entry as valid */
}
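
/*
 * Usage sketch (hypothetical caller, not taken from this file): the ro flag
 * lets a DTLB miss path install a mapping read-only first,
 *
 *	dtsb_pte_copy(t, index, true);
 *
 * and grant write permission only after a later write fault has been
 * resolved against the software PTE, at which point
 *
 *	dtsb_pte_copy(t, index, false);
 *
 * copies t->w into the entry's w bit.
 */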

/** @}
 */