Rev 4344 | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 4344 | Rev 4691 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (c) 2006 Jakub Jermar |
2 | * Copyright (c) 2006 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup ia64mm |
29 | /** @addtogroup ia64mm |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | /** @file |
32 | /** @file |
33 | */ |
33 | */ |
34 | 34 | ||
35 | /* |
35 | /* |
36 | * TLB management. |
36 | * TLB management. |
37 | */ |
37 | */ |
38 | 38 | ||
39 | #include <mm/tlb.h> |
39 | #include <mm/tlb.h> |
40 | #include <mm/asid.h> |
40 | #include <mm/asid.h> |
41 | #include <mm/page.h> |
41 | #include <mm/page.h> |
42 | #include <mm/as.h> |
42 | #include <mm/as.h> |
43 | #include <arch/mm/tlb.h> |
43 | #include <arch/mm/tlb.h> |
44 | #include <arch/mm/page.h> |
44 | #include <arch/mm/page.h> |
45 | #include <arch/mm/vhpt.h> |
45 | #include <arch/mm/vhpt.h> |
46 | #include <arch/barrier.h> |
46 | #include <arch/barrier.h> |
47 | #include <arch/interrupt.h> |
47 | #include <arch/interrupt.h> |
48 | #include <arch/pal/pal.h> |
48 | #include <arch/pal/pal.h> |
49 | #include <arch/asm.h> |
49 | #include <arch/asm.h> |
50 | #include <panic.h> |
50 | #include <panic.h> |
51 | #include <print.h> |
51 | #include <print.h> |
52 | #include <arch.h> |
52 | #include <arch.h> |
53 | #include <interrupt.h> |
53 | #include <interrupt.h> |
54 | 54 | ||
55 | /** Invalidate all TLB entries. */ |
55 | /** Invalidate all TLB entries. */ |
56 | void tlb_invalidate_all(void) |
56 | void tlb_invalidate_all(void) |
57 | { |
57 | { |
58 | ipl_t ipl; |
58 | ipl_t ipl; |
59 | uintptr_t adr; |
59 | uintptr_t adr; |
60 | uint32_t count1, count2, stride1, stride2; |
60 | uint32_t count1, count2, stride1, stride2; |
61 | 61 | ||
62 | unsigned int i, j; |
62 | unsigned int i, j; |
63 | 63 | ||
64 | adr = PAL_PTCE_INFO_BASE(); |
64 | adr = PAL_PTCE_INFO_BASE(); |
65 | count1 = PAL_PTCE_INFO_COUNT1(); |
65 | count1 = PAL_PTCE_INFO_COUNT1(); |
66 | count2 = PAL_PTCE_INFO_COUNT2(); |
66 | count2 = PAL_PTCE_INFO_COUNT2(); |
67 | stride1 = PAL_PTCE_INFO_STRIDE1(); |
67 | stride1 = PAL_PTCE_INFO_STRIDE1(); |
68 | stride2 = PAL_PTCE_INFO_STRIDE2(); |
68 | stride2 = PAL_PTCE_INFO_STRIDE2(); |
69 | 69 | ||
70 | ipl = interrupts_disable(); |
70 | ipl = interrupts_disable(); |
71 | 71 | ||
72 | for (i = 0; i < count1; i++) { |
72 | for (i = 0; i < count1; i++) { |
73 | for (j = 0; j < count2; j++) { |
73 | for (j = 0; j < count2; j++) { |
74 | asm volatile ( |
74 | asm volatile ( |
75 | "ptc.e %0 ;;" |
75 | "ptc.e %0 ;;" |
76 | : |
76 | : |
77 | : "r" (adr) |
77 | : "r" (adr) |
78 | ); |
78 | ); |
79 | adr += stride2; |
79 | adr += stride2; |
80 | } |
80 | } |
81 | adr += stride1; |
81 | adr += stride1; |
82 | } |
82 | } |
83 | 83 | ||
84 | interrupts_restore(ipl); |
84 | interrupts_restore(ipl); |
85 | 85 | ||
86 | srlz_d(); |
86 | srlz_d(); |
87 | srlz_i(); |
87 | srlz_i(); |
88 | #ifdef CONFIG_VHPT |
88 | #ifdef CONFIG_VHPT |
89 | vhpt_invalidate_all(); |
89 | vhpt_invalidate_all(); |
90 | #endif |
90 | #endif |
91 | } |
91 | } |
92 | 92 | ||
93 | /** Invalidate entries belonging to an address space. |
93 | /** Invalidate entries belonging to an address space. |
94 | * |
94 | * |
95 | * @param asid Address space identifier. |
95 | * @param asid Address space identifier. |
96 | */ |
96 | */ |
97 | void tlb_invalidate_asid(asid_t asid) |
97 | void tlb_invalidate_asid(asid_t asid) |
98 | { |
98 | { |
99 | tlb_invalidate_all(); |
99 | tlb_invalidate_all(); |
100 | } |
100 | } |
101 | 101 | ||
102 | 102 | ||
103 | void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
103 | void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt) |
104 | { |
104 | { |
105 | region_register rr; |
105 | region_register rr; |
106 | bool restore_rr = false; |
106 | bool restore_rr = false; |
107 | int b = 0; |
107 | int b = 0; |
108 | int c = cnt; |
108 | int c = cnt; |
109 | 109 | ||
110 | uintptr_t va; |
110 | uintptr_t va; |
111 | va = page; |
111 | va = page; |
112 | 112 | ||
113 | rr.word = rr_read(VA2VRN(va)); |
113 | rr.word = rr_read(VA2VRN(va)); |
114 | if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { |
114 | if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { |
115 | /* |
115 | /* |
116 | * The selected region register does not contain required RID. |
116 | * The selected region register does not contain required RID. |
117 | * Save the old content of the register and replace the RID. |
117 | * Save the old content of the register and replace the RID. |
118 | */ |
118 | */ |
119 | region_register rr0; |
119 | region_register rr0; |
120 | 120 | ||
121 | rr0 = rr; |
121 | rr0 = rr; |
122 | rr0.map.rid = ASID2RID(asid, VA2VRN(va)); |
122 | rr0.map.rid = ASID2RID(asid, VA2VRN(va)); |
123 | rr_write(VA2VRN(va), rr0.word); |
123 | rr_write(VA2VRN(va), rr0.word); |
124 | srlz_d(); |
124 | srlz_d(); |
125 | srlz_i(); |
125 | srlz_i(); |
126 | } |
126 | } |
127 | 127 | ||
128 | while(c >>= 1) |
128 | while(c >>= 1) |
129 | b++; |
129 | b++; |
130 | b >>= 1; |
130 | b >>= 1; |
131 | uint64_t ps; |
131 | uint64_t ps; |
132 | 132 | ||
133 | switch (b) { |
133 | switch (b) { |
134 | case 0: /* cnt 1 - 3 */ |
134 | case 0: /* cnt 1 - 3 */ |
135 | ps = PAGE_WIDTH; |
135 | ps = PAGE_WIDTH; |
136 | break; |
136 | break; |
137 | case 1: /* cnt 4 - 15 */ |
137 | case 1: /* cnt 4 - 15 */ |
138 | ps = PAGE_WIDTH + 2; |
138 | ps = PAGE_WIDTH + 2; |
139 | va &= ~((1 << ps) - 1); |
139 | va &= ~((1 << ps) - 1); |
140 | break; |
140 | break; |
141 | case 2: /* cnt 16 - 63 */ |
141 | case 2: /* cnt 16 - 63 */ |
142 | ps = PAGE_WIDTH + 4; |
142 | ps = PAGE_WIDTH + 4; |
143 | va &= ~((1 << ps) - 1); |
143 | va &= ~((1 << ps) - 1); |
144 | break; |
144 | break; |
145 | case 3: /* cnt 64 - 255 */ |
145 | case 3: /* cnt 64 - 255 */ |
146 | ps = PAGE_WIDTH + 6; |
146 | ps = PAGE_WIDTH + 6; |
147 | va &= ~((1 << ps) - 1); |
147 | va &= ~((1 << ps) - 1); |
148 | break; |
148 | break; |
149 | case 4: /* cnt 256 - 1023 */ |
149 | case 4: /* cnt 256 - 1023 */ |
150 | ps = PAGE_WIDTH + 8; |
150 | ps = PAGE_WIDTH + 8; |
151 | va &= ~((1 << ps) - 1); |
151 | va &= ~((1 << ps) - 1); |
152 | break; |
152 | break; |
153 | case 5: /* cnt 1024 - 4095 */ |
153 | case 5: /* cnt 1024 - 4095 */ |
154 | ps = PAGE_WIDTH + 10; |
154 | ps = PAGE_WIDTH + 10; |
155 | va &= ~((1 << ps) - 1); |
155 | va &= ~((1 << ps) - 1); |
156 | break; |
156 | break; |
157 | case 6: /* cnt 4096 - 16383 */ |
157 | case 6: /* cnt 4096 - 16383 */ |
158 | ps = PAGE_WIDTH + 12; |
158 | ps = PAGE_WIDTH + 12; |
159 | va &= ~((1 << ps) - 1); |
159 | va &= ~((1 << ps) - 1); |
160 | break; |
160 | break; |
161 | case 7: /* cnt 16384 - 65535 */ |
161 | case 7: /* cnt 16384 - 65535 */ |
162 | case 8: /* cnt 65536 - (256K - 1) */ |
162 | case 8: /* cnt 65536 - (256K - 1) */ |
163 | ps = PAGE_WIDTH + 14; |
163 | ps = PAGE_WIDTH + 14; |
164 | va &= ~((1 << ps) - 1); |
164 | va &= ~((1 << ps) - 1); |
165 | break; |
165 | break; |
166 | default: |
166 | default: |
167 | ps = PAGE_WIDTH + 18; |
167 | ps = PAGE_WIDTH + 18; |
168 | va &= ~((1 << ps) - 1); |
168 | va &= ~((1 << ps) - 1); |
169 | break; |
169 | break; |
170 | } |
170 | } |
171 | for(; va < (page + cnt * PAGE_SIZE); va += (1 << ps)) |
171 | for(; va < (page + cnt * PAGE_SIZE); va += (1 << ps)) |
172 | asm volatile ("ptc.l %0, %1;;" :: "r" (va), "r" (ps << 2)); |
172 | asm volatile ("ptc.l %0, %1;;" :: "r" (va), "r" (ps << 2)); |
173 | srlz_d(); |
173 | srlz_d(); |
174 | srlz_i(); |
174 | srlz_i(); |
175 | 175 | ||
176 | if (restore_rr) { |
176 | if (restore_rr) { |
177 | rr_write(VA2VRN(va), rr.word); |
177 | rr_write(VA2VRN(va), rr.word); |
178 | srlz_d(); |
178 | srlz_d(); |
179 | srlz_i(); |
179 | srlz_i(); |
180 | } |
180 | } |
181 | } |
181 | } |
182 | 182 | ||
183 | /** Insert data into data translation cache. |
183 | /** Insert data into data translation cache. |
184 | * |
184 | * |
185 | * @param va Virtual page address. |
185 | * @param va Virtual page address. |
186 | * @param asid Address space identifier. |
186 | * @param asid Address space identifier. |
187 | * @param entry The rest of TLB entry as required by TLB insertion |
187 | * @param entry The rest of TLB entry as required by TLB insertion |
188 | * format. |
188 | * format. |
189 | */ |
189 | */ |
190 | void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) |
190 | void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) |
191 | { |
191 | { |
192 | tc_mapping_insert(va, asid, entry, true); |
192 | tc_mapping_insert(va, asid, entry, true); |
193 | } |
193 | } |
194 | 194 | ||
195 | /** Insert data into instruction translation cache. |
195 | /** Insert data into instruction translation cache. |
196 | * |
196 | * |
197 | * @param va Virtual page address. |
197 | * @param va Virtual page address. |
198 | * @param asid Address space identifier. |
198 | * @param asid Address space identifier. |
199 | * @param entry The rest of TLB entry as required by TLB insertion |
199 | * @param entry The rest of TLB entry as required by TLB insertion |
200 | * format. |
200 | * format. |
201 | */ |
201 | */ |
202 | void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) |
202 | void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) |
203 | { |
203 | { |
204 | tc_mapping_insert(va, asid, entry, false); |
204 | tc_mapping_insert(va, asid, entry, false); |
205 | } |
205 | } |
206 | 206 | ||
207 | /** Insert data into instruction or data translation cache. |
207 | /** Insert data into instruction or data translation cache. |
208 | * |
208 | * |
209 | * @param va Virtual page address. |
209 | * @param va Virtual page address. |
210 | * @param asid Address space identifier. |
210 | * @param asid Address space identifier. |
211 | * @param entry The rest of TLB entry as required by TLB insertion |
211 | * @param entry The rest of TLB entry as required by TLB insertion |
212 | * format. |
212 | * format. |
213 | * @param dtc If true, insert into data translation cache, use |
213 | * @param dtc If true, insert into data translation cache, use |
214 | * instruction translation cache otherwise. |
214 | * instruction translation cache otherwise. |
215 | */ |
215 | */ |
216 | void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc) |
216 | void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc) |
217 | { |
217 | { |
218 | region_register rr; |
218 | region_register rr; |
219 | bool restore_rr = false; |
219 | bool restore_rr = false; |
220 | 220 | ||
221 | rr.word = rr_read(VA2VRN(va)); |
221 | rr.word = rr_read(VA2VRN(va)); |
222 | if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { |
222 | if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { |
223 | /* |
223 | /* |
224 | * The selected region register does not contain required RID. |
224 | * The selected region register does not contain required RID. |
225 | * Save the old content of the register and replace the RID. |
225 | * Save the old content of the register and replace the RID. |
226 | */ |
226 | */ |
227 | region_register rr0; |
227 | region_register rr0; |
228 | 228 | ||
229 | rr0 = rr; |
229 | rr0 = rr; |
230 | rr0.map.rid = ASID2RID(asid, VA2VRN(va)); |
230 | rr0.map.rid = ASID2RID(asid, VA2VRN(va)); |
231 | rr_write(VA2VRN(va), rr0.word); |
231 | rr_write(VA2VRN(va), rr0.word); |
232 | srlz_d(); |
232 | srlz_d(); |
233 | srlz_i(); |
233 | srlz_i(); |
234 | } |
234 | } |
235 | 235 | ||
236 | asm volatile ( |
236 | asm volatile ( |
237 | "mov r8 = psr;;\n" |
237 | "mov r8 = psr;;\n" |
238 | "rsm %0;;\n" /* PSR_IC_MASK */ |
238 | "rsm %0;;\n" /* PSR_IC_MASK */ |
239 | "srlz.d;;\n" |
239 | "srlz.d;;\n" |
240 | "srlz.i;;\n" |
240 | "srlz.i;;\n" |
241 | "mov cr.ifa = %1\n" /* va */ |
241 | "mov cr.ifa = %1\n" /* va */ |
242 | "mov cr.itir = %2;;\n" /* entry.word[1] */ |
242 | "mov cr.itir = %2;;\n" /* entry.word[1] */ |
243 | "cmp.eq p6,p7 = %4,r0;;\n" /* decide between itc and dtc */ |
243 | "cmp.eq p6,p7 = %4,r0;;\n" /* decide between itc and dtc */ |
244 | "(p6) itc.i %3;;\n" |
244 | "(p6) itc.i %3;;\n" |
245 | "(p7) itc.d %3;;\n" |
245 | "(p7) itc.d %3;;\n" |
246 | "mov psr.l = r8;;\n" |
246 | "mov psr.l = r8;;\n" |
247 | "srlz.d;;\n" |
247 | "srlz.d;;\n" |
248 | : |
248 | : |
249 | : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), |
249 | : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), |
250 | "r" (entry.word[0]), "r" (dtc) |
250 | "r" (entry.word[0]), "r" (dtc) |
251 | : "p6", "p7", "r8" |
251 | : "p6", "p7", "r8" |
252 | ); |
252 | ); |
253 | 253 | ||
254 | if (restore_rr) { |
254 | if (restore_rr) { |
255 | rr_write(VA2VRN(va), rr.word); |
255 | rr_write(VA2VRN(va), rr.word); |
256 | srlz_d(); |
256 | srlz_d(); |
257 | srlz_i(); |
257 | srlz_i(); |
258 | } |
258 | } |
259 | } |
259 | } |
260 | 260 | ||
261 | /** Insert data into instruction translation register. |
261 | /** Insert data into instruction translation register. |
262 | * |
262 | * |
263 | * @param va Virtual page address. |
263 | * @param va Virtual page address. |
264 | * @param asid Address space identifier. |
264 | * @param asid Address space identifier. |
265 | * @param entry The rest of TLB entry as required by TLB insertion |
265 | * @param entry The rest of TLB entry as required by TLB insertion |
266 | * format. |
266 | * format. |
267 | * @param tr Translation register. |
267 | * @param tr Translation register. |
268 | */ |
268 | */ |
269 | void |
269 | void |
270 | itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr) |
270 | itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr) |
271 | { |
271 | { |
272 | tr_mapping_insert(va, asid, entry, false, tr); |
272 | tr_mapping_insert(va, asid, entry, false, tr); |
273 | } |
273 | } |
274 | 274 | ||
275 | /** Insert data into data translation register. |
275 | /** Insert data into data translation register. |
276 | * |
276 | * |
277 | * @param va Virtual page address. |
277 | * @param va Virtual page address. |
278 | * @param asid Address space identifier. |
278 | * @param asid Address space identifier. |
279 | * @param entry The rest of TLB entry as required by TLB insertion |
279 | * @param entry The rest of TLB entry as required by TLB insertion |
280 | * format. |
280 | * format. |
281 | * @param tr Translation register. |
281 | * @param tr Translation register. |
282 | */ |
282 | */ |
283 | void |
283 | void |
284 | dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr) |
284 | dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr) |
285 | { |
285 | { |
286 | tr_mapping_insert(va, asid, entry, true, tr); |
286 | tr_mapping_insert(va, asid, entry, true, tr); |
287 | } |
287 | } |
288 | 288 | ||
289 | /** Insert data into instruction or data translation register. |
289 | /** Insert data into instruction or data translation register. |
290 | * |
290 | * |
291 | * @param va Virtual page address. |
291 | * @param va Virtual page address. |
292 | * @param asid Address space identifier. |
292 | * @param asid Address space identifier. |
293 | * @param entry The rest of TLB entry as required by TLB insertion |
293 | * @param entry The rest of TLB entry as required by TLB insertion |
294 | * format. |
294 | * format. |
295 | * @param dtr If true, insert into data translation register, use |
295 | * @param dtr If true, insert into data translation register, use |
296 | * instruction translation register otherwise. |
296 | * instruction translation register otherwise. |
297 | * @param tr Translation register. |
297 | * @param tr Translation register. |
298 | */ |
298 | */ |
299 | void |
299 | void |
300 | tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, |
300 | tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, |
301 | index_t tr) |
301 | size_t tr) |
302 | { |
302 | { |
303 | region_register rr; |
303 | region_register rr; |
304 | bool restore_rr = false; |
304 | bool restore_rr = false; |
305 | 305 | ||
306 | rr.word = rr_read(VA2VRN(va)); |
306 | rr.word = rr_read(VA2VRN(va)); |
307 | if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { |
307 | if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { |
308 | /* |
308 | /* |
309 | * The selected region register does not contain required RID. |
309 | * The selected region register does not contain required RID. |
310 | * Save the old content of the register and replace the RID. |
310 | * Save the old content of the register and replace the RID. |
311 | */ |
311 | */ |
312 | region_register rr0; |
312 | region_register rr0; |
313 | 313 | ||
314 | rr0 = rr; |
314 | rr0 = rr; |
315 | rr0.map.rid = ASID2RID(asid, VA2VRN(va)); |
315 | rr0.map.rid = ASID2RID(asid, VA2VRN(va)); |
316 | rr_write(VA2VRN(va), rr0.word); |
316 | rr_write(VA2VRN(va), rr0.word); |
317 | srlz_d(); |
317 | srlz_d(); |
318 | srlz_i(); |
318 | srlz_i(); |
319 | } |
319 | } |
320 | 320 | ||
321 | asm volatile ( |
321 | asm volatile ( |
322 | "mov r8 = psr;;\n" |
322 | "mov r8 = psr;;\n" |
323 | "rsm %0;;\n" /* PSR_IC_MASK */ |
323 | "rsm %0;;\n" /* PSR_IC_MASK */ |
324 | "srlz.d;;\n" |
324 | "srlz.d;;\n" |
325 | "srlz.i;;\n" |
325 | "srlz.i;;\n" |
326 | "mov cr.ifa = %1\n" /* va */ |
326 | "mov cr.ifa = %1\n" /* va */ |
327 | "mov cr.itir = %2;;\n" /* entry.word[1] */ |
327 | "mov cr.itir = %2;;\n" /* entry.word[1] */ |
328 | "cmp.eq p6,p7 = %5,r0;;\n" /* decide between itr and dtr */ |
328 | "cmp.eq p6,p7 = %5,r0;;\n" /* decide between itr and dtr */ |
329 | "(p6) itr.i itr[%4] = %3;;\n" |
329 | "(p6) itr.i itr[%4] = %3;;\n" |
330 | "(p7) itr.d dtr[%4] = %3;;\n" |
330 | "(p7) itr.d dtr[%4] = %3;;\n" |
331 | "mov psr.l = r8;;\n" |
331 | "mov psr.l = r8;;\n" |
332 | "srlz.d;;\n" |
332 | "srlz.d;;\n" |
333 | : |
333 | : |
334 | : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), |
334 | : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), |
335 | "r" (entry.word[0]), "r" (tr), "r" (dtr) |
335 | "r" (entry.word[0]), "r" (tr), "r" (dtr) |
336 | : "p6", "p7", "r8" |
336 | : "p6", "p7", "r8" |
337 | ); |
337 | ); |
338 | 338 | ||
339 | if (restore_rr) { |
339 | if (restore_rr) { |
340 | rr_write(VA2VRN(va), rr.word); |
340 | rr_write(VA2VRN(va), rr.word); |
341 | srlz_d(); |
341 | srlz_d(); |
342 | srlz_i(); |
342 | srlz_i(); |
343 | } |
343 | } |
344 | } |
344 | } |
345 | 345 | ||
346 | /** Insert data into DTLB. |
346 | /** Insert data into DTLB. |
347 | * |
347 | * |
348 | * @param page Virtual page address including VRN bits. |
348 | * @param page Virtual page address including VRN bits. |
349 | * @param frame Physical frame address. |
349 | * @param frame Physical frame address. |
350 | * @param dtr If true, insert into data translation register, use data |
350 | * @param dtr If true, insert into data translation register, use data |
351 | * translation cache otherwise. |
351 | * translation cache otherwise. |
352 | * @param tr Translation register if dtr is true, ignored otherwise. |
352 | * @param tr Translation register if dtr is true, ignored otherwise. |
353 | */ |
353 | */ |
354 | void |
354 | void |
355 | dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, |
355 | dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, |
356 | index_t tr) |
356 | size_t tr) |
357 | { |
357 | { |
358 | tlb_entry_t entry; |
358 | tlb_entry_t entry; |
359 | 359 | ||
360 | entry.word[0] = 0; |
360 | entry.word[0] = 0; |
361 | entry.word[1] = 0; |
361 | entry.word[1] = 0; |
362 | 362 | ||
363 | entry.p = true; /* present */ |
363 | entry.p = true; /* present */ |
364 | entry.ma = MA_WRITEBACK; |
364 | entry.ma = MA_WRITEBACK; |
365 | entry.a = true; /* already accessed */ |
365 | entry.a = true; /* already accessed */ |
366 | entry.d = true; /* already dirty */ |
366 | entry.d = true; /* already dirty */ |
367 | entry.pl = PL_KERNEL; |
367 | entry.pl = PL_KERNEL; |
368 | entry.ar = AR_READ | AR_WRITE; |
368 | entry.ar = AR_READ | AR_WRITE; |
369 | entry.ppn = frame >> PPN_SHIFT; |
369 | entry.ppn = frame >> PPN_SHIFT; |
370 | entry.ps = PAGE_WIDTH; |
370 | entry.ps = PAGE_WIDTH; |
371 | 371 | ||
372 | if (dtr) |
372 | if (dtr) |
373 | dtr_mapping_insert(page, ASID_KERNEL, entry, tr); |
373 | dtr_mapping_insert(page, ASID_KERNEL, entry, tr); |
374 | else |
374 | else |
375 | dtc_mapping_insert(page, ASID_KERNEL, entry); |
375 | dtc_mapping_insert(page, ASID_KERNEL, entry); |
376 | } |
376 | } |
377 | 377 | ||
378 | /** Purge kernel entries from DTR. |
378 | /** Purge kernel entries from DTR. |
379 | * |
379 | * |
380 | * Purge DTR entries used by the kernel. |
380 | * Purge DTR entries used by the kernel. |
381 | * |
381 | * |
382 | * @param page Virtual page address including VRN bits. |
382 | * @param page Virtual page address including VRN bits. |
383 | * @param width Width of the purge in bits. |
383 | * @param width Width of the purge in bits. |
384 | */ |
384 | */ |
385 | void dtr_purge(uintptr_t page, count_t width) |
385 | void dtr_purge(uintptr_t page, size_t width) |
386 | { |
386 | { |
387 | asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2)); |
387 | asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2)); |
388 | } |
388 | } |
389 | 389 | ||
390 | 390 | ||
391 | /** Copy content of PTE into data translation cache. |
391 | /** Copy content of PTE into data translation cache. |
392 | * |
392 | * |
393 | * @param t PTE. |
393 | * @param t PTE. |
394 | */ |
394 | */ |
395 | void dtc_pte_copy(pte_t *t) |
395 | void dtc_pte_copy(pte_t *t) |
396 | { |
396 | { |
397 | tlb_entry_t entry; |
397 | tlb_entry_t entry; |
398 | 398 | ||
399 | entry.word[0] = 0; |
399 | entry.word[0] = 0; |
400 | entry.word[1] = 0; |
400 | entry.word[1] = 0; |
401 | 401 | ||
402 | entry.p = t->p; |
402 | entry.p = t->p; |
403 | entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE; |
403 | entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE; |
404 | entry.a = t->a; |
404 | entry.a = t->a; |
405 | entry.d = t->d; |
405 | entry.d = t->d; |
406 | entry.pl = t->k ? PL_KERNEL : PL_USER; |
406 | entry.pl = t->k ? PL_KERNEL : PL_USER; |
407 | entry.ar = t->w ? AR_WRITE : AR_READ; |
407 | entry.ar = t->w ? AR_WRITE : AR_READ; |
408 | entry.ppn = t->frame >> PPN_SHIFT; |
408 | entry.ppn = t->frame >> PPN_SHIFT; |
409 | entry.ps = PAGE_WIDTH; |
409 | entry.ps = PAGE_WIDTH; |
410 | 410 | ||
411 | dtc_mapping_insert(t->page, t->as->asid, entry); |
411 | dtc_mapping_insert(t->page, t->as->asid, entry); |
412 | #ifdef CONFIG_VHPT |
412 | #ifdef CONFIG_VHPT |
413 | vhpt_mapping_insert(t->page, t->as->asid, entry); |
413 | vhpt_mapping_insert(t->page, t->as->asid, entry); |
414 | #endif |
414 | #endif |
415 | } |
415 | } |
416 | 416 | ||
417 | /** Copy content of PTE into instruction translation cache. |
417 | /** Copy content of PTE into instruction translation cache. |
418 | * |
418 | * |
419 | * @param t PTE. |
419 | * @param t PTE. |
420 | */ |
420 | */ |
421 | void itc_pte_copy(pte_t *t) |
421 | void itc_pte_copy(pte_t *t) |
422 | { |
422 | { |
423 | tlb_entry_t entry; |
423 | tlb_entry_t entry; |
424 | 424 | ||
425 | entry.word[0] = 0; |
425 | entry.word[0] = 0; |
426 | entry.word[1] = 0; |
426 | entry.word[1] = 0; |
427 | 427 | ||
428 | ASSERT(t->x); |
428 | ASSERT(t->x); |
429 | 429 | ||
430 | entry.p = t->p; |
430 | entry.p = t->p; |
431 | entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE; |
431 | entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE; |
432 | entry.a = t->a; |
432 | entry.a = t->a; |
433 | entry.pl = t->k ? PL_KERNEL : PL_USER; |
433 | entry.pl = t->k ? PL_KERNEL : PL_USER; |
434 | entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ; |
434 | entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ; |
435 | entry.ppn = t->frame >> PPN_SHIFT; |
435 | entry.ppn = t->frame >> PPN_SHIFT; |
436 | entry.ps = PAGE_WIDTH; |
436 | entry.ps = PAGE_WIDTH; |
437 | 437 | ||
438 | itc_mapping_insert(t->page, t->as->asid, entry); |
438 | itc_mapping_insert(t->page, t->as->asid, entry); |
439 | #ifdef CONFIG_VHPT |
439 | #ifdef CONFIG_VHPT |
440 | vhpt_mapping_insert(t->page, t->as->asid, entry); |
440 | vhpt_mapping_insert(t->page, t->as->asid, entry); |
441 | #endif |
441 | #endif |
442 | } |
442 | } |
443 | 443 | ||
444 | /** Instruction TLB fault handler for faults with VHPT turned off. |
444 | /** Instruction TLB fault handler for faults with VHPT turned off. |
445 | * |
445 | * |
446 | * @param vector Interruption vector. |
446 | * @param vector Interruption vector. |
447 | * @param istate Structure with saved interruption state. |
447 | * @param istate Structure with saved interruption state. |
448 | */ |
448 | */ |
449 | void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate) |
449 | void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate) |
450 | { |
450 | { |
451 | region_register rr; |
451 | region_register rr; |
452 | rid_t rid; |
452 | rid_t rid; |
453 | uintptr_t va; |
453 | uintptr_t va; |
454 | pte_t *t; |
454 | pte_t *t; |
455 | 455 | ||
456 | va = istate->cr_ifa; /* faulting address */ |
456 | va = istate->cr_ifa; /* faulting address */ |
457 | rr.word = rr_read(VA2VRN(va)); |
457 | rr.word = rr_read(VA2VRN(va)); |
458 | rid = rr.map.rid; |
458 | rid = rr.map.rid; |
459 | 459 | ||
460 | page_table_lock(AS, true); |
460 | page_table_lock(AS, true); |
461 | t = page_mapping_find(AS, va); |
461 | t = page_mapping_find(AS, va); |
462 | if (t) { |
462 | if (t) { |
463 | /* |
463 | /* |
464 | * The mapping was found in software page hash table. |
464 | * The mapping was found in software page hash table. |
465 | * Insert it into data translation cache. |
465 | * Insert it into data translation cache. |
466 | */ |
466 | */ |
467 | itc_pte_copy(t); |
467 | itc_pte_copy(t); |
468 | page_table_unlock(AS, true); |
468 | page_table_unlock(AS, true); |
469 | } else { |
469 | } else { |
470 | /* |
470 | /* |
471 | * Forward the page fault to address space page fault handler. |
471 | * Forward the page fault to address space page fault handler. |
472 | */ |
472 | */ |
473 | page_table_unlock(AS, true); |
473 | page_table_unlock(AS, true); |
474 | if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { |
474 | if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { |
475 | fault_if_from_uspace(istate,"Page fault at %p.",va); |
475 | fault_if_from_uspace(istate,"Page fault at %p.",va); |
476 | panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid, |
476 | panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid, |
477 | istate->cr_iip); |
477 | istate->cr_iip); |
478 | } |
478 | } |
479 | } |
479 | } |
480 | } |
480 | } |
481 | 481 | ||
482 | static int is_io_page_accessible(int page) |
482 | static int is_io_page_accessible(int page) |
483 | { |
483 | { |
484 | if (TASK->arch.iomap) |
484 | if (TASK->arch.iomap) |
485 | return bitmap_get(TASK->arch.iomap, page); |
485 | return bitmap_get(TASK->arch.iomap, page); |
486 | else |
486 | else |
487 | return 0; |
487 | return 0; |
488 | } |
488 | } |
489 | 489 | ||
/* Physical base of the legacy memory-mapped I/O frame window. */
#define IO_FRAME_BASE 0xFFFFC000000

/**
 * Try to satisfy a data TLB fault by inserting a mapping for legacy
 * memory-mapped I/O.
 *
 * There is special handling of memory mapped legacy io, because of 4KB sized
 * access for userspace.
 *
 * @param va Virtual address of page fault.
 * @param istate Structure with saved interruption state.
 *
 * @return One on success, zero on failure.
 */
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
	/*
	 * NOTE(review): 1 << IO_PAGE_WIDTH is an int shift — assumes
	 * IO_PAGE_WIDTH < 31; confirm against the architecture header.
	 */
	if ((va >= IO_OFFSET ) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
		if (TASK) {
			/* Index of the 4KB userspace I/O page within the I/O window. */
			uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
			    USPACE_IO_PAGE_WIDTH;

			/* Consult the per-task I/O bitmap before granting access. */
			if (is_io_page_accessible(io_page)) {
				uint64_t page, frame;

				page = IO_OFFSET +
				    (1 << USPACE_IO_PAGE_WIDTH) * io_page;
				frame = IO_FRAME_BASE +
				    (1 << USPACE_IO_PAGE_WIDTH) * io_page;

				tlb_entry_t entry;

				/* Start from an all-zero entry, then set the fields below. */
				entry.word[0] = 0;
				entry.word[1] = 0;

				entry.p = true; /* present */
				entry.ma = MA_UNCACHEABLE; /* device memory must not be cached */
				entry.a = true; /* already accessed */
				entry.d = true; /* already dirty */
				entry.pl = PL_USER; /* userspace may access directly */
				entry.ar = AR_READ | AR_WRITE;
				entry.ppn = frame >> PPN_SHIFT;
				entry.ps = USPACE_IO_PAGE_WIDTH;

				/* Insert into the data translation cache under the task's ASID. */
				dtc_mapping_insert(page, TASK->as->asid, entry);
				return 1;
			} else {
				/* Access denied by the I/O bitmap: kill userspace offender. */
				fault_if_from_uspace(istate,
				    "IO access fault at %p.", va);
			}
		}
	}

	/* Not an I/O-window fault (or no TASK context): let the caller handle it. */
	return 0;
}
541 | 541 | ||
/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa; /* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA(identity) mapping for faulting piece of
			 * kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Drop the page table lock before forwarding: both
		 * try_memmap_io_insertion() and as_page_fault() run
		 * without it held.
		 */
		page_table_unlock(AS, true);
		if (try_memmap_io_insertion(va, istate))
			return;
		/*
		 * Forward the page fault to the address space page fault
		 * handler.
		 */
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p.",va);
			/* Unresolvable fault from kernel context: fatal. */
			panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
			    istate->cr_iip);
		}
	}
}
592 | 592 | ||
/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
{
	/* A nested TLB fault indicates a kernel bug; stop immediately. */
	panic("%s.", __func__);
}
604 | 604 | ||
/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa; /* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	/*
	 * NOTE(review): the ASSERT is presumably compiled out in
	 * non-debug builds; the if below re-checks the same conditions
	 * so release kernels fall through to as_page_fault() instead of
	 * dereferencing a missing PTE — confirm ASSERT semantics.
	 */
	ASSERT(t && t->p);
	if (t && t->p && t->w) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	} else {
		/* No writable mapping: forward as a write fault. */
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p.",va);
			panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
			    istate->cr_iip);
		}
	}
	page_table_unlock(AS, true);
}
640 | 640 | ||
/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa; /* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	/* Debug-build sanity check; the if below re-checks for release builds. */
	ASSERT(t && t->p);
	if (t && t->p && t->x) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	} else {
		/* No executable mapping: forward as an execute fault. */
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.", va);
			panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
			    istate->cr_iip);
		}
	}
	page_table_unlock(AS, true);
}
676 | 676 | ||
/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_access_bit_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa; /* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	/* Debug-build sanity check; the if below re-checks for release builds. */
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	} else {
		/* No present mapping: forward as a read fault. */
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.", va);
			panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
			    istate->cr_iip);
		}
	}
	page_table_unlock(AS, true);
}

/** Data access rights fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_access_rights_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa; /* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	/*
	 * Assume a write to a read-only page.
	 */
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	/* A present, non-writable mapping must exist for this fault type. */
	ASSERT(t && t->p);
	ASSERT(!t->w);
	/* Let the address space handler decide (e.g. copy-on-write or kill). */
	if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
		fault_if_from_uspace(istate, "Page fault at %p.", va);
		panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
		    istate->cr_iip);
	}
	page_table_unlock(AS, true);
}
712 | 743 | ||
/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void page_not_present(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa; /* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t);

	if (t->p) {
		/*
		 * If the Present bit is set in page hash table, just copy it
		 * and update ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/* Unlock before forwarding; as_page_fault() runs without the lock. */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.", va);
			panic("%s: va=%p, rid=%d.", __func__, va, rid);
		}
	}
}
751 | 782 | ||
/** Architecture-specific TLB initialization hook.
 *
 * Intentionally empty: no ia64-specific setup is required here.
 */
void tlb_arch_init(void)
{
}
755 | 786 | ||
/** Print the TLB contents.
 *
 * Intentionally empty: dumping TLB contents is not implemented on ia64.
 */
void tlb_print(void)
{
}
759 | 790 | ||
/** @}
 */