Rev 2787 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2787 | Rev 3675 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (c) 2006 Jakub Jermar |
2 | * Copyright (c) 2006 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup ia64mm |
29 | /** @addtogroup ia64mm |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | /** @file |
32 | /** @file |
33 | */ |
33 | */ |
34 | 34 | ||
35 | /* |
35 | /* |
36 | * TLB management. |
36 | * TLB management. |
37 | */ |
37 | */ |
38 | 38 | ||
39 | #include <mm/tlb.h> |
39 | #include <mm/tlb.h> |
40 | #include <mm/asid.h> |
40 | #include <mm/asid.h> |
41 | #include <mm/page.h> |
41 | #include <mm/page.h> |
42 | #include <mm/as.h> |
42 | #include <mm/as.h> |
43 | #include <arch/mm/tlb.h> |
43 | #include <arch/mm/tlb.h> |
44 | #include <arch/mm/page.h> |
44 | #include <arch/mm/page.h> |
45 | #include <arch/mm/vhpt.h> |
45 | #include <arch/mm/vhpt.h> |
46 | #include <arch/barrier.h> |
46 | #include <arch/barrier.h> |
47 | #include <arch/interrupt.h> |
47 | #include <arch/interrupt.h> |
48 | #include <arch/pal/pal.h> |
48 | #include <arch/pal/pal.h> |
49 | #include <arch/asm.h> |
49 | #include <arch/asm.h> |
50 | #include <panic.h> |
50 | #include <panic.h> |
51 | #include <print.h> |
51 | #include <print.h> |
52 | #include <arch.h> |
52 | #include <arch.h> |
53 | #include <interrupt.h> |
53 | #include <interrupt.h> |
54 | 54 | ||
/** Invalidate all TLB entries.
 *
 * Queries PAL for the ptc.e loop parameters (base address, two iteration
 * counts and two strides) and issues one ptc.e instruction per inner
 * iteration, purging the entire local TLB.  With CONFIG_VHPT, the VHPT
 * is invalidated as well so it cannot re-supply stale translations.
 */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	uintptr_t adr;
	uint32_t count1, count2, stride1, stride2;
	
	unsigned int i, j;
	
	/* Obtain the ptc.e iteration parameters from PAL. */
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();
	
	/* The ptc.e loop must not be interrupted. */
	ipl = interrupts_disable();
	
	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			asm volatile (
				"ptc.e %0 ;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}
	
	interrupts_restore(ipl);
	
	/* Serialize so the purges take effect before continuing. */
	srlz_d();
	srlz_i();
#ifdef CONFIG_VHPT
	vhpt_invalidate_all();
#endif
}
92 | 92 | ||
/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	/*
	 * No cheap per-ASID purge is used here; fall back to flushing
	 * the entire TLB regardless of the requested ASID.
	 */
	tlb_invalidate_all();
}
101 | 101 | ||
102 | 102 | ||
103 | void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
103 | void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
104 | { |
104 | { |
105 | region_register rr; |
105 | region_register rr; |
106 | bool restore_rr = false; |
106 | bool restore_rr = false; |
107 | int b = 0; |
107 | int b = 0; |
108 | int c = cnt; |
108 | int c = cnt; |
109 | 109 | ||
110 | uintptr_t va; |
110 | uintptr_t va; |
111 | va = page; |
111 | va = page; |
112 | 112 | ||
113 | rr.word = rr_read(VA2VRN(va)); |
113 | rr.word = rr_read(VA2VRN(va)); |
114 | if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { |
114 | if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { |
115 | /* |
115 | /* |
116 | * The selected region register does not contain required RID. |
116 | * The selected region register does not contain required RID. |
117 | * Save the old content of the register and replace the RID. |
117 | * Save the old content of the register and replace the RID. |
118 | */ |
118 | */ |
119 | region_register rr0; |
119 | region_register rr0; |
120 | 120 | ||
121 | rr0 = rr; |
121 | rr0 = rr; |
122 | rr0.map.rid = ASID2RID(asid, VA2VRN(va)); |
122 | rr0.map.rid = ASID2RID(asid, VA2VRN(va)); |
123 | rr_write(VA2VRN(va), rr0.word); |
123 | rr_write(VA2VRN(va), rr0.word); |
124 | srlz_d(); |
124 | srlz_d(); |
125 | srlz_i(); |
125 | srlz_i(); |
126 | } |
126 | } |
127 | 127 | ||
128 | while(c >>= 1) |
128 | while(c >>= 1) |
129 | b++; |
129 | b++; |
130 | b >>= 1; |
130 | b >>= 1; |
131 | uint64_t ps; |
131 | uint64_t ps; |
132 | 132 | ||
133 | switch (b) { |
133 | switch (b) { |
134 | case 0: /*cnt 1-3*/ |
134 | case 0: /*cnt 1-3*/ |
135 | ps = PAGE_WIDTH; |
135 | ps = PAGE_WIDTH; |
136 | break; |
136 | break; |
137 | case 1: /*cnt 4-15*/ |
137 | case 1: /*cnt 4-15*/ |
138 | /*cnt=((cnt-1)/4)+1;*/ |
138 | /*cnt=((cnt-1)/4)+1;*/ |
139 | ps = PAGE_WIDTH+2; |
139 | ps = PAGE_WIDTH+2; |
140 | va &= ~((1<<ps)-1); |
140 | va &= ~((1<<ps)-1); |
141 | break; |
141 | break; |
142 | case 2: /*cnt 16-63*/ |
142 | case 2: /*cnt 16-63*/ |
143 | /*cnt=((cnt-1)/16)+1;*/ |
143 | /*cnt=((cnt-1)/16)+1;*/ |
144 | ps = PAGE_WIDTH+4; |
144 | ps = PAGE_WIDTH+4; |
145 | va &= ~((1<<ps)-1); |
145 | va &= ~((1<<ps)-1); |
146 | break; |
146 | break; |
147 | case 3: /*cnt 64-255*/ |
147 | case 3: /*cnt 64-255*/ |
148 | /*cnt=((cnt-1)/64)+1;*/ |
148 | /*cnt=((cnt-1)/64)+1;*/ |
149 | ps = PAGE_WIDTH+6; |
149 | ps = PAGE_WIDTH+6; |
150 | va &= ~((1<<ps)-1); |
150 | va &= ~((1<<ps)-1); |
151 | break; |
151 | break; |
152 | case 4: /*cnt 256-1023*/ |
152 | case 4: /*cnt 256-1023*/ |
153 | /*cnt=((cnt-1)/256)+1;*/ |
153 | /*cnt=((cnt-1)/256)+1;*/ |
154 | ps = PAGE_WIDTH+8; |
154 | ps = PAGE_WIDTH+8; |
155 | va &= ~((1<<ps)-1); |
155 | va &= ~((1<<ps)-1); |
156 | break; |
156 | break; |
157 | case 5: /*cnt 1024-4095*/ |
157 | case 5: /*cnt 1024-4095*/ |
158 | /*cnt=((cnt-1)/1024)+1;*/ |
158 | /*cnt=((cnt-1)/1024)+1;*/ |
159 | ps = PAGE_WIDTH+10; |
159 | ps = PAGE_WIDTH+10; |
160 | va &= ~((1<<ps)-1); |
160 | va &= ~((1<<ps)-1); |
161 | break; |
161 | break; |
162 | case 6: /*cnt 4096-16383*/ |
162 | case 6: /*cnt 4096-16383*/ |
163 | /*cnt=((cnt-1)/4096)+1;*/ |
163 | /*cnt=((cnt-1)/4096)+1;*/ |
164 | ps = PAGE_WIDTH+12; |
164 | ps = PAGE_WIDTH+12; |
165 | va &= ~((1<<ps)-1); |
165 | va &= ~((1<<ps)-1); |
166 | break; |
166 | break; |
167 | case 7: /*cnt 16384-65535*/ |
167 | case 7: /*cnt 16384-65535*/ |
168 | case 8: /*cnt 65536-(256K-1)*/ |
168 | case 8: /*cnt 65536-(256K-1)*/ |
169 | /*cnt=((cnt-1)/16384)+1;*/ |
169 | /*cnt=((cnt-1)/16384)+1;*/ |
170 | ps = PAGE_WIDTH+14; |
170 | ps = PAGE_WIDTH+14; |
171 | va &= ~((1<<ps)-1); |
171 | va &= ~((1<<ps)-1); |
172 | break; |
172 | break; |
173 | default: |
173 | default: |
174 | /*cnt=((cnt-1)/(16384*16))+1;*/ |
174 | /*cnt=((cnt-1)/(16384*16))+1;*/ |
175 | ps=PAGE_WIDTH+18; |
175 | ps=PAGE_WIDTH+18; |
176 | va&=~((1<<ps)-1); |
176 | va&=~((1<<ps)-1); |
177 | break; |
177 | break; |
178 | } |
178 | } |
179 | /*cnt+=(page!=va);*/ |
179 | /*cnt+=(page!=va);*/ |
180 | for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) { |
180 | for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) { |
181 | asm volatile ( |
181 | asm volatile ( |
182 | "ptc.l %0,%1;;" |
182 | "ptc.l %0,%1;;" |
183 | : |
183 | : |
184 | : "r" (va), "r" (ps<<2) |
184 | : "r" (va), "r" (ps<<2) |
185 | ); |
185 | ); |
186 | } |
186 | } |
187 | srlz_d(); |
187 | srlz_d(); |
188 | srlz_i(); |
188 | srlz_i(); |
189 | 189 | ||
190 | if (restore_rr) { |
190 | if (restore_rr) { |
191 | rr_write(VA2VRN(va), rr.word); |
191 | rr_write(VA2VRN(va), rr.word); |
192 | srlz_d(); |
192 | srlz_d(); |
193 | srlz_i(); |
193 | srlz_i(); |
194 | } |
194 | } |
195 | } |
195 | } |
196 | 196 | ||
/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	/* Thin wrapper: dtc == true selects the data translation cache. */
	tc_mapping_insert(va, asid, entry, true);
}
207 | 207 | ||
/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	/* Thin wrapper: dtc == false selects the instruction translation cache. */
	tc_mapping_insert(va, asid, entry, false);
}
218 | 218 | ||
/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * Perform the insertion with PSR.ic temporarily cleared, as
	 * required around moves to cr.ifa/cr.itir followed by itc.
	 * Predicates p6/p7 select the instruction or data variant based
	 * on the dtc argument; r8 preserves the original PSR.
	 */
	asm volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Put back the saved region register contents. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
269 | 269 | ||
/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	/* Thin wrapper: dtr == false selects the instruction translation register. */
	tr_mapping_insert(va, asid, entry, false, tr);
}
281 | 281 | ||
/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	/* Thin wrapper: dtr == true selects the data translation register. */
	tr_mapping_insert(va, asid, entry, true, tr);
}
293 | 293 | ||
/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * Perform the insertion with PSR.ic temporarily cleared, as
	 * required around moves to cr.ifa/cr.itir followed by itr.
	 * Predicates p6/p7 select the instruction or data translation
	 * register slot tr based on the dtr argument; r8 preserves the
	 * original PSR.
	 */
	asm volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Put back the saved region register contents. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
345 | 345 | ||
346 | /** Insert data into DTLB. |
346 | /** Insert data into DTLB. |
347 | * |
347 | * |
348 | * @param page Virtual page address including VRN bits. |
348 | * @param page Virtual page address including VRN bits. |
349 | * @param frame Physical frame address. |
349 | * @param frame Physical frame address. |
350 | * @param dtr If true, insert into data translation register, use data translation cache otherwise. |
350 | * @param dtr If true, insert into data translation register, use data translation cache otherwise. |
351 | * @param tr Translation register if dtr is true, ignored otherwise. |
351 | * @param tr Translation register if dtr is true, ignored otherwise. |
352 | */ |
352 | */ |
353 | void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr) |
353 | void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr) |
354 | { |
354 | { |
355 | tlb_entry_t entry; |
355 | tlb_entry_t entry; |
356 | 356 | ||
357 | entry.word[0] = 0; |
357 | entry.word[0] = 0; |
358 | entry.word[1] = 0; |
358 | entry.word[1] = 0; |
359 | 359 | ||
360 | entry.p = true; /* present */ |
360 | entry.p = true; /* present */ |
361 | entry.ma = MA_WRITEBACK; |
361 | entry.ma = MA_WRITEBACK; |
362 | entry.a = true; /* already accessed */ |
362 | entry.a = true; /* already accessed */ |
363 | entry.d = true; /* already dirty */ |
363 | entry.d = true; /* already dirty */ |
364 | entry.pl = PL_KERNEL; |
364 | entry.pl = PL_KERNEL; |
365 | entry.ar = AR_READ | AR_WRITE; |
365 | entry.ar = AR_READ | AR_WRITE; |
366 | entry.ppn = frame >> PPN_SHIFT; |
366 | entry.ppn = frame >> PPN_SHIFT; |
367 | entry.ps = PAGE_WIDTH; |
367 | entry.ps = PAGE_WIDTH; |
368 | 368 | ||
369 | if (dtr) |
369 | if (dtr) |
370 | dtr_mapping_insert(page, ASID_KERNEL, entry, tr); |
370 | dtr_mapping_insert(page, ASID_KERNEL, entry, tr); |
371 | else |
371 | else |
372 | dtc_mapping_insert(page, ASID_KERNEL, entry); |
372 | dtc_mapping_insert(page, ASID_KERNEL, entry); |
373 | } |
373 | } |
374 | 374 | ||
/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, count_t width)
{
	/* ptr.d expects the purge size encoded as log2 in bits 7:2. */
	asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
}
386 | 386 | ||
387 | 387 | ||
/** Copy content of PTE into data translation cache.
 *
 * Translates the software PTE bits into the hardware TLB insertion
 * format and inserts the result into the DTC (and the VHPT when
 * configured).
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = t->p;					/* present bit */
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;	/* cacheable -> write-back */
	entry.a = t->a;					/* accessed bit */
	entry.d = t->d;					/* dirty bit */
	entry.pl = t->k ? PL_KERNEL : PL_USER;		/* privilege level */
	entry.ar = t->w ? AR_WRITE : AR_READ;		/* access rights */
	entry.ppn = t->frame >> PPN_SHIFT;		/* physical page number */
	entry.ps = PAGE_WIDTH;				/* page size */

	dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}
413 | 413 | ||
/** Copy content of PTE into instruction translation cache.
 *
 * Translates the software PTE bits into the hardware TLB insertion
 * format and inserts the result into the ITC (and the VHPT when
 * configured).  The PTE must be executable.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	/* An instruction translation only makes sense for executable pages. */
	ASSERT(t->x);

	entry.p = t->p;					/* present bit */
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;	/* cacheable -> write-back */
	entry.a = t->a;					/* accessed bit */
	entry.pl = t->k ? PL_KERNEL : PL_USER;		/* privilege level */
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;	/* access rights */
	entry.ppn = t->frame >> PPN_SHIFT;		/* physical page number */
	entry.ps = PAGE_WIDTH;				/* page size */

	itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}
440 | 440 | ||
/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into instruction translation cache.
		 */
		itc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %p",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
		}
	}
}
477 | 477 | ||
- | 478 | ||
- | 479 | ||
- | 480 | static int is_io_page_accessible(int page) |
|
- | 481 | { |
|
- | 482 | if(TASK->arch.iomap) return bitmap_get(TASK->arch.iomap,page); |
|
- | 483 | else return 0; |
|
- | 484 | } |
|
- | 485 | ||
- | 486 | #define IO_FRAME_BASE 0xFFFFC000000 |
|
- | 487 | ||
- | 488 | /** There is special handling of memmaped lagacy io, because |
|
- | 489 | * of 4KB sized access |
|
- | 490 | * only for userspace |
|
- | 491 | * |
|
- | 492 | * @param va virtual address of page fault |
|
- | 493 | * @param istate Structure with saved interruption state. |
|
- | 494 | * |
|
- | 495 | * |
|
- | 496 | * @return 1 on success, 0 on fail |
|
- | 497 | */ |
|
- | 498 | static int try_memmap_io_insertion(uintptr_t va, istate_t *istate) |
|
- | 499 | { |
|
- | 500 | if((va >= IO_OFFSET ) && (va < IO_OFFSET + (1<<IO_PAGE_WIDTH))) |
|
- | 501 | if(TASK){ |
|
- | 502 | ||
- | 503 | uint64_t io_page=(va & ((1<<IO_PAGE_WIDTH)-1)) >> (USPACE_IO_PAGE_WIDTH); |
|
- | 504 | if(is_io_page_accessible(io_page)){ |
|
- | 505 | //printf("Insert %llX\n",va); |
|
- | 506 | ||
- | 507 | uint64_t page,frame; |
|
- | 508 | ||
- | 509 | page = IO_OFFSET + (1 << USPACE_IO_PAGE_WIDTH) * io_page; |
|
- | 510 | frame = IO_FRAME_BASE + (1 << USPACE_IO_PAGE_WIDTH) * io_page; |
|
- | 511 | ||
- | 512 | ||
- | 513 | tlb_entry_t entry; |
|
- | 514 | ||
- | 515 | entry.word[0] = 0; |
|
- | 516 | entry.word[1] = 0; |
|
- | 517 | ||
- | 518 | entry.p = true; /* present */ |
|
- | 519 | entry.ma = MA_UNCACHEABLE; |
|
- | 520 | entry.a = true; /* already accessed */ |
|
- | 521 | entry.d = true; /* already dirty */ |
|
- | 522 | entry.pl = PL_USER; |
|
- | 523 | entry.ar = AR_READ | AR_WRITE; |
|
- | 524 | entry.ppn = frame >> PPN_SHIFT; //MUSIM spocitat frame |
|
- | 525 | entry.ps = USPACE_IO_PAGE_WIDTH; |
|
- | 526 | ||
- | 527 | dtc_mapping_insert(page, TASK->as->asid, entry); //Musim zjistit ASID |
|
- | 528 | return 1; |
|
- | 529 | }else { |
|
- | 530 | fault_if_from_uspace(istate,"IO access fault at %p",va); |
|
- | 531 | return 0; |
|
- | 532 | } |
|
- | 533 | } else |
|
- | 534 | return 0; |
|
- | 535 | else |
|
- | 536 | return 0; |
|
- | 537 | ||
- | 538 | return 0; |
|
- | 539 | ||
- | 540 | } |
|
- | 541 | ||
- | 542 | ||
- | 543 | ||
- | 544 | ||
/** Data TLB fault handler for faults with VHPT turned off.
 *
 * Kernel-region faults get an identity mapping directly; user faults are
 * resolved from the software page hash table, then the memory-mapped
 * legacy I/O area, and finally the generic address space fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	uintptr_t va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA(identity) mapping for faulting piece of
			 * kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/* Drop the lock before the slow paths below. */
		page_table_unlock(AS, true);
		/* Legacy memory-mapped I/O gets a mapping on demand. */
		if (try_memmap_io_insertion(va, istate))
			return;
		/*
		 * Forward the page fault to the address space page fault handler.
		 */
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
		}
	}
}
524 | 592 | ||
/** Data nested TLB fault handler.
 *
 * This fault should not occur, so the handler unconditionally panics.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
{
	panic("%s\n", __func__);
}
536 | 604 | ||
537 | /** Data Dirty bit fault handler. |
605 | /** Data Dirty bit fault handler. |
538 | * |
606 | * |
539 | * @param vector Interruption vector. |
607 | * @param vector Interruption vector. |
540 | * @param istate Structure with saved interruption state. |
608 | * @param istate Structure with saved interruption state. |
541 | */ |
609 | */ |
542 | void data_dirty_bit_fault(uint64_t vector, istate_t *istate) |
610 | void data_dirty_bit_fault(uint64_t vector, istate_t *istate) |
543 | { |
611 | { |
544 | region_register rr; |
612 | region_register rr; |
545 | rid_t rid; |
613 | rid_t rid; |
546 | uintptr_t va; |
614 | uintptr_t va; |
547 | pte_t *t; |
615 | pte_t *t; |
548 | 616 | ||
549 | va = istate->cr_ifa; /* faulting address */ |
617 | va = istate->cr_ifa; /* faulting address */ |
550 | rr.word = rr_read(VA2VRN(va)); |
618 | rr.word = rr_read(VA2VRN(va)); |
551 | rid = rr.map.rid; |
619 | rid = rr.map.rid; |
552 | 620 | ||
553 | page_table_lock(AS, true); |
621 | page_table_lock(AS, true); |
554 | t = page_mapping_find(AS, va); |
622 | t = page_mapping_find(AS, va); |
555 | ASSERT(t && t->p); |
623 | ASSERT(t && t->p); |
556 | if (t && t->p && t->w) { |
624 | if (t && t->p && t->w) { |
557 | /* |
625 | /* |
558 | * Update the Dirty bit in page tables and reinsert |
626 | * Update the Dirty bit in page tables and reinsert |
559 | * the mapping into DTC. |
627 | * the mapping into DTC. |
560 | */ |
628 | */ |
561 | t->d = true; |
629 | t->d = true; |
562 | dtc_pte_copy(t); |
630 | dtc_pte_copy(t); |
563 | } else { |
631 | } else { |
564 | if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { |
632 | if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { |
565 | fault_if_from_uspace(istate,"Page fault at %p",va); |
633 | fault_if_from_uspace(istate,"Page fault at %p",va); |
566 | panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip); |
634 | panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip); |
567 | t->d = true; |
635 | t->d = true; |
568 | dtc_pte_copy(t); |
636 | dtc_pte_copy(t); |
569 | } |
637 | } |
570 | } |
638 | } |
571 | page_table_unlock(AS, true); |
639 | page_table_unlock(AS, true); |
572 | } |
640 | } |
573 | 641 | ||
574 | /** Instruction access bit fault handler. |
642 | /** Instruction access bit fault handler. |
575 | * |
643 | * |
576 | * @param vector Interruption vector. |
644 | * @param vector Interruption vector. |
577 | * @param istate Structure with saved interruption state. |
645 | * @param istate Structure with saved interruption state. |
578 | */ |
646 | */ |
579 | void instruction_access_bit_fault(uint64_t vector, istate_t *istate) |
647 | void instruction_access_bit_fault(uint64_t vector, istate_t *istate) |
580 | { |
648 | { |
581 | region_register rr; |
649 | region_register rr; |
582 | rid_t rid; |
650 | rid_t rid; |
583 | uintptr_t va; |
651 | uintptr_t va; |
584 | pte_t *t; |
652 | pte_t *t; |
585 | 653 | ||
586 | va = istate->cr_ifa; /* faulting address */ |
654 | va = istate->cr_ifa; /* faulting address */ |
587 | rr.word = rr_read(VA2VRN(va)); |
655 | rr.word = rr_read(VA2VRN(va)); |
588 | rid = rr.map.rid; |
656 | rid = rr.map.rid; |
589 | 657 | ||
590 | page_table_lock(AS, true); |
658 | page_table_lock(AS, true); |
591 | t = page_mapping_find(AS, va); |
659 | t = page_mapping_find(AS, va); |
592 | ASSERT(t && t->p); |
660 | ASSERT(t && t->p); |
593 | if (t && t->p && t->x) { |
661 | if (t && t->p && t->x) { |
594 | /* |
662 | /* |
595 | * Update the Accessed bit in page tables and reinsert |
663 | * Update the Accessed bit in page tables and reinsert |
596 | * the mapping into ITC. |
664 | * the mapping into ITC. |
597 | */ |
665 | */ |
598 | t->a = true; |
666 | t->a = true; |
599 | itc_pte_copy(t); |
667 | itc_pte_copy(t); |
600 | } else { |
668 | } else { |
601 | if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { |
669 | if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { |
602 | fault_if_from_uspace(istate,"Page fault at %p",va); |
670 | fault_if_from_uspace(istate,"Page fault at %p",va); |
603 | panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip); |
671 | panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip); |
604 | t->a = true; |
672 | t->a = true; |
605 | itc_pte_copy(t); |
673 | itc_pte_copy(t); |
606 | } |
674 | } |
607 | } |
675 | } |
608 | page_table_unlock(AS, true); |
676 | page_table_unlock(AS, true); |
609 | } |
677 | } |
610 | 678 | ||
611 | /** Data access bit fault handler. |
679 | /** Data access bit fault handler. |
612 | * |
680 | * |
613 | * @param vector Interruption vector. |
681 | * @param vector Interruption vector. |
614 | * @param istate Structure with saved interruption state. |
682 | * @param istate Structure with saved interruption state. |
615 | */ |
683 | */ |
616 | void data_access_bit_fault(uint64_t vector, istate_t *istate) |
684 | void data_access_bit_fault(uint64_t vector, istate_t *istate) |
617 | { |
685 | { |
618 | region_register rr; |
686 | region_register rr; |
619 | rid_t rid; |
687 | rid_t rid; |
620 | uintptr_t va; |
688 | uintptr_t va; |
621 | pte_t *t; |
689 | pte_t *t; |
622 | 690 | ||
623 | va = istate->cr_ifa; /* faulting address */ |
691 | va = istate->cr_ifa; /* faulting address */ |
624 | rr.word = rr_read(VA2VRN(va)); |
692 | rr.word = rr_read(VA2VRN(va)); |
625 | rid = rr.map.rid; |
693 | rid = rr.map.rid; |
626 | 694 | ||
627 | page_table_lock(AS, true); |
695 | page_table_lock(AS, true); |
628 | t = page_mapping_find(AS, va); |
696 | t = page_mapping_find(AS, va); |
629 | ASSERT(t && t->p); |
697 | ASSERT(t && t->p); |
630 | if (t && t->p) { |
698 | if (t && t->p) { |
631 | /* |
699 | /* |
632 | * Update the Accessed bit in page tables and reinsert |
700 | * Update the Accessed bit in page tables and reinsert |
633 | * the mapping into DTC. |
701 | * the mapping into DTC. |
634 | */ |
702 | */ |
635 | t->a = true; |
703 | t->a = true; |
636 | dtc_pte_copy(t); |
704 | dtc_pte_copy(t); |
637 | } else { |
705 | } else { |
638 | if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
706 | if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
639 | fault_if_from_uspace(istate,"Page fault at %p",va); |
707 | fault_if_from_uspace(istate,"Page fault at %p",va); |
640 | panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip); |
708 | panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip); |
641 | t->a = true; |
709 | t->a = true; |
642 | itc_pte_copy(t); |
710 | itc_pte_copy(t); |
643 | } |
711 | } |
644 | } |
712 | } |
645 | page_table_unlock(AS, true); |
713 | page_table_unlock(AS, true); |
646 | } |
714 | } |
647 | 715 | ||
648 | /** Page not present fault handler. |
716 | /** Page not present fault handler. |
649 | * |
717 | * |
650 | * @param vector Interruption vector. |
718 | * @param vector Interruption vector. |
651 | * @param istate Structure with saved interruption state. |
719 | * @param istate Structure with saved interruption state. |
652 | */ |
720 | */ |
653 | void page_not_present(uint64_t vector, istate_t *istate) |
721 | void page_not_present(uint64_t vector, istate_t *istate) |
654 | { |
722 | { |
655 | region_register rr; |
723 | region_register rr; |
656 | rid_t rid; |
724 | rid_t rid; |
657 | uintptr_t va; |
725 | uintptr_t va; |
658 | pte_t *t; |
726 | pte_t *t; |
659 | 727 | ||
660 | va = istate->cr_ifa; /* faulting address */ |
728 | va = istate->cr_ifa; /* faulting address */ |
661 | rr.word = rr_read(VA2VRN(va)); |
729 | rr.word = rr_read(VA2VRN(va)); |
662 | rid = rr.map.rid; |
730 | rid = rr.map.rid; |
663 | 731 | ||
664 | page_table_lock(AS, true); |
732 | page_table_lock(AS, true); |
665 | t = page_mapping_find(AS, va); |
733 | t = page_mapping_find(AS, va); |
666 | ASSERT(t); |
734 | ASSERT(t); |
667 | 735 | ||
668 | if (t->p) { |
736 | if (t->p) { |
669 | /* |
737 | /* |
670 | * If the Present bit is set in page hash table, just copy it |
738 | * If the Present bit is set in page hash table, just copy it |
671 | * and update ITC/DTC. |
739 | * and update ITC/DTC. |
672 | */ |
740 | */ |
673 | if (t->x) |
741 | if (t->x) |
674 | itc_pte_copy(t); |
742 | itc_pte_copy(t); |
675 | else |
743 | else |
676 | dtc_pte_copy(t); |
744 | dtc_pte_copy(t); |
677 | page_table_unlock(AS, true); |
745 | page_table_unlock(AS, true); |
678 | } else { |
746 | } else { |
679 | page_table_unlock(AS, true); |
747 | page_table_unlock(AS, true); |
680 | if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
748 | if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
681 | fault_if_from_uspace(istate,"Page fault at %p",va); |
749 | fault_if_from_uspace(istate,"Page fault at %p",va); |
682 | panic("%s: va=%p, rid=%d\n", __func__, va, rid); |
750 | panic("%s: va=%p, rid=%d\n", __func__, va, rid); |
683 | } |
751 | } |
684 | } |
752 | } |
685 | } |
753 | } |
686 | 754 | ||
687 | /** @} |
755 | /** @} |
688 | */ |
756 | */ |
689 | 757 |