Rev 1621 | Rev 1708 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1621 | Rev 1675 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2006 Jakub Jermar |
2 | * Copyright (C) 2006 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /* |
29 | /* |
30 | * TLB management. |
30 | * TLB management. |
31 | */ |
31 | */ |
32 | 32 | ||
33 | #include <mm/tlb.h> |
33 | #include <mm/tlb.h> |
34 | #include <mm/asid.h> |
34 | #include <mm/asid.h> |
35 | #include <mm/page.h> |
35 | #include <mm/page.h> |
36 | #include <mm/as.h> |
36 | #include <mm/as.h> |
37 | #include <arch/mm/tlb.h> |
37 | #include <arch/mm/tlb.h> |
38 | #include <arch/mm/page.h> |
38 | #include <arch/mm/page.h> |
39 | #include <arch/mm/vhpt.h> |
39 | #include <arch/mm/vhpt.h> |
40 | #include <arch/barrier.h> |
40 | #include <arch/barrier.h> |
41 | #include <arch/interrupt.h> |
41 | #include <arch/interrupt.h> |
42 | #include <arch/pal/pal.h> |
42 | #include <arch/pal/pal.h> |
43 | #include <arch/asm.h> |
43 | #include <arch/asm.h> |
44 | #include <typedefs.h> |
44 | #include <typedefs.h> |
45 | #include <panic.h> |
45 | #include <panic.h> |
46 | #include <print.h> |
46 | #include <print.h> |
47 | #include <arch.h> |
47 | #include <arch.h> |
48 | #include <interrupt.h> |
48 | #include <interrupt.h> |
49 | 49 | ||
/** Invalidate all TLB entries.
 *
 * Uses the architected PAL_PTCE_INFO parameters (base address, two loop
 * counts and two strides) to drive the double ptc.e loop that purges the
 * entire local processor TLB.
 */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	__address adr;
	__u32 count1, count2, stride1, stride2;
	
	int i,j;
	
	/* Fetch the ptc.e loop parameters reported by PAL firmware. */
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();
	
	/* The purge sequence must not be interleaved with interrupt handlers. */
	ipl = interrupts_disable();
	
	/*
	 * Architected nested loop: each ptc.e purges a portion of the TLB;
	 * the addresses are only loop indices, not real virtual addresses.
	 */
	for(i = 0; i < count1; i++) {
		for(j = 0; j < count2; j++) {
			__asm__ volatile (
				"ptc.e %0 ;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}
	
	interrupts_restore(ipl);
	
	/*
	 * NOTE(review): data/instruction serialization is performed after
	 * interrupts are re-enabled — presumably safe here, but confirm the
	 * intended ordering against the PAL_PTCE_INFO usage requirements.
	 */
	srlz_d();
	srlz_i();
#ifdef CONFIG_VHPT
	/* Keep the VHPT consistent with the now-empty TLB. */
	vhpt_invalidate_all();
#endif
}
87 | 87 | ||
/** Invalidate entries belonging to an address space.
 *
 * ia64 has no cheap way to purge only one RID from the TLB, so this
 * conservatively invalidates everything.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	/* asid is deliberately unused; full purge is the fallback. */
	tlb_invalidate_all();
}
96 | 96 | ||
97 | 97 | ||
98 | void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt) |
98 | void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt) |
99 | { |
99 | { |
100 | region_register rr; |
100 | region_register rr; |
101 | bool restore_rr = false; |
101 | bool restore_rr = false; |
102 | int b = 0; |
102 | int b = 0; |
103 | int c = cnt; |
103 | int c = cnt; |
104 | 104 | ||
105 | __address va; |
105 | __address va; |
106 | va = page; |
106 | va = page; |
107 | 107 | ||
108 | rr.word = rr_read(VA2VRN(va)); |
108 | rr.word = rr_read(VA2VRN(va)); |
109 | if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { |
109 | if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { |
110 | /* |
110 | /* |
111 | * The selected region register does not contain required RID. |
111 | * The selected region register does not contain required RID. |
112 | * Save the old content of the register and replace the RID. |
112 | * Save the old content of the register and replace the RID. |
113 | */ |
113 | */ |
114 | region_register rr0; |
114 | region_register rr0; |
115 | 115 | ||
116 | rr0 = rr; |
116 | rr0 = rr; |
117 | rr0.map.rid = ASID2RID(asid, VA2VRN(va)); |
117 | rr0.map.rid = ASID2RID(asid, VA2VRN(va)); |
118 | rr_write(VA2VRN(va), rr0.word); |
118 | rr_write(VA2VRN(va), rr0.word); |
119 | srlz_d(); |
119 | srlz_d(); |
120 | srlz_i(); |
120 | srlz_i(); |
121 | } |
121 | } |
122 | 122 | ||
123 | while(c >>= 1) |
123 | while(c >>= 1) |
124 | b++; |
124 | b++; |
125 | b >>= 1; |
125 | b >>= 1; |
126 | __u64 ps; |
126 | __u64 ps; |
127 | 127 | ||
128 | switch (b) { |
128 | switch (b) { |
129 | case 0: /*cnt 1-3*/ |
129 | case 0: /*cnt 1-3*/ |
130 | ps = PAGE_WIDTH; |
130 | ps = PAGE_WIDTH; |
131 | break; |
131 | break; |
132 | case 1: /*cnt 4-15*/ |
132 | case 1: /*cnt 4-15*/ |
133 | /*cnt=((cnt-1)/4)+1;*/ |
133 | /*cnt=((cnt-1)/4)+1;*/ |
134 | ps = PAGE_WIDTH+2; |
134 | ps = PAGE_WIDTH+2; |
135 | va &= ~((1<<ps)-1); |
135 | va &= ~((1<<ps)-1); |
136 | break; |
136 | break; |
137 | case 2: /*cnt 16-63*/ |
137 | case 2: /*cnt 16-63*/ |
138 | /*cnt=((cnt-1)/16)+1;*/ |
138 | /*cnt=((cnt-1)/16)+1;*/ |
139 | ps = PAGE_WIDTH+4; |
139 | ps = PAGE_WIDTH+4; |
140 | va &= ~((1<<ps)-1); |
140 | va &= ~((1<<ps)-1); |
141 | break; |
141 | break; |
142 | case 3: /*cnt 64-255*/ |
142 | case 3: /*cnt 64-255*/ |
143 | /*cnt=((cnt-1)/64)+1;*/ |
143 | /*cnt=((cnt-1)/64)+1;*/ |
144 | ps = PAGE_WIDTH+6; |
144 | ps = PAGE_WIDTH+6; |
145 | va &= ~((1<<ps)-1); |
145 | va &= ~((1<<ps)-1); |
146 | break; |
146 | break; |
147 | case 4: /*cnt 256-1023*/ |
147 | case 4: /*cnt 256-1023*/ |
148 | /*cnt=((cnt-1)/256)+1;*/ |
148 | /*cnt=((cnt-1)/256)+1;*/ |
149 | ps = PAGE_WIDTH+8; |
149 | ps = PAGE_WIDTH+8; |
150 | va &= ~((1<<ps)-1); |
150 | va &= ~((1<<ps)-1); |
151 | break; |
151 | break; |
152 | case 5: /*cnt 1024-4095*/ |
152 | case 5: /*cnt 1024-4095*/ |
153 | /*cnt=((cnt-1)/1024)+1;*/ |
153 | /*cnt=((cnt-1)/1024)+1;*/ |
154 | ps = PAGE_WIDTH+10; |
154 | ps = PAGE_WIDTH+10; |
155 | va &= ~((1<<ps)-1); |
155 | va &= ~((1<<ps)-1); |
156 | break; |
156 | break; |
157 | case 6: /*cnt 4096-16383*/ |
157 | case 6: /*cnt 4096-16383*/ |
158 | /*cnt=((cnt-1)/4096)+1;*/ |
158 | /*cnt=((cnt-1)/4096)+1;*/ |
159 | ps = PAGE_WIDTH+12; |
159 | ps = PAGE_WIDTH+12; |
160 | va &= ~((1<<ps)-1); |
160 | va &= ~((1<<ps)-1); |
161 | break; |
161 | break; |
162 | case 7: /*cnt 16384-65535*/ |
162 | case 7: /*cnt 16384-65535*/ |
163 | case 8: /*cnt 65536-(256K-1)*/ |
163 | case 8: /*cnt 65536-(256K-1)*/ |
164 | /*cnt=((cnt-1)/16384)+1;*/ |
164 | /*cnt=((cnt-1)/16384)+1;*/ |
165 | ps = PAGE_WIDTH+14; |
165 | ps = PAGE_WIDTH+14; |
166 | va &= ~((1<<ps)-1); |
166 | va &= ~((1<<ps)-1); |
167 | break; |
167 | break; |
168 | default: |
168 | default: |
169 | /*cnt=((cnt-1)/(16384*16))+1;*/ |
169 | /*cnt=((cnt-1)/(16384*16))+1;*/ |
170 | ps=PAGE_WIDTH+18; |
170 | ps=PAGE_WIDTH+18; |
171 | va&=~((1<<ps)-1); |
171 | va&=~((1<<ps)-1); |
172 | break; |
172 | break; |
173 | } |
173 | } |
174 | /*cnt+=(page!=va);*/ |
174 | /*cnt+=(page!=va);*/ |
175 | for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) { |
175 | for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) { |
176 | __asm__ volatile ( |
176 | __asm__ volatile ( |
177 | "ptc.l %0,%1;;" |
177 | "ptc.l %0,%1;;" |
178 | : |
178 | : |
179 | : "r" (va), "r" (ps<<2) |
179 | : "r" (va), "r" (ps<<2) |
180 | ); |
180 | ); |
181 | } |
181 | } |
182 | srlz_d(); |
182 | srlz_d(); |
183 | srlz_i(); |
183 | srlz_i(); |
184 | 184 | ||
185 | if (restore_rr) { |
185 | if (restore_rr) { |
186 | rr_write(VA2VRN(va), rr.word); |
186 | rr_write(VA2VRN(va), rr.word); |
187 | srlz_d(); |
187 | srlz_d(); |
188 | srlz_i(); |
188 | srlz_i(); |
189 | } |
189 | } |
190 | } |
190 | } |
191 | 191 | ||
192 | - | ||
/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	/* Thin wrapper: dtc == true selects the data translation cache. */
	tc_mapping_insert(va, asid, entry, true);
}
203 | 202 | ||
/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	/* Thin wrapper: dtc == false selects the instruction translation cache. */
	tc_mapping_insert(va, asid, entry, false);
}
214 | 213 | ||
/** Insert data into instruction or data translation cache.
 *
 * Temporarily switches the region register to the RID matching asid (if it
 * does not already match), performs the itc insertion with PSR.ic disabled,
 * then restores both PSR and the region register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;
	
	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;
		
		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}
	
	/*
	 * The whole insertion sequence must run with PSR.ic cleared:
	 * save PSR in r8, disable interruption collection, program
	 * cr.ifa/cr.itir, then issue itc.i or itc.d depending on dtc
	 * (p6 when dtc == 0, p7 otherwise), and finally restore PSR.
	 */
	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);
	
	if (restore_rr) {
		/* Put the saved region register contents back. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
265 | 264 | ||
/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	/* Thin wrapper: dtr == false selects the instruction translation register. */
	tr_mapping_insert(va, asid, entry, false, tr);
}
277 | 276 | ||
/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	/* Thin wrapper: dtr == true selects the data translation register. */
	tr_mapping_insert(va, asid, entry, true, tr);
}
289 | 288 | ||
/** Insert data into instruction or data translation register.
 *
 * Same structure as tc_mapping_insert(), but the entry is pinned into the
 * translation register indexed by tr instead of the translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;
	
	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;
		
		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}
	
	/*
	 * Run the insertion with PSR.ic cleared: program cr.ifa/cr.itir,
	 * then itr.i or itr.d into slot tr depending on dtr (p6 when
	 * dtr == 0, p7 otherwise), and restore PSR afterwards.
	 */
	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);
	
	if (restore_rr) {
		/* Put the saved region register contents back. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
341 | 340 | ||
342 | /** Insert data into DTLB. |
341 | /** Insert data into DTLB. |
343 | * |
342 | * |
344 | * @param va Virtual page address. |
343 | * @param page Virtual page address including VRN bits. |
345 | * @param asid Address space identifier. |
344 | * @param frame Physical frame address. |
346 | * @param entry The rest of TLB entry as required by TLB insertion format. |
- | |
347 | * @param dtr If true, insert into data translation register, use data translation cache otherwise. |
345 | * @param dtr If true, insert into data translation register, use data translation cache otherwise. |
348 | * @param tr Translation register if dtr is true, ignored otherwise. |
346 | * @param tr Translation register if dtr is true, ignored otherwise. |
349 | */ |
347 | */ |
350 | void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr) |
348 | void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr) |
351 | { |
349 | { |
352 | tlb_entry_t entry; |
350 | tlb_entry_t entry; |
353 | 351 | ||
354 | entry.word[0] = 0; |
352 | entry.word[0] = 0; |
355 | entry.word[1] = 0; |
353 | entry.word[1] = 0; |
356 | 354 | ||
357 | entry.p = true; /* present */ |
355 | entry.p = true; /* present */ |
358 | entry.ma = MA_WRITEBACK; |
356 | entry.ma = MA_WRITEBACK; |
359 | entry.a = true; /* already accessed */ |
357 | entry.a = true; /* already accessed */ |
360 | entry.d = true; /* already dirty */ |
358 | entry.d = true; /* already dirty */ |
361 | entry.pl = PL_KERNEL; |
359 | entry.pl = PL_KERNEL; |
362 | entry.ar = AR_READ | AR_WRITE; |
360 | entry.ar = AR_READ | AR_WRITE; |
363 | entry.ppn = frame >> PPN_SHIFT; |
361 | entry.ppn = frame >> PPN_SHIFT; |
364 | entry.ps = PAGE_WIDTH; |
362 | entry.ps = PAGE_WIDTH; |
365 | 363 | ||
366 | if (dtr) |
364 | if (dtr) |
367 | dtr_mapping_insert(page, ASID_KERNEL, entry, tr); |
365 | dtr_mapping_insert(page, ASID_KERNEL, entry, tr); |
368 | else |
366 | else |
369 | dtc_mapping_insert(page, ASID_KERNEL, entry); |
367 | dtc_mapping_insert(page, ASID_KERNEL, entry); |
370 | } |
368 | } |
371 | 369 | ||
/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(__address page, count_t width)
{
	/* ptr.d encodes the purge size in bits 7:2, hence width << 2. */
	__asm__ volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
}
|
- | 381 | ||
- | 382 | ||
372 | /** Copy content of PTE into data translation cache. |
383 | /** Copy content of PTE into data translation cache. |
373 | * |
384 | * |
374 | * @param t PTE. |
385 | * @param t PTE. |
375 | */ |
386 | */ |
376 | void dtc_pte_copy(pte_t *t) |
387 | void dtc_pte_copy(pte_t *t) |
377 | { |
388 | { |
378 | tlb_entry_t entry; |
389 | tlb_entry_t entry; |
379 | 390 | ||
380 | entry.word[0] = 0; |
391 | entry.word[0] = 0; |
381 | entry.word[1] = 0; |
392 | entry.word[1] = 0; |
382 | 393 | ||
383 | entry.p = t->p; |
394 | entry.p = t->p; |
384 | entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE; |
395 | entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE; |
385 | entry.a = t->a; |
396 | entry.a = t->a; |
386 | entry.d = t->d; |
397 | entry.d = t->d; |
387 | entry.pl = t->k ? PL_KERNEL : PL_USER; |
398 | entry.pl = t->k ? PL_KERNEL : PL_USER; |
388 | entry.ar = t->w ? AR_WRITE : AR_READ; |
399 | entry.ar = t->w ? AR_WRITE : AR_READ; |
389 | entry.ppn = t->frame >> PPN_SHIFT; |
400 | entry.ppn = t->frame >> PPN_SHIFT; |
390 | entry.ps = PAGE_WIDTH; |
401 | entry.ps = PAGE_WIDTH; |
391 | 402 | ||
392 | dtc_mapping_insert(t->page, t->as->asid, entry); |
403 | dtc_mapping_insert(t->page, t->as->asid, entry); |
393 | #ifdef CONFIG_VHPT |
404 | #ifdef CONFIG_VHPT |
394 | vhpt_mapping_insert(t->page, t->as->asid, entry); |
405 | vhpt_mapping_insert(t->page, t->as->asid, entry); |
395 | #endif |
406 | #endif |
396 | } |
407 | } |
397 | 408 | ||
398 | /** Copy content of PTE into instruction translation cache. |
409 | /** Copy content of PTE into instruction translation cache. |
399 | * |
410 | * |
400 | * @param t PTE. |
411 | * @param t PTE. |
401 | */ |
412 | */ |
402 | void itc_pte_copy(pte_t *t) |
413 | void itc_pte_copy(pte_t *t) |
403 | { |
414 | { |
404 | tlb_entry_t entry; |
415 | tlb_entry_t entry; |
405 | 416 | ||
406 | entry.word[0] = 0; |
417 | entry.word[0] = 0; |
407 | entry.word[1] = 0; |
418 | entry.word[1] = 0; |
408 | 419 | ||
409 | ASSERT(t->x); |
420 | ASSERT(t->x); |
410 | 421 | ||
411 | entry.p = t->p; |
422 | entry.p = t->p; |
412 | entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE; |
423 | entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE; |
413 | entry.a = t->a; |
424 | entry.a = t->a; |
414 | entry.pl = t->k ? PL_KERNEL : PL_USER; |
425 | entry.pl = t->k ? PL_KERNEL : PL_USER; |
415 | entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ; |
426 | entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ; |
416 | entry.ppn = t->frame >> PPN_SHIFT; |
427 | entry.ppn = t->frame >> PPN_SHIFT; |
417 | entry.ps = PAGE_WIDTH; |
428 | entry.ps = PAGE_WIDTH; |
418 | 429 | ||
419 | itc_mapping_insert(t->page, t->as->asid, entry); |
430 | itc_mapping_insert(t->page, t->as->asid, entry); |
420 | #ifdef CONFIG_VHPT |
431 | #ifdef CONFIG_VHPT |
421 | vhpt_mapping_insert(t->page, t->as->asid, entry); |
432 | vhpt_mapping_insert(t->page, t->as->asid, entry); |
422 | #endif |
433 | #endif |
423 | } |
434 | } |
424 | 435 | ||
/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;	/* kept only for the panic diagnostics below */
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into instruction translation cache.
		 */
		itc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %P",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
		}
	}
}
461 | 472 | ||
462 | /** Data TLB fault handler for faults with VHPT turned off. |
473 | /** Data TLB fault handler for faults with VHPT turned off. |
463 | * |
474 | * |
464 | * @param vector Interruption vector. |
475 | * @param vector Interruption vector. |
465 | * @param istate Structure with saved interruption state. |
476 | * @param istate Structure with saved interruption state. |
466 | */ |
477 | */ |
467 | void alternate_data_tlb_fault(__u64 vector, istate_t *istate) |
478 | void alternate_data_tlb_fault(__u64 vector, istate_t *istate) |
468 | { |
479 | { |
469 | region_register rr; |
480 | region_register rr; |
470 | rid_t rid; |
481 | rid_t rid; |
471 | __address va; |
482 | __address va; |
472 | pte_t *t; |
483 | pte_t *t; |
473 | 484 | ||
474 | va = istate->cr_ifa; /* faulting address */ |
485 | va = istate->cr_ifa; /* faulting address */ |
475 | rr.word = rr_read(VA2VRN(va)); |
486 | rr.word = rr_read(VA2VRN(va)); |
476 | rid = rr.map.rid; |
487 | rid = rr.map.rid; |
477 | if (RID2ASID(rid) == ASID_KERNEL) { |
488 | if (RID2ASID(rid) == ASID_KERNEL) { |
478 | if (VA2VRN(va) == VRN_KERNEL) { |
489 | if (VA2VRN(va) == VRN_KERNEL) { |
479 | /* |
490 | /* |
480 | * Provide KA2PA(identity) mapping for faulting piece of |
491 | * Provide KA2PA(identity) mapping for faulting piece of |
481 | * kernel address space. |
492 | * kernel address space. |
482 | */ |
493 | */ |
483 | dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0); |
494 | dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0); |
484 | return; |
495 | return; |
485 | } |
496 | } |
486 | } |
497 | } |
487 | 498 | ||
488 | page_table_lock(AS, true); |
499 | page_table_lock(AS, true); |
489 | t = page_mapping_find(AS, va); |
500 | t = page_mapping_find(AS, va); |
490 | if (t) { |
501 | if (t) { |
491 | /* |
502 | /* |
492 | * The mapping was found in software page hash table. |
503 | * The mapping was found in software page hash table. |
493 | * Insert it into data translation cache. |
504 | * Insert it into data translation cache. |
494 | */ |
505 | */ |
495 | dtc_pte_copy(t); |
506 | dtc_pte_copy(t); |
496 | page_table_unlock(AS, true); |
507 | page_table_unlock(AS, true); |
497 | } else { |
508 | } else { |
498 | /* |
509 | /* |
499 | * Forward the page fault to address space page fault handler. |
510 | * Forward the page fault to address space page fault handler. |
500 | */ |
511 | */ |
501 | page_table_unlock(AS, true); |
512 | page_table_unlock(AS, true); |
502 | if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
513 | if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
503 | fault_if_from_uspace(istate,"Page fault at %P",va); |
514 | fault_if_from_uspace(istate,"Page fault at %P",va); |
504 | panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); |
515 | panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); |
505 | } |
516 | } |
506 | } |
517 | } |
507 | } |
518 | } |
508 | 519 | ||
/** Data nested TLB fault handler.
 *
 * A nested data TLB fault is raised when a TLB miss occurs while the
 * processor is already servicing another TLB miss.  The kernel's TLB
 * miss handlers are expected never to trigger this condition, so this
 * fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
{
	/* Unexpected by design: halt with the handler's name for diagnosis. */
	panic("%s\n", __FUNCTION__);
}
520 | 531 | ||
521 | /** Data Dirty bit fault handler. |
532 | /** Data Dirty bit fault handler. |
522 | * |
533 | * |
523 | * @param vector Interruption vector. |
534 | * @param vector Interruption vector. |
524 | * @param istate Structure with saved interruption state. |
535 | * @param istate Structure with saved interruption state. |
525 | */ |
536 | */ |
526 | void data_dirty_bit_fault(__u64 vector, istate_t *istate) |
537 | void data_dirty_bit_fault(__u64 vector, istate_t *istate) |
527 | { |
538 | { |
528 | region_register rr; |
539 | region_register rr; |
529 | rid_t rid; |
540 | rid_t rid; |
530 | __address va; |
541 | __address va; |
531 | pte_t *t; |
542 | pte_t *t; |
532 | 543 | ||
533 | va = istate->cr_ifa; /* faulting address */ |
544 | va = istate->cr_ifa; /* faulting address */ |
534 | rr.word = rr_read(VA2VRN(va)); |
545 | rr.word = rr_read(VA2VRN(va)); |
535 | rid = rr.map.rid; |
546 | rid = rr.map.rid; |
536 | 547 | ||
537 | page_table_lock(AS, true); |
548 | page_table_lock(AS, true); |
538 | t = page_mapping_find(AS, va); |
549 | t = page_mapping_find(AS, va); |
539 | ASSERT(t && t->p); |
550 | ASSERT(t && t->p); |
540 | if (t && t->p && t->w) { |
551 | if (t && t->p && t->w) { |
541 | /* |
552 | /* |
542 | * Update the Dirty bit in page tables and reinsert |
553 | * Update the Dirty bit in page tables and reinsert |
543 | * the mapping into DTC. |
554 | * the mapping into DTC. |
544 | */ |
555 | */ |
545 | t->d = true; |
556 | t->d = true; |
546 | dtc_pte_copy(t); |
557 | dtc_pte_copy(t); |
547 | } else { |
558 | } else { |
548 | if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { |
559 | if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { |
549 | fault_if_from_uspace(istate,"Page fault at %P",va); |
560 | fault_if_from_uspace(istate,"Page fault at %P",va); |
550 | panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); |
561 | panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); |
551 | t->d = true; |
562 | t->d = true; |
552 | dtc_pte_copy(t); |
563 | dtc_pte_copy(t); |
553 | } |
564 | } |
554 | } |
565 | } |
555 | page_table_unlock(AS, true); |
566 | page_table_unlock(AS, true); |
556 | } |
567 | } |
557 | 568 | ||
558 | /** Instruction access bit fault handler. |
569 | /** Instruction access bit fault handler. |
559 | * |
570 | * |
560 | * @param vector Interruption vector. |
571 | * @param vector Interruption vector. |
561 | * @param istate Structure with saved interruption state. |
572 | * @param istate Structure with saved interruption state. |
562 | */ |
573 | */ |
563 | void instruction_access_bit_fault(__u64 vector, istate_t *istate) |
574 | void instruction_access_bit_fault(__u64 vector, istate_t *istate) |
564 | { |
575 | { |
565 | region_register rr; |
576 | region_register rr; |
566 | rid_t rid; |
577 | rid_t rid; |
567 | __address va; |
578 | __address va; |
568 | pte_t *t; |
579 | pte_t *t; |
569 | 580 | ||
570 | va = istate->cr_ifa; /* faulting address */ |
581 | va = istate->cr_ifa; /* faulting address */ |
571 | rr.word = rr_read(VA2VRN(va)); |
582 | rr.word = rr_read(VA2VRN(va)); |
572 | rid = rr.map.rid; |
583 | rid = rr.map.rid; |
573 | 584 | ||
574 | page_table_lock(AS, true); |
585 | page_table_lock(AS, true); |
575 | t = page_mapping_find(AS, va); |
586 | t = page_mapping_find(AS, va); |
576 | ASSERT(t && t->p); |
587 | ASSERT(t && t->p); |
577 | if (t && t->p && t->x) { |
588 | if (t && t->p && t->x) { |
578 | /* |
589 | /* |
579 | * Update the Accessed bit in page tables and reinsert |
590 | * Update the Accessed bit in page tables and reinsert |
580 | * the mapping into ITC. |
591 | * the mapping into ITC. |
581 | */ |
592 | */ |
582 | t->a = true; |
593 | t->a = true; |
583 | itc_pte_copy(t); |
594 | itc_pte_copy(t); |
584 | } else { |
595 | } else { |
585 | if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { |
596 | if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { |
586 | fault_if_from_uspace(istate,"Page fault at %P",va); |
597 | fault_if_from_uspace(istate,"Page fault at %P",va); |
587 | panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); |
598 | panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); |
588 | t->a = true; |
599 | t->a = true; |
589 | itc_pte_copy(t); |
600 | itc_pte_copy(t); |
590 | } |
601 | } |
591 | } |
602 | } |
592 | page_table_unlock(AS, true); |
603 | page_table_unlock(AS, true); |
593 | } |
604 | } |
594 | 605 | ||
595 | /** Data access bit fault handler. |
606 | /** Data access bit fault handler. |
596 | * |
607 | * |
597 | * @param vector Interruption vector. |
608 | * @param vector Interruption vector. |
598 | * @param istate Structure with saved interruption state. |
609 | * @param istate Structure with saved interruption state. |
599 | */ |
610 | */ |
600 | void data_access_bit_fault(__u64 vector, istate_t *istate) |
611 | void data_access_bit_fault(__u64 vector, istate_t *istate) |
601 | { |
612 | { |
602 | region_register rr; |
613 | region_register rr; |
603 | rid_t rid; |
614 | rid_t rid; |
604 | __address va; |
615 | __address va; |
605 | pte_t *t; |
616 | pte_t *t; |
606 | 617 | ||
607 | va = istate->cr_ifa; /* faulting address */ |
618 | va = istate->cr_ifa; /* faulting address */ |
608 | rr.word = rr_read(VA2VRN(va)); |
619 | rr.word = rr_read(VA2VRN(va)); |
609 | rid = rr.map.rid; |
620 | rid = rr.map.rid; |
610 | 621 | ||
611 | page_table_lock(AS, true); |
622 | page_table_lock(AS, true); |
612 | t = page_mapping_find(AS, va); |
623 | t = page_mapping_find(AS, va); |
613 | ASSERT(t && t->p); |
624 | ASSERT(t && t->p); |
614 | if (t && t->p) { |
625 | if (t && t->p) { |
615 | /* |
626 | /* |
616 | * Update the Accessed bit in page tables and reinsert |
627 | * Update the Accessed bit in page tables and reinsert |
617 | * the mapping into DTC. |
628 | * the mapping into DTC. |
618 | */ |
629 | */ |
619 | t->a = true; |
630 | t->a = true; |
620 | dtc_pte_copy(t); |
631 | dtc_pte_copy(t); |
621 | } else { |
632 | } else { |
622 | if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
633 | if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
623 | fault_if_from_uspace(istate,"Page fault at %P",va); |
634 | fault_if_from_uspace(istate,"Page fault at %P",va); |
624 | panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); |
635 | panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); |
625 | t->a = true; |
636 | t->a = true; |
626 | itc_pte_copy(t); |
637 | itc_pte_copy(t); |
627 | } |
638 | } |
628 | } |
639 | } |
629 | page_table_unlock(AS, true); |
640 | page_table_unlock(AS, true); |
630 | } |
641 | } |
631 | 642 | ||
632 | /** Page not present fault handler. |
643 | /** Page not present fault handler. |
633 | * |
644 | * |
634 | * @param vector Interruption vector. |
645 | * @param vector Interruption vector. |
635 | * @param istate Structure with saved interruption state. |
646 | * @param istate Structure with saved interruption state. |
636 | */ |
647 | */ |
637 | void page_not_present(__u64 vector, istate_t *istate) |
648 | void page_not_present(__u64 vector, istate_t *istate) |
638 | { |
649 | { |
639 | region_register rr; |
650 | region_register rr; |
640 | rid_t rid; |
651 | rid_t rid; |
641 | __address va; |
652 | __address va; |
642 | pte_t *t; |
653 | pte_t *t; |
643 | 654 | ||
644 | va = istate->cr_ifa; /* faulting address */ |
655 | va = istate->cr_ifa; /* faulting address */ |
645 | rr.word = rr_read(VA2VRN(va)); |
656 | rr.word = rr_read(VA2VRN(va)); |
646 | rid = rr.map.rid; |
657 | rid = rr.map.rid; |
647 | 658 | ||
648 | page_table_lock(AS, true); |
659 | page_table_lock(AS, true); |
649 | t = page_mapping_find(AS, va); |
660 | t = page_mapping_find(AS, va); |
650 | ASSERT(t); |
661 | ASSERT(t); |
651 | 662 | ||
652 | if (t->p) { |
663 | if (t->p) { |
653 | /* |
664 | /* |
654 | * If the Present bit is set in page hash table, just copy it |
665 | * If the Present bit is set in page hash table, just copy it |
655 | * and update ITC/DTC. |
666 | * and update ITC/DTC. |
656 | */ |
667 | */ |
657 | if (t->x) |
668 | if (t->x) |
658 | itc_pte_copy(t); |
669 | itc_pte_copy(t); |
659 | else |
670 | else |
660 | dtc_pte_copy(t); |
671 | dtc_pte_copy(t); |
661 | page_table_unlock(AS, true); |
672 | page_table_unlock(AS, true); |
662 | } else { |
673 | } else { |
663 | page_table_unlock(AS, true); |
674 | page_table_unlock(AS, true); |
664 | if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
675 | if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
665 | fault_if_from_uspace(istate,"Page fault at %P",va); |
676 | fault_if_from_uspace(istate,"Page fault at %P",va); |
666 | panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid); |
677 | panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid); |
667 | } |
678 | } |
668 | } |
679 | } |
669 | } |
680 | } |
670 | 681 |