Rev 395 | Rev 397 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 395 | Rev 396 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2003-2004 Jakub Jermar |
2 | * Copyright (C) 2003-2004 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | #include <arch/mm/tlb.h> |
29 | #include <arch/mm/tlb.h> |
30 | #include <arch/mm/asid.h> |
30 | #include <arch/mm/asid.h> |
31 | #include <mm/tlb.h> |
31 | #include <mm/tlb.h> |
32 | #include <mm/page.h> |
32 | #include <mm/page.h> |
33 | #include <mm/vm.h> |
33 | #include <mm/vm.h> |
34 | #include <arch/cp0.h> |
34 | #include <arch/cp0.h> |
35 | #include <panic.h> |
35 | #include <panic.h> |
36 | #include <arch.h> |
36 | #include <arch.h> |
37 | #include <symtab.h> |
37 | #include <symtab.h> |
38 | #include <synch/spinlock.h> |
38 | #include <synch/spinlock.h> |
39 | #include <print.h> |
39 | #include <print.h> |
- | 40 | #include <debug.h> |
|
40 | 41 | ||
41 | static void tlb_refill_fail(struct exception_regdump *pstate); |
42 | static void tlb_refill_fail(struct exception_regdump *pstate); |
42 | static void tlb_invalid_fail(struct exception_regdump *pstate); |
43 | static void tlb_invalid_fail(struct exception_regdump *pstate); |
43 | static void tlb_modified_fail(struct exception_regdump *pstate); |
44 | static void tlb_modified_fail(struct exception_regdump *pstate); |
44 | 45 | ||
45 | static pte_t *find_mapping_and_check(__address badvaddr); |
46 | static pte_t *find_mapping_and_check(__address badvaddr); |
46 | static void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, int c, __address pfn); |
47 | static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, int c, __address pfn); |
47 | 48 | ||
/** Initialize TLB
 *
 * Initialize TLB.
 * Invalidate all entries and mark wired entries.
 */
void tlb_init_arch(void)
{
	int i;

	/* Use 16K pages and zero out the match/data registers so that
	 * each indexed write below installs a non-matching (invalid) entry. */
	cp0_pagemask_write(TLB_PAGE_MASK_16K);
	cp0_entry_hi_write(0);
	cp0_entry_lo0_write(0);
	cp0_entry_lo1_write(0);

	/*
	 * Invalidate all entries.
	 */
	for (i = 0; i < TLB_SIZE; i++) {
		cp0_index_write(i);
		tlbwi();	/* indexed write of the zeroed registers */
	}

	/*
	 * The kernel is going to make use of some wired
	 * entries (e.g. mapping kernel stacks in kseg3).
	 * Entries below the Wired register are never selected by tlbwr().
	 */
	cp0_wired_write(TLB_WIRED);
}
76 | 77 | ||
/** Process TLB Refill Exception
 *
 * Process TLB Refill Exception.
 *
 * @param pstate Interrupted register context.
 */
void tlb_refill(struct exception_regdump *pstate)
{
	entry_lo_t lo;
	__address badvaddr;
	pte_t *pte;

	badvaddr = cp0_badvaddr_read();

	/* find_mapping_and_check() requires VM->lock to be held. */
	spinlock_lock(&VM->lock);
	pte = find_mapping_and_check(badvaddr);
	if (!pte)
		goto fail;

	/*
	 * Record access to PTE.
	 */
	pte->a = 1;

	prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);

	/*
	 * New entry is to be inserted into TLB.
	 * Each TLB entry maps a pair of pages; even pages go into
	 * EntryLo0, odd pages into EntryLo1, the other half is invalidated.
	 */
	if ((badvaddr/PAGE_SIZE) % 2 == 0) {
		cp0_entry_lo0_write(lo.value);
		cp0_entry_lo1_write(0);
	}
	else {
		cp0_entry_lo0_write(0);
		cp0_entry_lo1_write(lo.value);
	}
	tlbwr();	/* write to a random non-wired TLB slot */

	spinlock_unlock(&VM->lock);
	return;

fail:
	spinlock_unlock(&VM->lock);
	tlb_refill_fail(pstate);
}
123 | 124 | ||
/** Process TLB Invalid Exception
 *
 * Process TLB Invalid Exception.
 *
 * @param pstate Interrupted register context.
 */
void tlb_invalid(struct exception_regdump *pstate)
{
	tlb_index_t index;
	__address badvaddr;
	entry_lo_t lo;
	pte_t *pte;

	badvaddr = cp0_badvaddr_read();

	/*
	 * Locate the faulting entry in TLB.
	 * tlbp() probes using the current EntryHi and deposits the
	 * result into the Index register.
	 */
	tlbp();
	index.value = cp0_index_read();

	spinlock_lock(&VM->lock);

	/*
	 * Fail if the entry is not in TLB (probe-failure bit set).
	 */
	if (index.p) {
		printf("TLB entry not found.\n");
		goto fail;
	}

	pte = find_mapping_and_check(badvaddr);
	if (!pte)
		goto fail;

	/*
	 * Read the faulting TLB entry into the EntryHi/EntryLo registers
	 * so the unmodified half of the pair is preserved by tlbwi() below.
	 */
	tlbr();

	/*
	 * Record access to PTE.
	 */
	pte->a = 1;

	prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);

	/*
	 * The entry is to be updated in TLB.
	 * Even pages live in EntryLo0, odd pages in EntryLo1.
	 */
	if ((badvaddr/PAGE_SIZE) % 2 == 0)
		cp0_entry_lo0_write(lo.value);
	else
		cp0_entry_lo1_write(lo.value);
	tlbwi();	/* write back to the slot found by the probe */

	spinlock_unlock(&VM->lock);
	return;

fail:
	spinlock_unlock(&VM->lock);
	tlb_invalid_fail(pstate);
}
185 | 188 | ||
/** Process TLB Modified Exception
 *
 * Process TLB Modified Exception.
 *
 * @param pstate Interrupted register context.
 */
void tlb_modified(struct exception_regdump *pstate)
{
	tlb_index_t index;
	__address badvaddr;
	entry_lo_t lo;
	pte_t *pte;

	badvaddr = cp0_badvaddr_read();

	/*
	 * Locate the faulting entry in TLB.
	 * tlbp() probes using the current EntryHi and deposits the
	 * result into the Index register.
	 */
	tlbp();
	index.value = cp0_index_read();

	spinlock_lock(&VM->lock);

	/*
	 * Fail if the entry is not in TLB (probe-failure bit set).
	 */
	if (index.p) {
		printf("TLB entry not found.\n");
		goto fail;
	}

	pte = find_mapping_and_check(badvaddr);
	if (!pte)
		goto fail;

	/*
	 * Fail if the page is not writable.
	 */
	if (!pte->w)
		goto fail;

	/*
	 * Read the faulting TLB entry into the EntryHi/EntryLo registers
	 * so the unmodified half of the pair is preserved by tlbwi() below.
	 */
	tlbr();

	/*
	 * Record access and write to PTE.
	 */
	pte->a = 1;
	pte->d = 1;

	/* Note: the hardware dirty bit is taken from pte->w here (the page
	 * is writable), unlike the refill/invalid paths which pass pte->d. */
	prepare_entry_lo(&lo, pte->g, pte->v, pte->w, pte->c, pte->pfn);

	/*
	 * The entry is to be updated in TLB.
	 * Even pages live in EntryLo0, odd pages in EntryLo1.
	 */
	if ((badvaddr/PAGE_SIZE) % 2 == 0)
		cp0_entry_lo0_write(lo.value);
	else
		cp0_entry_lo1_write(lo.value);
	tlbwi();	/* write back to the slot found by the probe */

	spinlock_unlock(&VM->lock);
	return;

fail:
	spinlock_unlock(&VM->lock);
	tlb_modified_fail(pstate);
}
255 | 259 | ||
256 | void tlb_refill_fail(struct exception_regdump *pstate) |
260 | void tlb_refill_fail(struct exception_regdump *pstate) |
257 | { |
261 | { |
258 | char *symbol = ""; |
262 | char *symbol = ""; |
259 | char *sym2 = ""; |
263 | char *sym2 = ""; |
260 | 264 | ||
261 | char *s = get_symtab_entry(pstate->epc); |
265 | char *s = get_symtab_entry(pstate->epc); |
262 | if (s) |
266 | if (s) |
263 | symbol = s; |
267 | symbol = s; |
264 | s = get_symtab_entry(pstate->ra); |
268 | s = get_symtab_entry(pstate->ra); |
265 | if (s) |
269 | if (s) |
266 | sym2 = s; |
270 | sym2 = s; |
267 | panic("%X: TLB Refill Exception at %X(%s<-%s)\n", cp0_badvaddr_read(), pstate->epc, symbol, sym2); |
271 | panic("%X: TLB Refill Exception at %X(%s<-%s)\n", cp0_badvaddr_read(), pstate->epc, symbol, sym2); |
268 | } |
272 | } |
269 | 273 | ||
270 | 274 | ||
271 | void tlb_invalid_fail(struct exception_regdump *pstate) |
275 | void tlb_invalid_fail(struct exception_regdump *pstate) |
272 | { |
276 | { |
273 | char *symbol = ""; |
277 | char *symbol = ""; |
274 | 278 | ||
275 | char *s = get_symtab_entry(pstate->epc); |
279 | char *s = get_symtab_entry(pstate->epc); |
276 | if (s) |
280 | if (s) |
277 | symbol = s; |
281 | symbol = s; |
278 | panic("%X: TLB Invalid Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol); |
282 | panic("%X: TLB Invalid Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol); |
279 | } |
283 | } |
280 | 284 | ||
281 | void tlb_modified_fail(struct exception_regdump *pstate) |
285 | void tlb_modified_fail(struct exception_regdump *pstate) |
282 | { |
286 | { |
283 | char *symbol = ""; |
287 | char *symbol = ""; |
284 | 288 | ||
285 | char *s = get_symtab_entry(pstate->epc); |
289 | char *s = get_symtab_entry(pstate->epc); |
286 | if (s) |
290 | if (s) |
287 | symbol = s; |
291 | symbol = s; |
288 | panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol); |
292 | panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol); |
289 | } |
293 | } |
290 | 294 | ||
- | 295 | /** Invalidate TLB entries with specified ASID |
|
- | 296 | * |
|
- | 297 | * Invalidate TLB entries with specified ASID. |
|
291 | 298 | * |
|
- | 299 | * @param asid ASID. |
|
- | 300 | */ |
|
292 | void tlb_invalidate(int asid) |
301 | void tlb_invalidate(asid_t asid) |
293 | { |
302 | { |
- | 303 | entry_hi_t hi; |
|
294 | pri_t pri; |
304 | pri_t pri; |
- | 305 | int i; |
|
- | 306 | ||
- | 307 | ASSERT(asid != ASID_INVALID); |
|
295 | 308 | ||
296 | pri = cpu_priority_high(); |
309 | pri = cpu_priority_high(); |
297 | 310 | ||
- | 311 | for (i = 0; i < TLB_SIZE; i++) { |
|
- | 312 | cp0_index_write(i); |
|
298 | // TODO |
313 | tlbr(); |
- | 314 | ||
- | 315 | hi.value = cp0_entry_hi_read(); |
|
- | 316 | if (hi.asid == asid) { |
|
- | 317 | cp0_pagemask_write(TLB_PAGE_MASK_16K); |
|
- | 318 | cp0_entry_hi_write(0); |
|
- | 319 | cp0_entry_lo0_write(0); |
|
- | 320 | cp0_entry_lo1_write(0); |
|
- | 321 | tlbwi(); |
|
- | 322 | } |
|
- | 323 | } |
|
299 | 324 | ||
300 | cpu_priority_restore(pri); |
325 | cpu_priority_restore(pri); |
301 | } |
326 | } |
302 | 327 | ||
303 | /** Try to find PTE for faulting address |
328 | /** Try to find PTE for faulting address |
304 | * |
329 | * |
305 | * Try to find PTE for faulting address. |
330 | * Try to find PTE for faulting address. |
306 | * The VM->lock must be held on entry to this function. |
331 | * The VM->lock must be held on entry to this function. |
307 | * |
332 | * |
308 | * @param badvaddr Faulting virtual address. |
333 | * @param badvaddr Faulting virtual address. |
309 | * |
334 | * |
310 | * @return PTE on success, NULL otherwise. |
335 | * @return PTE on success, NULL otherwise. |
311 | */ |
336 | */ |
312 | pte_t *find_mapping_and_check(__address badvaddr) |
337 | pte_t *find_mapping_and_check(__address badvaddr) |
313 | { |
338 | { |
314 | struct entry_hi hi; |
339 | entry_hi_t hi; |
315 | pte_t *pte; |
340 | pte_t *pte; |
316 | 341 | ||
317 | *((__u32 *) &hi) = cp0_entry_hi_read(); |
342 | hi.value = cp0_entry_hi_read(); |
318 | 343 | ||
319 | /* |
344 | /* |
320 | * Handler cannot succeed if the ASIDs don't match. |
345 | * Handler cannot succeed if the ASIDs don't match. |
321 | */ |
346 | */ |
322 | if (hi.asid != VM->asid) |
347 | if (hi.asid != VM->asid) { |
- | 348 | printf("EntryHi.asid=%d, VM->asid=%d\n", hi.asid, VM->asid); |
|
323 | return NULL; |
349 | return NULL; |
- | 350 | } |
|
324 | 351 | ||
325 | /* |
352 | /* |
326 | * Handler cannot succeed if badvaddr has no mapping. |
353 | * Handler cannot succeed if badvaddr has no mapping. |
327 | */ |
354 | */ |
328 | pte = find_mapping(badvaddr, 0); |
355 | pte = find_mapping(badvaddr, 0); |
329 | if (!pte) |
356 | if (!pte) { |
- | 357 | printf("No such mapping.\n"); |
|
330 | return NULL; |
358 | return NULL; |
- | 359 | } |
|
331 | 360 | ||
332 | /* |
361 | /* |
333 | * Handler cannot succeed if the mapping is marked as invalid. |
362 | * Handler cannot succeed if the mapping is marked as invalid. |
334 | */ |
363 | */ |
335 | if (!pte->v) |
364 | if (!pte->v) { |
- | 365 | printf("Invalid mapping.\n"); |
|
336 | return NULL; |
366 | return NULL; |
- | 367 | } |
|
337 | 368 | ||
338 | return pte; |
369 | return pte; |
339 | } |
370 | } |
340 | 371 | ||
341 | void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, int c, __address pfn) |
372 | void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, int c, __address pfn) |
342 | { |
373 | { |
343 | lo->g = g; |
374 | lo->g = g; |
344 | lo->v = v; |
375 | lo->v = v; |
345 | lo->d = d; |
376 | lo->d = d; |
346 | lo->c = c; |
377 | lo->c = c; |
347 | lo->pfn = pfn; |
378 | lo->pfn = pfn; |
348 | lo->zero = 0; |
379 | lo->zero = 0; |
349 | } |
380 | } |
350 | 381 |