Rev 1378 | Rev 1384
Line 34... | Line 34...

```diff
 #include <mm/page.h>
 #include <mm/as.h>
 #include <arch.h>
 #include <arch/types.h>
 #include <arch/exception.h>
+#include <align.h>
 #include <config.h>
 #include <print.h>
 #include <symtab.h>
 
 static phte_t *phte;
 
 
 /** Try to find PTE for faulting address
  *
  * Try to find PTE for faulting address.
- * The AS->lock must be held on entry to this function.
+ * The as->lock must be held on entry to this function
+ * if lock is true.
  *
+ * @param as Address space.
+ * @param lock Lock/unlock the address space.
  * @param badvaddr Faulting virtual address.
  * @param istate Pointer to interrupted state.
  * @param pfrc Pointer to variable where as_page_fault() return code will be stored.
  * @return PTE on success, NULL otherwise.
  *
  */
-static pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfcr)
+static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr)
 {
 	/*
 	 * Check if the mapping exists in page tables.
 	 */
-	pte_t *pte = page_mapping_find(AS, badvaddr);
+	pte_t *pte = page_mapping_find(as, badvaddr);
 	if ((pte) && (pte->p)) {
 		/*
 		 * Mapping found in page tables.
 		 * Immediately succeed.
 		 */
```
Line 71... | Line 75...

```diff
 
 	/*
 	 * Mapping not found in page tables.
 	 * Resort to higher-level page fault handler.
 	 */
-	page_table_unlock(AS, true);
+	page_table_unlock(as, lock);
 	switch (rc = as_page_fault(badvaddr, istate)) {
 	case AS_PF_OK:
 		/*
 		 * The higher-level page fault handler succeeded,
 		 * The mapping ought to be in place.
 		 */
-		page_table_lock(AS, true);
-		pte = page_mapping_find(AS, badvaddr);
+		page_table_lock(as, lock);
+		pte = page_mapping_find(as, badvaddr);
 		ASSERT((pte) && (pte->p));
 		return pte;
 	case AS_PF_DEFER:
-		page_table_lock(AS, true);
+		page_table_lock(as, lock);
 		*pfcr = rc;
 		return NULL;
 	case AS_PF_FAULT:
-		page_table_lock(AS, true);
+		page_table_lock(as, lock);
 		printf("Page fault.\n");
 		*pfcr = rc;
 		return NULL;
 	default:
 		panic("unexpected rc (%d)\n", rc);
```
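The new `as` and `lock` parameters let the lookup run against an explicit address space instead of always using the current one (`AS`), which matters for faults taken before any address space has been installed. A minimal caller sketch following the conventions visible in this revision; the wrapper name `lookup_fault()` is invented for illustration and is not part of the source:

```c
/* Sketch only: how the new find_mapping_and_check() signature is driven.
 * lookup_fault() is a hypothetical wrapper, not part of the revision. */
static pte_t *lookup_fault(__address badvaddr, istate_t *istate, int *pfcr)
{
	as_t *as = (AS == NULL) ? AS_KERNEL : AS;  /* no current AS yet? use the kernel AS */
	bool lock = (AS != NULL);                  /* and skip the locking in that case */
	pte_t *pte;

	page_table_lock(as, lock);
	pte = find_mapping_and_check(as, lock, badvaddr, istate, pfcr);
	if (!pte) {
		/* *pfcr now holds the as_page_fault() return code. */
		page_table_unlock(as, lock);
		return NULL;
	}
	/* On success the page table is still locked; the caller consumes pte
	 * and then calls page_table_unlock(as, lock). */
	return pte;
}
```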
Line 178... | Line 182...

```diff
 void pht_refill(bool data, istate_t *istate)
 {
 	asid_t asid;
 	__address badvaddr;
 	pte_t *pte;
-	
 	int pfcr;
+	as_t *as;
+	bool lock;
+	
+	if (AS == NULL) {
+		as = AS_KERNEL;
+		lock = false;
+	} else {
+		as = AS;
+		lock = true;
+	}
 	
 	if (data) {
 		asm volatile (
 			"mfdar %0\n"
 			: "=r" (badvaddr)
 		);
 	} else
 		badvaddr = istate->pc;
 	
-	spinlock_lock(&AS->lock);
-	asid = AS->asid;
-	spinlock_unlock(&AS->lock);
+	spinlock_lock(&as->lock);
+	asid = as->asid;
+	spinlock_unlock(&as->lock);
 	
-	page_table_lock(AS, true);
+	page_table_lock(as, lock);
 	
-	pte = find_mapping_and_check(badvaddr, istate, &pfcr);
+	pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr);
 	if (!pte) {
 		switch (pfcr) {
 		case AS_PF_FAULT:
 			goto fail;
 			break;
 		case AS_PF_DEFER:
 			/*
 			 * The page fault came during copy_from_uspace()
 			 * or copy_to_uspace().
 			 */
-			page_table_unlock(AS, true);
+			page_table_unlock(as, lock);
 			return;
 		default:
 			panic("Unexpected pfrc (%d)\n", pfcr);
 		}
 	}
 	
 	pte->a = 1;	/* Record access to PTE */
 	pht_insert(badvaddr, pte->pfn);
 	
-	page_table_unlock(AS, true);
+	page_table_unlock(as, lock);
 	return;
 	
 fail:
-	page_table_unlock(AS, true);
+	page_table_unlock(as, lock);
 	pht_refill_fail(badvaddr, istate);
 }
 
 
 void pht_init(void)
```
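For context on the two fault sources handled above: on ppc32 a data access fault latches the faulting effective address in the DAR special-purpose register (read with `mfdar`), while for an instruction fetch fault the faulting address is simply the interrupted program counter that the low-level handler saved into `istate` (typically from SRR0). A small sketch of that selection, factored into a hypothetical helper that does not exist in the source:

```c
/* Illustration only; fault_address() is not part of the revision. */
static __address fault_address(bool data, istate_t *istate)
{
	__address badvaddr;

	if (data) {
		/* DAR (Data Address Register) is set by the hardware on data faults. */
		asm volatile ("mfdar %0\n" : "=r" (badvaddr));
	} else {
		/* Instruction faults: the faulting address is the saved PC. */
		badvaddr = istate->pc;
	}
	return badvaddr;
}
```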
Line 236... | Line 249...

```diff
 void page_arch_init(void)
 {
 	if (config.cpu_active == 1) {
 		page_mapping_operations = &pt_mapping_operations;
 		
-		/*
-		 * PA2KA(identity) mapping for all frames until last_frame.
-		 */
-		__address cur;
-		int flags;
-		
-		for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
-			flags = PAGE_CACHEABLE;
-			if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
-				flags |= PAGE_GLOBAL;
-			page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
-		}
-		
 		/* Allocate page hash table */
 		phte_t *physical_phte = (phte_t *) PFN2ADDR(frame_alloc(PHT_ORDER, FRAME_KA | FRAME_PANIC));
 		phte = (phte_t *) PA2KA((__address) physical_phte);
 		
 		ASSERT((__address) physical_phte % (1 << PHT_BITS) == 0);
```
Line 263... | Line 263...

```diff
 			:
 			: "r" ((__address) physical_phte)
 		);
 	}
 }
+
+
+__address hw_map(__address physaddr, size_t size)
+{
+	if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
+		panic("Unable to map physical memory %p (%d bytes)", physaddr, size);
+	
+	__address virtaddr = PA2KA(last_frame);
+	pfn_t i;
+	for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
+		page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
+	
+	last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
+	
+	return virtaddr;
+}
```
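The new `hw_map()` carves kernel virtual address space out of the region above `last_frame` and maps the requested physical range there as non-cacheable, which is what memory-mapped device registers need. A minimal usage sketch; the physical address, size and register layout are invented for illustration:

```c
/* Hypothetical driver snippet: map a 4 KiB register window of a device
 * whose registers live at physical address 0x80000000 (made-up value). */
volatile __u32 *regs = (volatile __u32 *) hw_map(0x80000000, PAGE_SIZE);

/* The returned value is a kernel virtual address backed by a
 * PAGE_NOT_CACHEABLE mapping, so plain loads and stores reach the device. */
__u32 status = regs[0];
```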
|