/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file	as.c
 * @brief	Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * The functionality provided by this file allows one to
 * create an address space and to create, resize and share
 * address space areas.
 *
 * @see page.c
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	spinlock_initialize(&as->lock, "as_lock");
	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}

/** Free address space. */
void as_free(as_t *as)
{
	ASSERT(as->refcount == 0);

	/* TODO: free as_areas and other resources held by as */
	/* TODO: free page table */
	free(as);
}

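/*
 * Illustrative sketch (not compiled): the intended pairing of as_create()
 * and as_free(), e.g. when a task is spawned and later destroyed. Passing
 * no flags creates a userspace address space whose ASID is assigned lazily
 * by as_switch().
 *
 *	as_t *as;
 *
 *	as = as_create(0);
 *	// ... attach the address space to a task, create areas, run ...
 *	// once no processor uses it and its refcount dropped back to zero:
 *	as_free(as);
 */
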
/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of the area.
 * @param base Base address of the area.
 * @param attrs Attributes of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	spinlock_initialize(&a->lock, "as_area_lock");

	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;

	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}

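/*
 * Illustrative sketch (not compiled): creating an anonymous, lazily
 * populated read/write area in the current address space. The base
 * address 0x20000000 is hypothetical; it only has to be page-aligned
 * and free of conflicts. No frames are allocated here; they are
 * provided page by page in as_page_fault().
 *
 *	as_area_t *a;
 *
 *	a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE, 16 * PAGE_SIZE,
 *		0x20000000, AS_AREA_ATTR_NONE);
 *	if (!a)
 *		return ENOMEM;	// conflict, bad alignment or zero size
 */
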
/** Find address space area and resize it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->flags & AS_AREA_DEVICE) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		spinlock_unlock(&area->lock);
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		spinlock_unlock(&area->lock);
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		int i;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */
		for (i = pages; i < area->pages; i++) {
			pte_t *pte;

			/*
			 * Releasing physical memory.
			 * This depends on the fact that the memory was allocated using frame_alloc().
			 */
			page_table_lock(as, false);
			pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
			if (pte && PTE_VALID(pte)) {
				__address frame;

				ASSERT(PTE_PRESENT(pte));
				frame = PTE_GET_FRAME(pte);
				page_mapping_remove(as, area->base + i*PAGE_SIZE);
				page_table_unlock(as, false);

				frame_free(ADDR2PFN(frame));
			} else {
				page_table_unlock(as, false);
			}
		}
		/*
		 * Invalidate TLB's.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
			spinlock_unlock(&area->lock);
			spinlock_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	spinlock_unlock(&area->lock);
	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}

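/*
 * Illustrative sketch (not compiled): resizing the hypothetical area from
 * the previous example. Growing is refused with EADDRNOTAVAIL if the new
 * range would conflict with another area; shrinking frees the frames of
 * the truncated pages and shoots down the corresponding TLB entries.
 *
 *	int rc;
 *
 *	rc = as_area_resize(AS, 0x20000000, 32 * PAGE_SIZE, 0);	// grow
 *	if (rc == 0)
 *		rc = as_area_resize(AS, 0x20000000, 8 * PAGE_SIZE, 0);	// shrink
 */
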
/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, __address address)
{
	as_area_t *area;
	__address base;
	ipl_t ipl;
	int i;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;
	for (i = 0; i < area->pages; i++) {
		pte_t *pte;

		/*
		 * Releasing physical memory.
		 * Areas mapping memory-mapped devices are treated differently than
		 * areas backing frame_alloc()'ed memory.
		 */
		page_table_lock(as, false);
		pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
		if (pte && PTE_VALID(pte)) {
			ASSERT(PTE_PRESENT(pte));
			page_mapping_remove(as, area->base + i*PAGE_SIZE);
			if (!(area->flags & AS_AREA_DEVICE)) {
				__address frame;

				frame = PTE_GET_FRAME(pte);
				frame_free(ADDR2PFN(frame));
			}
			page_table_unlock(as, false);
		} else {
			page_table_unlock(as, false);
		}
	}
	/*
	 * Invalidate TLB's.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);
	tlb_invalidate_pages(AS->asid, area->base, area->pages);
	tlb_shootdown_finalize();

	area->attributes |= AS_AREA_ATTR_PARTIAL;
	spinlock_unlock(&area->lock);

	/*
	 * Remove the empty area from address space.
	 */
	btree_remove(&as->as_area_btree, base, NULL);

	free(area);

	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);
	return 0;
}

/** Steal address space area from another task.
 *
 * The address space area is stolen from another task.
 * Moreover, any existing mapping is copied as well,
 * thus providing a mechanism for sharing a group of pages.
 * The source address space area and any associated
 * mapping are preserved.
 *
 * @param src_task Pointer to the source task.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_base Target base address.
 *
 * @return Zero on success, ENOENT if there is no such task or
 *	   no such address space area, EPERM if there was a problem
 *	   in accepting the area, or ENOMEM if there was a problem
 *	   in allocating the destination address space area.
 */
int as_area_steal(task_t *src_task, __address src_base, size_t acc_size,
		  __address dst_base)
{
	ipl_t ipl;
	count_t i;
	as_t *src_as;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;

	ipl = interrupts_disable();
	spinlock_lock(&src_task->lock);
	src_as = src_task->as;

	spinlock_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		spinlock_unlock(&src_task->lock);
		spinlock_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	spinlock_unlock(&src_area->lock);
	spinlock_unlock(&src_as->lock);

	if (src_size != acc_size) {
		spinlock_unlock(&src_task->lock);
		interrupts_restore(ipl);
		return EPERM;
	}
	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 */
	dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		spinlock_unlock(&src_task->lock);
		interrupts_restore(ipl);
		return ENOMEM;
	}

	spinlock_unlock(&src_task->lock);

	/*
	 * Avoid deadlock by first locking the address space with the lower address.
	 */
	if (AS < src_as) {
		spinlock_lock(&AS->lock);
		spinlock_lock(&src_as->lock);
	} else {
		spinlock_lock(&src_as->lock);
		spinlock_lock(&AS->lock);
	}

	for (i = 0; i < SIZE2FRAMES(src_size); i++) {
		pte_t *pte;
		__address frame;

		page_table_lock(src_as, false);
		pte = page_mapping_find(src_as, src_base + i*PAGE_SIZE);
		if (pte && PTE_VALID(pte)) {
			ASSERT(PTE_PRESENT(pte));
			frame = PTE_GET_FRAME(pte);
			if (!(src_flags & AS_AREA_DEVICE))
				frame_reference_add(ADDR2PFN(frame));
			page_table_unlock(src_as, false);
		} else {
			page_table_unlock(src_as, false);
			continue;
		}

		page_table_lock(AS, false);
		page_mapping_insert(AS, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags));
		page_table_unlock(AS, false);
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute.
	 */
	spinlock_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	spinlock_unlock(&dst_area->lock);

	spinlock_unlock(&AS->lock);
	spinlock_unlock(&src_as->lock);
	interrupts_restore(ipl);

	return 0;
}

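/*
 * Illustrative sketch (not compiled): stealing a 16-page area from another
 * task into the current address space. All names and addresses here are
 * hypothetical; acc_size must match the current size of the source area
 * exactly, otherwise EPERM is returned.
 *
 *	int rc;
 *
 *	rc = as_area_steal(src_task, 0x20000000, 16 * PAGE_SIZE, 0x30000000);
 *	if (rc == 0) {
 *		// the source frames are now mapped at 0x30000000 in AS, too
 *	}
 */
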
/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
	as_area_t *area;
	ipl_t ipl;

	ipl = interrupts_disable();
	page_table_lock(as, true);

	area = find_area_and_lock(as, page);
	if (!area) {
		panic("page not part of any as_area\n");
	}

	page_mapping_insert(as, page, frame, get_area_flags(area));

	spinlock_unlock(&area->lock);
	page_table_unlock(as, true);
	interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success, or AS_PF_DEFER if the
 *	   fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(__address page, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;
	__address frame;

	ASSERT(AS);

	spinlock_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		spinlock_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		spinlock_unlock(&area->lock);
		spinlock_unlock(&AS->lock);
		goto page_fault;
	}

	ASSERT(!(area->flags & AS_AREA_DEVICE));

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			page_table_unlock(AS, false);
			spinlock_unlock(&area->lock);
			spinlock_unlock(&AS->lock);
			return AS_PF_OK;
		}
	}

	/*
	 * In general, there can be several reasons that
	 * can have caused this fault.
	 *
	 * - non-existent mapping: the area is a scratch
	 *   area (e.g. stack) and so far has not been
	 *   allocated a frame for the faulting page
	 *
	 * - non-present mapping: another possibility,
	 *   currently not implemented, would be frame
	 *   reuse; when this becomes a possibility,
	 *   do not forget to distinguish between
	 *   the different causes
	 */
	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	memsetb(PA2KA(frame), FRAME_SIZE, 0);

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, page, frame, get_area_flags(area));
	page_table_unlock(AS, false);

	spinlock_unlock(&area->lock);
	spinlock_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	if (!THREAD)
		return AS_PF_FAULT;

	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}

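/*
 * Illustrative sketch (not compiled): how an architecture-level fault
 * handler is expected to use as_page_fault(). The handler name below is
 * hypothetical; the real entry points live in the per-architecture code
 * and pass the faulting address together with the interrupted state.
 *
 *	static void fault_handler(__address page, istate_t *istate)
 *	{
 *		int rc;
 *
 *		rc = as_page_fault(page, istate);
 *		if (rc == AS_PF_FAULT)
 *			panic("unhandled page fault\n");
 *		// AS_PF_OK: a frame was mapped, the faulting instruction can retry
 *		// AS_PF_DEFER: return via the copy_*_uspace() failover address
 *	}
 */
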
/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&as_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		spinlock_lock(&old->lock);
		ASSERT(old->refcount);
		if ((--old->refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		spinlock_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	spinlock_lock(&new->lock);
	if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	spinlock_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		spinlock_lock(&new->lock);
		new->asid = asid;
		spinlock_unlock(&new->lock);
	}
	spinlock_unlock(&as_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}

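/*
 * Illustrative sketch (not compiled): the intended caller of as_switch() is
 * the scheduler, when the thread selected to run next belongs to a task with
 * a different address space. The names below are illustrative, not taken
 * from scheduler.c; old_as may be NULL on the very first switch.
 *
 *	if (old_as != new_task->as)
 *		as_switch(old_as, new_task->as);
 */
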
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (!(aflags & AS_AREA_DEVICE))
		flags |= PAGE_CACHEABLE;

	return flags;
}

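/*
 * Worked example: a read-only executable area (AS_AREA_READ | AS_AREA_EXEC)
 * yields PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_EXEC | PAGE_CACHEABLE.
 * PAGE_WRITE is left out because the area is not writable and PAGE_CACHEABLE
 * is added because the area is not a device area.
 */
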
/** Compute flags for the virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}

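/*
 * Illustrative sketch (not compiled): the locking pattern documented above,
 * as used by as_page_fault(). The address space and the area are already
 * locked there, so the page table is locked with the lock argument false;
 * as_set_mapping(), which does not hold as->lock, passes true instead.
 *
 *	// as->lock and area->lock are held at this point
 *	page_table_lock(as, false);
 *	page_mapping_insert(as, page, frame, get_area_flags(area));
 *	page_table_unlock(as, false);
 */
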
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		spinlock_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		spinlock_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		spinlock_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		spinlock_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		spinlock_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}
	if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[0];
		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check if it doesn't conflict with kernel address space.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}

/** Return the size of the address space area of the current task that contains base, or zero if there is no such area. */
size_t as_get_size(__address base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		spinlock_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
	if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
		return (__native) address;
	else
		return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
	return (__native) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
__native sys_as_area_destroy(__address address)
{
	return (__native) as_area_destroy(AS, address);
}