Rev 2007 | Rev 2015 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
703 | jermar | 1 | /* |
2 | * Copyright (C) 2001-2006 Jakub Jermar |
||
3 | * All rights reserved. |
||
4 | * |
||
5 | * Redistribution and use in source and binary forms, with or without |
||
6 | * modification, are permitted provided that the following conditions |
||
7 | * are met: |
||
8 | * |
||
9 | * - Redistributions of source code must retain the above copyright |
||
10 | * notice, this list of conditions and the following disclaimer. |
||
11 | * - Redistributions in binary form must reproduce the above copyright |
||
12 | * notice, this list of conditions and the following disclaimer in the |
||
13 | * documentation and/or other materials provided with the distribution. |
||
14 | * - The name of the author may not be used to endorse or promote products |
||
15 | * derived from this software without specific prior written permission. |
||
16 | * |
||
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
||
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
||
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
||
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
||
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
||
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
||
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
||
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
||
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
||
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||
27 | */ |
||
28 | |||
1757 | jermar | 29 | /** @addtogroup genericmm |
1702 | cejka | 30 | * @{ |
31 | */ |
||
32 | |||
1248 | jermar | 33 | /** |
1702 | cejka | 34 | * @file |
1248 | jermar | 35 | * @brief Address space related functions. |
36 | * |
||
703 | jermar | 37 | * This file contains address space manipulation functions. |
38 | * Roughly speaking, this is a higher-level client of |
||
39 | * Virtual Address Translation (VAT) subsystem. |
||
1248 | jermar | 40 | * |
41 | * Functionality provided by this file allows one to |
||
1757 | jermar | 42 | * create address spaces and create, resize and share |
1248 | jermar | 43 | * address space areas. |
44 | * |
||
45 | * @see page.c |
||
46 | * |
||
703 | jermar | 47 | */ |
48 | |||
49 | #include <mm/as.h> |
||
756 | jermar | 50 | #include <arch/mm/as.h> |
703 | jermar | 51 | #include <mm/page.h> |
52 | #include <mm/frame.h> |
||
814 | palkovsky | 53 | #include <mm/slab.h> |
703 | jermar | 54 | #include <mm/tlb.h> |
55 | #include <arch/mm/page.h> |
||
56 | #include <genarch/mm/page_pt.h> |
||
1108 | jermar | 57 | #include <genarch/mm/page_ht.h> |
727 | jermar | 58 | #include <mm/asid.h> |
703 | jermar | 59 | #include <arch/mm/asid.h> |
60 | #include <synch/spinlock.h> |
||
1380 | jermar | 61 | #include <synch/mutex.h> |
788 | jermar | 62 | #include <adt/list.h> |
1147 | jermar | 63 | #include <adt/btree.h> |
1235 | jermar | 64 | #include <proc/task.h> |
1288 | jermar | 65 | #include <proc/thread.h> |
1235 | jermar | 66 | #include <arch/asm.h> |
703 | jermar | 67 | #include <panic.h> |
68 | #include <debug.h> |
||
1235 | jermar | 69 | #include <print.h> |
703 | jermar | 70 | #include <memstr.h> |
1070 | jermar | 71 | #include <macros.h> |
703 | jermar | 72 | #include <arch.h> |
1235 | jermar | 73 | #include <errno.h> |
74 | #include <config.h> |
||
1387 | jermar | 75 | #include <align.h> |
1235 | jermar | 76 | #include <arch/types.h> |
77 | #include <typedefs.h> |
||
1288 | jermar | 78 | #include <syscall/copy.h> |
79 | #include <arch/interrupt.h> |
||
703 | jermar | 80 | |
2009 | jermar | 81 | #ifdef CONFIG_VIRT_IDX_DCACHE |
82 | #include <arch/mm/cache.h> |
||
83 | #endif /* CONFIG_VIRT_IDX_DCACHE */ |
||
84 | |||
1757 | jermar | 85 | /** |
86 | * Each architecture decides what functions will be used to carry out |
||
87 | * address space operations such as creating or locking page tables. |
||
88 | */ |
||
756 | jermar | 89 | as_operations_t *as_operations = NULL; |
703 | jermar | 90 | |
1890 | jermar | 91 | /** |
92 | * Slab for as_t objects. |
||
93 | */ |
||
94 | static slab_cache_t *as_slab; |
||
95 | |||
1415 | jermar | 96 | /** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */ |
97 | SPINLOCK_INITIALIZE(inactive_as_with_asid_lock); |
||
823 | jermar | 98 | |
99 | /** |
||
100 | * This list contains address spaces that are not active on any |
||
101 | * processor and that have valid ASID. |
||
102 | */ |
||
103 | LIST_INITIALIZE(inactive_as_with_asid_head); |
||
104 | |||
757 | jermar | 105 | /** Kernel address space. */ |
106 | as_t *AS_KERNEL = NULL; |
||
107 | |||
1235 | jermar | 108 | static int area_flags_to_page_flags(int aflags); |
1780 | jermar | 109 | static as_area_t *find_area_and_lock(as_t *as, uintptr_t va); |
110 | static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area); |
||
1409 | jermar | 111 | static void sh_info_remove_reference(share_info_t *sh_info); |
703 | jermar | 112 | |
1891 | jermar | 113 | static int as_constructor(void *obj, int flags) |
114 | { |
||
115 | as_t *as = (as_t *) obj; |
||
116 | int rc; |
||
117 | |||
118 | link_initialize(&as->inactive_as_with_asid_link); |
||
119 | mutex_initialize(&as->lock); |
||
120 | |||
121 | rc = as_constructor_arch(as, flags); |
||
122 | |||
123 | return rc; |
||
124 | } |
||
125 | |||
126 | static int as_destructor(void *obj) |
||
127 | { |
||
128 | as_t *as = (as_t *) obj; |
||
129 | |||
130 | return as_destructor_arch(as); |
||
131 | } |
||
132 | |||
756 | jermar | 133 | /** Initialize address space subsystem. */ |
134 | void as_init(void) |
||
135 | { |
||
136 | as_arch_init(); |
||
1890 | jermar | 137 | |
1891 | jermar | 138 | as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, |
139 | as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); |
||
1890 | jermar | 140 | |
789 | palkovsky | 141 | AS_KERNEL = as_create(FLAG_AS_KERNEL); |
1383 | decky | 142 | if (!AS_KERNEL) |
143 | panic("can't create kernel address space\n"); |
||
144 | |||
756 | jermar | 145 | } |
146 | |||
757 | jermar | 147 | /** Create address space. |
148 | * |
||
149 | * @param flags Flags that influence way in wich the address space is created. |
||
150 | */ |
||
756 | jermar | 151 | as_t *as_create(int flags) |
703 | jermar | 152 | { |
153 | as_t *as; |
||
154 | |||
1890 | jermar | 155 | as = (as_t *) slab_alloc(as_slab, 0); |
1891 | jermar | 156 | (void) as_create_arch(as, 0); |
157 | |||
1147 | jermar | 158 | btree_create(&as->as_area_btree); |
822 | palkovsky | 159 | |
160 | if (flags & FLAG_AS_KERNEL) |
||
161 | as->asid = ASID_KERNEL; |
||
162 | else |
||
163 | as->asid = ASID_INVALID; |
||
164 | |||
1468 | jermar | 165 | as->refcount = 0; |
1415 | jermar | 166 | as->cpu_refcount = 0; |
822 | palkovsky | 167 | as->page_table = page_table_create(flags); |
703 | jermar | 168 | |
2009 | jermar | 169 | #ifdef CONFIG_VIRT_IDX_DCACHE |
170 | as->dcache_flush_on_install = false; |
||
171 | as->dcache_flush_on_deinstall = false; |
||
172 | #endif /* CONFIG_VIRT_IDX_DCACHE */ |
||
173 | |||
703 | jermar | 174 | return as; |
175 | } |
||
176 | |||
1468 | jermar | 177 | /** Destroy adress space. |
178 | * |
||
179 | * When there are no tasks referencing this address space (i.e. its refcount is zero), |
||
180 | * the address space can be destroyed. |
||
181 | */ |
||
182 | void as_destroy(as_t *as) |
||
973 | palkovsky | 183 | { |
1468 | jermar | 184 | ipl_t ipl; |
1594 | jermar | 185 | bool cond; |
973 | palkovsky | 186 | |
1468 | jermar | 187 | ASSERT(as->refcount == 0); |
188 | |||
189 | /* |
||
190 | * Since there is no reference to this area, |
||
191 | * it is safe not to lock its mutex. |
||
192 | */ |
||
193 | ipl = interrupts_disable(); |
||
194 | spinlock_lock(&inactive_as_with_asid_lock); |
||
1587 | jermar | 195 | if (as->asid != ASID_INVALID && as != AS_KERNEL) { |
1594 | jermar | 196 | if (as != AS && as->cpu_refcount == 0) |
1587 | jermar | 197 | list_remove(&as->inactive_as_with_asid_link); |
1468 | jermar | 198 | asid_put(as->asid); |
199 | } |
||
200 | spinlock_unlock(&inactive_as_with_asid_lock); |
||
201 | |||
202 | /* |
||
203 | * Destroy address space areas of the address space. |
||
1954 | jermar | 204 | * The B+tree must be walked carefully because it is |
1594 | jermar | 205 | * also being destroyed. |
1468 | jermar | 206 | */ |
1594 | jermar | 207 | for (cond = true; cond; ) { |
1468 | jermar | 208 | btree_node_t *node; |
1594 | jermar | 209 | |
210 | ASSERT(!list_empty(&as->as_area_btree.leaf_head)); |
||
211 | node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link); |
||
212 | |||
213 | if ((cond = node->keys)) { |
||
214 | as_area_destroy(as, node->key[0]); |
||
215 | } |
||
1468 | jermar | 216 | } |
1495 | jermar | 217 | |
1483 | jermar | 218 | btree_destroy(&as->as_area_btree); |
1468 | jermar | 219 | page_table_destroy(as->page_table); |
220 | |||
221 | interrupts_restore(ipl); |
||
222 | |||
1890 | jermar | 223 | slab_free(as_slab, as); |
973 | palkovsky | 224 | } |
225 | |||
703 | jermar | 226 | /** Create address space area of common attributes. |
227 | * |
||
228 | * The created address space area is added to the target address space. |
||
229 | * |
||
230 | * @param as Target address space. |
||
1239 | jermar | 231 | * @param flags Flags of the area memory. |
1048 | jermar | 232 | * @param size Size of area. |
703 | jermar | 233 | * @param base Base address of area. |
1239 | jermar | 234 | * @param attrs Attributes of the area. |
1409 | jermar | 235 | * @param backend Address space area backend. NULL if no backend is used. |
236 | * @param backend_data NULL or a pointer to an array holding two void *. |
||
703 | jermar | 237 | * |
238 | * @return Address space area on success or NULL on failure. |
||
239 | */ |
||
1780 | jermar | 240 | as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs, |
1424 | jermar | 241 | mem_backend_t *backend, mem_backend_data_t *backend_data) |
703 | jermar | 242 | { |
243 | ipl_t ipl; |
||
244 | as_area_t *a; |
||
245 | |||
246 | if (base % PAGE_SIZE) |
||
1048 | jermar | 247 | return NULL; |
248 | |||
1233 | jermar | 249 | if (!size) |
250 | return NULL; |
||
251 | |||
1048 | jermar | 252 | /* Writeable executable areas are not supported. */ |
253 | if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) |
||
254 | return NULL; |
||
703 | jermar | 255 | |
256 | ipl = interrupts_disable(); |
||
1380 | jermar | 257 | mutex_lock(&as->lock); |
703 | jermar | 258 | |
1048 | jermar | 259 | if (!check_area_conflicts(as, base, size, NULL)) { |
1380 | jermar | 260 | mutex_unlock(&as->lock); |
1048 | jermar | 261 | interrupts_restore(ipl); |
262 | return NULL; |
||
263 | } |
||
703 | jermar | 264 | |
822 | palkovsky | 265 | a = (as_area_t *) malloc(sizeof(as_area_t), 0); |
703 | jermar | 266 | |
1380 | jermar | 267 | mutex_initialize(&a->lock); |
822 | palkovsky | 268 | |
1424 | jermar | 269 | a->as = as; |
1026 | jermar | 270 | a->flags = flags; |
1239 | jermar | 271 | a->attributes = attrs; |
1048 | jermar | 272 | a->pages = SIZE2FRAMES(size); |
822 | palkovsky | 273 | a->base = base; |
1409 | jermar | 274 | a->sh_info = NULL; |
275 | a->backend = backend; |
||
1424 | jermar | 276 | if (backend_data) |
277 | a->backend_data = *backend_data; |
||
278 | else |
||
1780 | jermar | 279 | memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0); |
1424 | jermar | 280 | |
2009 | jermar | 281 | #ifdef CONFIG_VIRT_IDX_DCACHE |
282 | /* |
||
283 | * When the area is being created with the AS_AREA_ATTR_PARTIAL flag, the |
||
284 | * orig_color is probably wrong until the flag is reset. In other words, it is |
||
285 | * initialized with the color of the area being created and not with the color |
||
286 | * of the original address space area at the beginning of the share chain. Of |
||
287 | * course, the correct color is set by as_area_share() before the flag is |
||
288 | * reset. |
||
289 | */ |
||
290 | a->orig_color = PAGE_COLOR(base); |
||
291 | #endif /* CONFIG_VIRT_IDX_DCACHE */ |
||
292 | |||
1387 | jermar | 293 | btree_create(&a->used_space); |
822 | palkovsky | 294 | |
1147 | jermar | 295 | btree_insert(&as->as_area_btree, base, (void *) a, NULL); |
822 | palkovsky | 296 | |
1380 | jermar | 297 | mutex_unlock(&as->lock); |
703 | jermar | 298 | interrupts_restore(ipl); |
704 | jermar | 299 | |
703 | jermar | 300 | return a; |
301 | } |
||
302 | |||
1235 | jermar | 303 | /** Find address space area and change it. |
304 | * |
||
305 | * @param as Address space. |
||
306 | * @param address Virtual address belonging to the area to be changed. Must be page-aligned. |
||
307 | * @param size New size of the virtual memory block starting at address. |
||
308 | * @param flags Flags influencing the remap operation. Currently unused. |
||
309 | * |
||
1306 | jermar | 310 | * @return Zero on success or a value from @ref errno.h otherwise. |
1235 | jermar | 311 | */ |
1780 | jermar | 312 | int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags) |
1235 | jermar | 313 | { |
1306 | jermar | 314 | as_area_t *area; |
1235 | jermar | 315 | ipl_t ipl; |
316 | size_t pages; |
||
317 | |||
318 | ipl = interrupts_disable(); |
||
1380 | jermar | 319 | mutex_lock(&as->lock); |
1235 | jermar | 320 | |
321 | /* |
||
322 | * Locate the area. |
||
323 | */ |
||
324 | area = find_area_and_lock(as, address); |
||
325 | if (!area) { |
||
1380 | jermar | 326 | mutex_unlock(&as->lock); |
1235 | jermar | 327 | interrupts_restore(ipl); |
1306 | jermar | 328 | return ENOENT; |
1235 | jermar | 329 | } |
330 | |||
1424 | jermar | 331 | if (area->backend == &phys_backend) { |
1235 | jermar | 332 | /* |
333 | * Remapping of address space areas associated |
||
334 | * with memory mapped devices is not supported. |
||
335 | */ |
||
1380 | jermar | 336 | mutex_unlock(&area->lock); |
337 | mutex_unlock(&as->lock); |
||
1235 | jermar | 338 | interrupts_restore(ipl); |
1306 | jermar | 339 | return ENOTSUP; |
1235 | jermar | 340 | } |
1409 | jermar | 341 | if (area->sh_info) { |
342 | /* |
||
343 | * Remapping of shared address space areas |
||
344 | * is not supported. |
||
345 | */ |
||
346 | mutex_unlock(&area->lock); |
||
347 | mutex_unlock(&as->lock); |
||
348 | interrupts_restore(ipl); |
||
349 | return ENOTSUP; |
||
350 | } |
||
1235 | jermar | 351 | |
352 | pages = SIZE2FRAMES((address - area->base) + size); |
||
353 | if (!pages) { |
||
354 | /* |
||
355 | * Zero size address space areas are not allowed. |
||
356 | */ |
||
1380 | jermar | 357 | mutex_unlock(&area->lock); |
358 | mutex_unlock(&as->lock); |
||
1235 | jermar | 359 | interrupts_restore(ipl); |
1306 | jermar | 360 | return EPERM; |
1235 | jermar | 361 | } |
362 | |||
363 | if (pages < area->pages) { |
||
1403 | jermar | 364 | bool cond; |
1780 | jermar | 365 | uintptr_t start_free = area->base + pages*PAGE_SIZE; |
1235 | jermar | 366 | |
367 | /* |
||
368 | * Shrinking the area. |
||
369 | * No need to check for overlaps. |
||
370 | */ |
||
1403 | jermar | 371 | |
372 | /* |
||
1436 | jermar | 373 | * Start TLB shootdown sequence. |
374 | */ |
||
375 | tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages); |
||
376 | |||
377 | /* |
||
1403 | jermar | 378 | * Remove frames belonging to used space starting from |
379 | * the highest addresses downwards until an overlap with |
||
380 | * the resized address space area is found. Note that this |
||
381 | * is also the right way to remove part of the used_space |
||
382 | * B+tree leaf list. |
||
383 | */ |
||
384 | for (cond = true; cond;) { |
||
385 | btree_node_t *node; |
||
386 | |||
387 | ASSERT(!list_empty(&area->used_space.leaf_head)); |
||
388 | node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link); |
||
389 | if ((cond = (bool) node->keys)) { |
||
1780 | jermar | 390 | uintptr_t b = node->key[node->keys - 1]; |
1403 | jermar | 391 | count_t c = (count_t) node->value[node->keys - 1]; |
392 | int i = 0; |
||
1235 | jermar | 393 | |
1403 | jermar | 394 | if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) { |
395 | |||
396 | if (b + c*PAGE_SIZE <= start_free) { |
||
397 | /* |
||
398 | * The whole interval fits completely |
||
399 | * in the resized address space area. |
||
400 | */ |
||
401 | break; |
||
402 | } |
||
403 | |||
404 | /* |
||
405 | * Part of the interval corresponding to b and c |
||
406 | * overlaps with the resized address space area. |
||
407 | */ |
||
408 | |||
409 | cond = false; /* we are almost done */ |
||
410 | i = (start_free - b) >> PAGE_WIDTH; |
||
411 | if (!used_space_remove(area, start_free, c - i)) |
||
1889 | jermar | 412 | panic("Could not remove used space.\n"); |
1403 | jermar | 413 | } else { |
414 | /* |
||
415 | * The interval of used space can be completely removed. |
||
416 | */ |
||
417 | if (!used_space_remove(area, b, c)) |
||
418 | panic("Could not remove used space.\n"); |
||
419 | } |
||
420 | |||
421 | for (; i < c; i++) { |
||
422 | pte_t *pte; |
||
423 | |||
424 | page_table_lock(as, false); |
||
425 | pte = page_mapping_find(as, b + i*PAGE_SIZE); |
||
426 | ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); |
||
1424 | jermar | 427 | if (area->backend && area->backend->frame_free) { |
428 | area->backend->frame_free(area, |
||
1409 | jermar | 429 | b + i*PAGE_SIZE, PTE_GET_FRAME(pte)); |
430 | } |
||
1403 | jermar | 431 | page_mapping_remove(as, b + i*PAGE_SIZE); |
432 | page_table_unlock(as, false); |
||
433 | } |
||
1235 | jermar | 434 | } |
435 | } |
||
1436 | jermar | 436 | |
1235 | jermar | 437 | /* |
1436 | jermar | 438 | * Finish TLB shootdown sequence. |
1235 | jermar | 439 | */ |
1954 | jermar | 440 | tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages); |
1235 | jermar | 441 | tlb_shootdown_finalize(); |
1889 | jermar | 442 | |
443 | /* |
||
444 | * Invalidate software translation caches (e.g. TSB on sparc64). |
||
445 | */ |
||
446 | as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages); |
||
1235 | jermar | 447 | } else { |
448 | /* |
||
449 | * Growing the area. |
||
450 | * Check for overlaps with other address space areas. |
||
451 | */ |
||
452 | if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) { |
||
1380 | jermar | 453 | mutex_unlock(&area->lock); |
454 | mutex_unlock(&as->lock); |
||
1235 | jermar | 455 | interrupts_restore(ipl); |
1306 | jermar | 456 | return EADDRNOTAVAIL; |
1235 | jermar | 457 | } |
458 | } |
||
459 | |||
460 | area->pages = pages; |
||
461 | |||
1380 | jermar | 462 | mutex_unlock(&area->lock); |
463 | mutex_unlock(&as->lock); |
||
1235 | jermar | 464 | interrupts_restore(ipl); |
465 | |||
1306 | jermar | 466 | return 0; |
1235 | jermar | 467 | } |
468 | |||
1306 | jermar | 469 | /** Destroy address space area. |
470 | * |
||
471 | * @param as Address space. |
||
472 | * @param address Address withing the area to be deleted. |
||
473 | * |
||
474 | * @return Zero on success or a value from @ref errno.h on failure. |
||
475 | */ |
||
1780 | jermar | 476 | int as_area_destroy(as_t *as, uintptr_t address) |
1306 | jermar | 477 | { |
478 | as_area_t *area; |
||
1780 | jermar | 479 | uintptr_t base; |
1495 | jermar | 480 | link_t *cur; |
1306 | jermar | 481 | ipl_t ipl; |
482 | |||
483 | ipl = interrupts_disable(); |
||
1380 | jermar | 484 | mutex_lock(&as->lock); |
1306 | jermar | 485 | |
486 | area = find_area_and_lock(as, address); |
||
487 | if (!area) { |
||
1380 | jermar | 488 | mutex_unlock(&as->lock); |
1306 | jermar | 489 | interrupts_restore(ipl); |
490 | return ENOENT; |
||
491 | } |
||
492 | |||
1403 | jermar | 493 | base = area->base; |
494 | |||
1411 | jermar | 495 | /* |
1436 | jermar | 496 | * Start TLB shootdown sequence. |
497 | */ |
||
1889 | jermar | 498 | tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); |
1436 | jermar | 499 | |
500 | /* |
||
1411 | jermar | 501 | * Visit only the pages mapped by used_space B+tree. |
502 | */ |
||
1495 | jermar | 503 | for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) { |
1411 | jermar | 504 | btree_node_t *node; |
1495 | jermar | 505 | int i; |
1403 | jermar | 506 | |
1495 | jermar | 507 | node = list_get_instance(cur, btree_node_t, leaf_link); |
508 | for (i = 0; i < node->keys; i++) { |
||
1780 | jermar | 509 | uintptr_t b = node->key[i]; |
1495 | jermar | 510 | count_t j; |
1411 | jermar | 511 | pte_t *pte; |
1403 | jermar | 512 | |
1495 | jermar | 513 | for (j = 0; j < (count_t) node->value[i]; j++) { |
1411 | jermar | 514 | page_table_lock(as, false); |
1495 | jermar | 515 | pte = page_mapping_find(as, b + j*PAGE_SIZE); |
1411 | jermar | 516 | ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); |
1424 | jermar | 517 | if (area->backend && area->backend->frame_free) { |
518 | area->backend->frame_free(area, |
||
1495 | jermar | 519 | b + j*PAGE_SIZE, PTE_GET_FRAME(pte)); |
1403 | jermar | 520 | } |
1889 | jermar | 521 | page_mapping_remove(as, b + j*PAGE_SIZE); |
1411 | jermar | 522 | page_table_unlock(as, false); |
1306 | jermar | 523 | } |
524 | } |
||
525 | } |
||
1403 | jermar | 526 | |
1306 | jermar | 527 | /* |
1436 | jermar | 528 | * Finish TLB shootdown sequence. |
1306 | jermar | 529 | */ |
1889 | jermar | 530 | tlb_invalidate_pages(as->asid, area->base, area->pages); |
1306 | jermar | 531 | tlb_shootdown_finalize(); |
1436 | jermar | 532 | |
1889 | jermar | 533 | /* |
534 | * Invalidate potential software translation caches (e.g. TSB on sparc64). |
||
535 | */ |
||
536 | as_invalidate_translation_cache(as, area->base, area->pages); |
||
537 | |||
1436 | jermar | 538 | btree_destroy(&area->used_space); |
1306 | jermar | 539 | |
1309 | jermar | 540 | area->attributes |= AS_AREA_ATTR_PARTIAL; |
1409 | jermar | 541 | |
542 | if (area->sh_info) |
||
543 | sh_info_remove_reference(area->sh_info); |
||
544 | |||
1380 | jermar | 545 | mutex_unlock(&area->lock); |
1306 | jermar | 546 | |
547 | /* |
||
548 | * Remove the empty area from address space. |
||
549 | */ |
||
1889 | jermar | 550 | btree_remove(&as->as_area_btree, base, NULL); |
1306 | jermar | 551 | |
1309 | jermar | 552 | free(area); |
553 | |||
1889 | jermar | 554 | mutex_unlock(&as->lock); |
1306 | jermar | 555 | interrupts_restore(ipl); |
556 | return 0; |
||
557 | } |
||
558 | |||
1413 | jermar | 559 | /** Share address space area with another or the same address space. |
1235 | jermar | 560 | * |
1424 | jermar | 561 | * Address space area mapping is shared with a new address space area. |
562 | * If the source address space area has not been shared so far, |
||
563 | * a new sh_info is created. The new address space area simply gets the |
||
564 | * sh_info of the source area. The process of duplicating the |
||
565 | * mapping is done through the backend share function. |
||
1413 | jermar | 566 | * |
1417 | jermar | 567 | * @param src_as Pointer to source address space. |
1239 | jermar | 568 | * @param src_base Base address of the source address space area. |
1417 | jermar | 569 | * @param acc_size Expected size of the source area. |
1428 | palkovsky | 570 | * @param dst_as Pointer to destination address space. |
1417 | jermar | 571 | * @param dst_base Target base address. |
572 | * @param dst_flags_mask Destination address space area flags mask. |
||
1235 | jermar | 573 | * |
2007 | jermar | 574 | * @return Zero on success or ENOENT if there is no such task or if there is no |
575 | * such address space area, EPERM if there was a problem in accepting the area |
||
576 | * or ENOMEM if there was a problem in allocating destination address space |
||
577 | * area. ENOTSUP is returned if the address space area backend does not support |
||
2009 | jermar | 578 | * sharing. |
1235 | jermar | 579 | */ |
1780 | jermar | 580 | int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, |
581 | as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) |
||
1235 | jermar | 582 | { |
583 | ipl_t ipl; |
||
1239 | jermar | 584 | int src_flags; |
585 | size_t src_size; |
||
2009 | jermar | 586 | int src_orig_color; |
1239 | jermar | 587 | as_area_t *src_area, *dst_area; |
1413 | jermar | 588 | share_info_t *sh_info; |
1424 | jermar | 589 | mem_backend_t *src_backend; |
590 | mem_backend_data_t src_backend_data; |
||
1434 | palkovsky | 591 | |
1235 | jermar | 592 | ipl = interrupts_disable(); |
1380 | jermar | 593 | mutex_lock(&src_as->lock); |
1329 | palkovsky | 594 | src_area = find_area_and_lock(src_as, src_base); |
1239 | jermar | 595 | if (!src_area) { |
1238 | jermar | 596 | /* |
597 | * Could not find the source address space area. |
||
598 | */ |
||
1380 | jermar | 599 | mutex_unlock(&src_as->lock); |
1238 | jermar | 600 | interrupts_restore(ipl); |
601 | return ENOENT; |
||
602 | } |
||
1413 | jermar | 603 | |
2007 | jermar | 604 | |
1424 | jermar | 605 | if (!src_area->backend || !src_area->backend->share) { |
1413 | jermar | 606 | /* |
1851 | jermar | 607 | * There is no backend or the backend does not |
1424 | jermar | 608 | * know how to share the area. |
1413 | jermar | 609 | */ |
610 | mutex_unlock(&src_area->lock); |
||
611 | mutex_unlock(&src_as->lock); |
||
612 | interrupts_restore(ipl); |
||
613 | return ENOTSUP; |
||
614 | } |
||
615 | |||
1239 | jermar | 616 | src_size = src_area->pages * PAGE_SIZE; |
617 | src_flags = src_area->flags; |
||
1424 | jermar | 618 | src_backend = src_area->backend; |
619 | src_backend_data = src_area->backend_data; |
||
2009 | jermar | 620 | src_orig_color = src_area->orig_color; |
1544 | palkovsky | 621 | |
622 | /* Share the cacheable flag from the original mapping */ |
||
623 | if (src_flags & AS_AREA_CACHEABLE) |
||
624 | dst_flags_mask |= AS_AREA_CACHEABLE; |
||
625 | |||
1461 | palkovsky | 626 | if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) { |
1413 | jermar | 627 | mutex_unlock(&src_area->lock); |
628 | mutex_unlock(&src_as->lock); |
||
1235 | jermar | 629 | interrupts_restore(ipl); |
630 | return EPERM; |
||
631 | } |
||
1413 | jermar | 632 | |
1235 | jermar | 633 | /* |
1413 | jermar | 634 | * Now we are committed to sharing the area. |
1954 | jermar | 635 | * First, prepare the area for sharing. |
1413 | jermar | 636 | * Then it will be safe to unlock it. |
637 | */ |
||
638 | sh_info = src_area->sh_info; |
||
639 | if (!sh_info) { |
||
640 | sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0); |
||
641 | mutex_initialize(&sh_info->lock); |
||
642 | sh_info->refcount = 2; |
||
643 | btree_create(&sh_info->pagemap); |
||
644 | src_area->sh_info = sh_info; |
||
645 | } else { |
||
646 | mutex_lock(&sh_info->lock); |
||
647 | sh_info->refcount++; |
||
648 | mutex_unlock(&sh_info->lock); |
||
649 | } |
||
650 | |||
1424 | jermar | 651 | src_area->backend->share(src_area); |
1413 | jermar | 652 | |
653 | mutex_unlock(&src_area->lock); |
||
654 | mutex_unlock(&src_as->lock); |
||
655 | |||
656 | /* |
||
1239 | jermar | 657 | * Create copy of the source address space area. |
658 | * The destination area is created with AS_AREA_ATTR_PARTIAL |
||
659 | * attribute set which prevents race condition with |
||
660 | * preliminary as_page_fault() calls. |
||
1417 | jermar | 661 | * The flags of the source area are masked against dst_flags_mask |
662 | * to support sharing in less privileged mode. |
||
1235 | jermar | 663 | */ |
1461 | palkovsky | 664 | dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base, |
1424 | jermar | 665 | AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); |
1239 | jermar | 666 | if (!dst_area) { |
1235 | jermar | 667 | /* |
668 | * Destination address space area could not be created. |
||
669 | */ |
||
1413 | jermar | 670 | sh_info_remove_reference(sh_info); |
671 | |||
1235 | jermar | 672 | interrupts_restore(ipl); |
673 | return ENOMEM; |
||
674 | } |
||
2009 | jermar | 675 | |
1235 | jermar | 676 | /* |
1239 | jermar | 677 | * Now the destination address space area has been |
678 | * fully initialized. Clear the AS_AREA_ATTR_PARTIAL |
||
1413 | jermar | 679 | * attribute and set the sh_info. |
1239 | jermar | 680 | */ |
2009 | jermar | 681 | mutex_lock(&dst_as->lock); |
1380 | jermar | 682 | mutex_lock(&dst_area->lock); |
1239 | jermar | 683 | dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; |
1413 | jermar | 684 | dst_area->sh_info = sh_info; |
2009 | jermar | 685 | dst_area->orig_color = src_orig_color; |
686 | #ifdef CONFIG_VIRT_IDX_DCACHE |
||
687 | if (src_orig_color != PAGE_COLOR(dst_base)) { |
||
688 | /* |
||
689 | * We have just detected an attempt to create an invalid address |
||
690 | * alias. We allow this and set a special flag that tells the |
||
691 | * architecture specific code to flush the D-cache when the |
||
692 | * offending address space is installed and deinstalled |
||
693 | * (cleanup). |
||
694 | * |
||
695 | * In order for the flags to take effect immediately, we also |
||
696 | * perform a global D-cache shootdown. |
||
697 | */ |
||
698 | dcache_shootdown_start(); |
||
699 | dst_as->dcache_flush_on_install = true; |
||
700 | dst_as->dcache_flush_on_deinstall = true; |
||
701 | dcache_flush(); |
||
702 | dcache_shootdown_finalize(); |
||
703 | } |
||
704 | #endif /* CONFIG_VIRT_IDX_DCACHE */ |
||
1380 | jermar | 705 | mutex_unlock(&dst_area->lock); |
2009 | jermar | 706 | mutex_unlock(&dst_as->lock); |
707 | |||
1235 | jermar | 708 | interrupts_restore(ipl); |
709 | |||
710 | return 0; |
||
711 | } |
||
712 | |||
1423 | jermar | 713 | /** Check access mode for address space area. |
714 | * |
||
715 | * The address space area must be locked prior to this call. |
||
716 | * |
||
717 | * @param area Address space area. |
||
718 | * @param access Access mode. |
||
719 | * |
||
720 | * @return False if access violates area's permissions, true otherwise. |
||
721 | */ |
||
722 | bool as_area_check_access(as_area_t *area, pf_access_t access) |
||
723 | { |
||
724 | int flagmap[] = { |
||
725 | [PF_ACCESS_READ] = AS_AREA_READ, |
||
726 | [PF_ACCESS_WRITE] = AS_AREA_WRITE, |
||
727 | [PF_ACCESS_EXEC] = AS_AREA_EXEC |
||
728 | }; |
||
729 | |||
730 | if (!(area->flags & flagmap[access])) |
||
731 | return false; |
||
732 | |||
733 | return true; |
||
734 | } |
||
735 | |||
703 | jermar | 736 | /** Handle page fault within the current address space. |
737 | * |
||
1409 | jermar | 738 | * This is the high-level page fault handler. It decides |
739 | * whether the page fault can be resolved by any backend |
||
740 | * and if so, it invokes the backend to resolve the page |
||
741 | * fault. |
||
742 | * |
||
703 | jermar | 743 | * Interrupts are assumed disabled. |
744 | * |
||
745 | * @param page Faulting page. |
||
1411 | jermar | 746 | * @param access Access mode that caused the fault (i.e. read/write/exec). |
1288 | jermar | 747 | * @param istate Pointer to interrupted state. |
703 | jermar | 748 | * |
1409 | jermar | 749 | * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the |
750 | * fault was caused by copy_to_uspace() or copy_from_uspace(). |
||
703 | jermar | 751 | */ |
1780 | jermar | 752 | int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) |
703 | jermar | 753 | { |
1044 | jermar | 754 | pte_t *pte; |
977 | jermar | 755 | as_area_t *area; |
703 | jermar | 756 | |
1380 | jermar | 757 | if (!THREAD) |
1409 | jermar | 758 | return AS_PF_FAULT; |
1380 | jermar | 759 | |
703 | jermar | 760 | ASSERT(AS); |
1044 | jermar | 761 | |
1380 | jermar | 762 | mutex_lock(&AS->lock); |
977 | jermar | 763 | area = find_area_and_lock(AS, page); |
703 | jermar | 764 | if (!area) { |
765 | /* |
||
766 | * No area contained mapping for 'page'. |
||
767 | * Signal page fault to low-level handler. |
||
768 | */ |
||
1380 | jermar | 769 | mutex_unlock(&AS->lock); |
1288 | jermar | 770 | goto page_fault; |
703 | jermar | 771 | } |
772 | |||
1239 | jermar | 773 | if (area->attributes & AS_AREA_ATTR_PARTIAL) { |
774 | /* |
||
775 | * The address space area is not fully initialized. |
||
776 | * Avoid possible race by returning error. |
||
777 | */ |
||
1380 | jermar | 778 | mutex_unlock(&area->lock); |
779 | mutex_unlock(&AS->lock); |
||
1288 | jermar | 780 | goto page_fault; |
1239 | jermar | 781 | } |
782 | |||
1424 | jermar | 783 | if (!area->backend || !area->backend->page_fault) { |
1409 | jermar | 784 | /* |
785 | * The address space area is not backed by any backend |
||
786 | * or the backend cannot handle page faults. |
||
787 | */ |
||
788 | mutex_unlock(&area->lock); |
||
789 | mutex_unlock(&AS->lock); |
||
790 | goto page_fault; |
||
791 | } |
||
1179 | jermar | 792 | |
1044 | jermar | 793 | page_table_lock(AS, false); |
794 | |||
703 | jermar | 795 | /* |
1044 | jermar | 796 | * To avoid race condition between two page faults |
797 | * on the same address, we need to make sure |
||
798 | * the mapping has not been already inserted. |
||
799 | */ |
||
800 | if ((pte = page_mapping_find(AS, page))) { |
||
801 | if (PTE_PRESENT(pte)) { |
||
1423 | jermar | 802 | if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) || |
803 | (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) || |
||
804 | (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) { |
||
805 | page_table_unlock(AS, false); |
||
806 | mutex_unlock(&area->lock); |
||
807 | mutex_unlock(&AS->lock); |
||
808 | return AS_PF_OK; |
||
809 | } |
||
1044 | jermar | 810 | } |
811 | } |
||
1409 | jermar | 812 | |
1044 | jermar | 813 | /* |
1409 | jermar | 814 | * Resort to the backend page fault handler. |
703 | jermar | 815 | */ |
1424 | jermar | 816 | if (area->backend->page_fault(area, page, access) != AS_PF_OK) { |
1409 | jermar | 817 | page_table_unlock(AS, false); |
818 | mutex_unlock(&area->lock); |
||
819 | mutex_unlock(&AS->lock); |
||
820 | goto page_fault; |
||
821 | } |
||
703 | jermar | 822 | |
1044 | jermar | 823 | page_table_unlock(AS, false); |
1380 | jermar | 824 | mutex_unlock(&area->lock); |
825 | mutex_unlock(&AS->lock); |
||
1288 | jermar | 826 | return AS_PF_OK; |
827 | |||
828 | page_fault: |
||
829 | if (THREAD->in_copy_from_uspace) { |
||
830 | THREAD->in_copy_from_uspace = false; |
||
1780 | jermar | 831 | istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address); |
1288 | jermar | 832 | } else if (THREAD->in_copy_to_uspace) { |
833 | THREAD->in_copy_to_uspace = false; |
||
1780 | jermar | 834 | istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address); |
1288 | jermar | 835 | } else { |
836 | return AS_PF_FAULT; |
||
837 | } |
||
838 | |||
839 | return AS_PF_DEFER; |
||
703 | jermar | 840 | } |
841 | |||
823 | jermar | 842 | /** Switch address spaces. |
703 | jermar | 843 | * |
1380 | jermar | 844 | * Note that this function cannot sleep as it is essentially a part of |
1415 | jermar | 845 | * scheduling. Sleeping here would lead to deadlock on wakeup. |
1380 | jermar | 846 | * |
823 | jermar | 847 | * @param old Old address space or NULL. |
848 | * @param new New address space. |
||
703 | jermar | 849 | */ |
823 | jermar | 850 | void as_switch(as_t *old, as_t *new) |
703 | jermar | 851 | { |
852 | ipl_t ipl; |
||
823 | jermar | 853 | bool needs_asid = false; |
703 | jermar | 854 | |
855 | ipl = interrupts_disable(); |
||
1415 | jermar | 856 | spinlock_lock(&inactive_as_with_asid_lock); |
703 | jermar | 857 | |
858 | /* |
||
823 | jermar | 859 | * First, take care of the old address space. |
860 | */ |
||
861 | if (old) { |
||
1380 | jermar | 862 | mutex_lock_active(&old->lock); |
1415 | jermar | 863 | ASSERT(old->cpu_refcount); |
864 | if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) { |
||
823 | jermar | 865 | /* |
866 | * The old address space is no longer active on |
||
867 | * any processor. It can be appended to the |
||
868 | * list of inactive address spaces with assigned |
||
869 | * ASID. |
||
870 | */ |
||
871 | ASSERT(old->asid != ASID_INVALID); |
||
872 | list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head); |
||
873 | } |
||
1380 | jermar | 874 | mutex_unlock(&old->lock); |
1890 | jermar | 875 | |
876 | /* |
||
877 | * Perform architecture-specific tasks when the address space |
||
878 | * is being removed from the CPU. |
||
879 | */ |
||
880 | as_deinstall_arch(old); |
||
823 | jermar | 881 | } |
882 | |||
883 | /* |
||
884 | * Second, prepare the new address space. |
||
885 | */ |
||
1380 | jermar | 886 | mutex_lock_active(&new->lock); |
1415 | jermar | 887 | if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) { |
823 | jermar | 888 | if (new->asid != ASID_INVALID) |
889 | list_remove(&new->inactive_as_with_asid_link); |
||
890 | else |
||
891 | needs_asid = true; /* defer call to asid_get() until new->lock is released */ |
||
892 | } |
||
893 | SET_PTL0_ADDRESS(new->page_table); |
||
1380 | jermar | 894 | mutex_unlock(&new->lock); |
823 | jermar | 895 | |
896 | if (needs_asid) { |
||
897 | /* |
||
898 | * Allocation of new ASID was deferred |
||
899 | * until now in order to avoid deadlock. |
||
900 | */ |
||
901 | asid_t asid; |
||
902 | |||
903 | asid = asid_get(); |
||
1380 | jermar | 904 | mutex_lock_active(&new->lock); |
823 | jermar | 905 | new->asid = asid; |
1380 | jermar | 906 | mutex_unlock(&new->lock); |
823 | jermar | 907 | } |
1415 | jermar | 908 | spinlock_unlock(&inactive_as_with_asid_lock); |
823 | jermar | 909 | interrupts_restore(ipl); |
910 | |||
911 | /* |
||
703 | jermar | 912 | * Perform architecture-specific steps. |
727 | jermar | 913 | * (e.g. write ASID to hardware register etc.) |
703 | jermar | 914 | */ |
823 | jermar | 915 | as_install_arch(new); |
703 | jermar | 916 | |
823 | jermar | 917 | AS = new; |
703 | jermar | 918 | } |
754 | jermar | 919 | |
1235 | jermar | 920 | /** Convert address space area flags to page flags. |
754 | jermar | 921 | * |
1235 | jermar | 922 | * @param aflags Flags of some address space area. |
754 | jermar | 923 | * |
1235 | jermar | 924 | * @return Flags to be passed to page_mapping_insert(). |
754 | jermar | 925 | */ |
1235 | jermar | 926 | int area_flags_to_page_flags(int aflags) |
754 | jermar | 927 | { |
928 | int flags; |
||
929 | |||
1178 | jermar | 930 | flags = PAGE_USER | PAGE_PRESENT; |
754 | jermar | 931 | |
1235 | jermar | 932 | if (aflags & AS_AREA_READ) |
1026 | jermar | 933 | flags |= PAGE_READ; |
934 | |||
1235 | jermar | 935 | if (aflags & AS_AREA_WRITE) |
1026 | jermar | 936 | flags |= PAGE_WRITE; |
937 | |||
1235 | jermar | 938 | if (aflags & AS_AREA_EXEC) |
1026 | jermar | 939 | flags |= PAGE_EXEC; |
940 | |||
1424 | jermar | 941 | if (aflags & AS_AREA_CACHEABLE) |
1178 | jermar | 942 | flags |= PAGE_CACHEABLE; |
943 | |||
754 | jermar | 944 | return flags; |
945 | } |
||
756 | jermar | 946 | |
1235 | jermar | 947 | /** Compute flags for virtual address translation subsytem. |
948 | * |
||
949 | * The address space area must be locked. |
||
950 | * Interrupts must be disabled. |
||
951 | * |
||
952 | * @param a Address space area. |
||
953 | * |
||
954 | * @return Flags to be used in page_mapping_insert(). |
||
955 | */ |
||
1409 | jermar | 956 | int as_area_get_flags(as_area_t *a) |
1235 | jermar | 957 | { |
958 | return area_flags_to_page_flags(a->flags); |
||
959 | } |
||
960 | |||
756 | jermar | 961 | /** Create page table. |
962 | * |
||
963 | * Depending on architecture, create either address space |
||
964 | * private or global page table. |
||
965 | * |
||
966 | * @param flags Flags saying whether the page table is for kernel address space. |
||
967 | * |
||
968 | * @return First entry of the page table. |
||
969 | */ |
||
970 | pte_t *page_table_create(int flags) |
||
971 | { |
||
972 | ASSERT(as_operations); |
||
973 | ASSERT(as_operations->page_table_create); |
||
974 | |||
975 | return as_operations->page_table_create(flags); |
||
976 | } |
||
977 | jermar | 977 | |
1468 | jermar | 978 | /** Destroy page table. |
979 | * |
||
980 | * Destroy page table in architecture specific way. |
||
981 | * |
||
982 | * @param page_table Physical address of PTL0. |
||
983 | */ |
||
984 | void page_table_destroy(pte_t *page_table) |
||
985 | { |
||
986 | ASSERT(as_operations); |
||
987 | ASSERT(as_operations->page_table_destroy); |
||
988 | |||
989 | as_operations->page_table_destroy(page_table); |
||
990 | } |
||
991 | |||
1044 | jermar | 992 | /** Lock page table. |
993 | * |
||
994 | * This function should be called before any page_mapping_insert(), |
||
995 | * page_mapping_remove() and page_mapping_find(). |
||
996 | * |
||
997 | * Locking order is such that address space areas must be locked |
||
998 | * prior to this call. Address space can be locked prior to this |
||
999 | * call in which case the lock argument is false. |
||
1000 | * |
||
1001 | * @param as Address space. |
||
1248 | jermar | 1002 | * @param lock If false, do not attempt to lock as->lock. |
1044 | jermar | 1003 | */ |
1004 | void page_table_lock(as_t *as, bool lock) |
||
1005 | { |
||
1006 | ASSERT(as_operations); |
||
1007 | ASSERT(as_operations->page_table_lock); |
||
1008 | |||
1009 | as_operations->page_table_lock(as, lock); |
||
1010 | } |
||
1011 | |||
1012 | /** Unlock page table. |
||
1013 | * |
||
1014 | * @param as Address space. |
||
1248 | jermar | 1015 | * @param unlock If false, do not attempt to unlock as->lock. |
1044 | jermar | 1016 | */ |
1017 | void page_table_unlock(as_t *as, bool unlock) |
||
1018 | { |
||
1019 | ASSERT(as_operations); |
||
1020 | ASSERT(as_operations->page_table_unlock); |
||
1021 | |||
1022 | as_operations->page_table_unlock(as, unlock); |
||
1023 | } |
||
1024 | |||
977 | jermar | 1025 | |
1026 | /** Find address space area and lock it. |
||
1027 | * |
||
1028 | * The address space must be locked and interrupts must be disabled. |
||
1029 | * |
||
1030 | * @param as Address space. |
||
1031 | * @param va Virtual address. |
||
1032 | * |
||
1033 | * @return Locked address space area containing va on success or NULL on failure. |
||
1034 | */ |
||
1780 | jermar | 1035 | as_area_t *find_area_and_lock(as_t *as, uintptr_t va) |
977 | jermar | 1036 | { |
1037 | as_area_t *a; |
||
1147 | jermar | 1038 | btree_node_t *leaf, *lnode; |
1039 | int i; |
||
977 | jermar | 1040 | |
1147 | jermar | 1041 | a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); |
1042 | if (a) { |
||
1043 | /* va is the base address of an address space area */ |
||
1380 | jermar | 1044 | mutex_lock(&a->lock); |
1147 | jermar | 1045 | return a; |
1046 | } |
||
1047 | |||
1048 | /* |
||
1150 | jermar | 1049 | * Search the leaf node and the righmost record of its left neighbour |
1147 | jermar | 1050 | * to find out whether this is a miss or va belongs to an address |
1051 | * space area found there. |
||
1052 | */ |
||
1053 | |||
1054 | /* First, search the leaf node itself. */ |
||
1055 | for (i = 0; i < leaf->keys; i++) { |
||
1056 | a = (as_area_t *) leaf->value[i]; |
||
1380 | jermar | 1057 | mutex_lock(&a->lock); |
1147 | jermar | 1058 | if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) { |
1059 | return a; |
||
1060 | } |
||
1380 | jermar | 1061 | mutex_unlock(&a->lock); |
1147 | jermar | 1062 | } |
977 | jermar | 1063 | |
1147 | jermar | 1064 | /* |
1150 | jermar | 1065 | * Second, locate the left neighbour and test its last record. |
1148 | jermar | 1066 | * Because of its position in the B+tree, it must have base < va. |
1147 | jermar | 1067 | */ |
1150 | jermar | 1068 | if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { |
1147 | jermar | 1069 | a = (as_area_t *) lnode->value[lnode->keys - 1]; |
1380 | jermar | 1070 | mutex_lock(&a->lock); |
1147 | jermar | 1071 | if (va < a->base + a->pages * PAGE_SIZE) { |
1048 | jermar | 1072 | return a; |
1147 | jermar | 1073 | } |
1380 | jermar | 1074 | mutex_unlock(&a->lock); |
977 | jermar | 1075 | } |
1076 | |||
1077 | return NULL; |
||
1078 | } |
||
1048 | jermar | 1079 | |
1080 | /** Check area conflicts with other areas. |
||
1081 | * |
||
1082 | * The address space must be locked and interrupts must be disabled. |
||
1083 | * |
||
1084 | * @param as Address space. |
||
1085 | * @param va Starting virtual address of the area being tested. |
||
1086 | * @param size Size of the area being tested. |
||
1087 | * @param avoid_area Do not touch this area. |
||
1088 | * |
||
1089 | * @return True if there is no conflict, false otherwise. |
||
1090 | */ |
||
1780 | jermar | 1091 | bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area) |
1048 | jermar | 1092 | { |
1093 | as_area_t *a; |
||
1147 | jermar | 1094 | btree_node_t *leaf, *node; |
1095 | int i; |
||
1048 | jermar | 1096 | |
1070 | jermar | 1097 | /* |
1098 | * We don't want any area to have conflicts with NULL page. |
||
1099 | */ |
||
1100 | if (overlaps(va, size, NULL, PAGE_SIZE)) |
||
1101 | return false; |
||
1102 | |||
1147 | jermar | 1103 | /* |
1104 | * The leaf node is found in O(log n), where n is proportional to |
||
1105 | * the number of address space areas belonging to as. |
||
1106 | * The check for conflicts is then attempted on the rightmost |
||
1150 | jermar | 1107 | * record in the left neighbour, the leftmost record in the right |
1108 | * neighbour and all records in the leaf node itself. |
||
1147 | jermar | 1109 | */ |
1048 | jermar | 1110 | |
1147 | jermar | 1111 | if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) { |
1112 | if (a != avoid_area) |
||
1113 | return false; |
||
1114 | } |
||
1115 | |||
1116 | /* First, check the two border cases. */ |
||
1150 | jermar | 1117 | if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { |
1147 | jermar | 1118 | a = (as_area_t *) node->value[node->keys - 1]; |
1380 | jermar | 1119 | mutex_lock(&a->lock); |
1147 | jermar | 1120 | if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
1380 | jermar | 1121 | mutex_unlock(&a->lock); |
1147 | jermar | 1122 | return false; |
1123 | } |
||
1380 | jermar | 1124 | mutex_unlock(&a->lock); |
1147 | jermar | 1125 | } |
1150 | jermar | 1126 | if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) { |
1147 | jermar | 1127 | a = (as_area_t *) node->value[0]; |
1380 | jermar | 1128 | mutex_lock(&a->lock); |
1147 | jermar | 1129 | if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
1380 | jermar | 1130 | mutex_unlock(&a->lock); |
1147 | jermar | 1131 | return false; |
1132 | } |
||
1380 | jermar | 1133 | mutex_unlock(&a->lock); |
1147 | jermar | 1134 | } |
1135 | |||
1136 | /* Second, check the leaf node. */ |
||
1137 | for (i = 0; i < leaf->keys; i++) { |
||
1138 | a = (as_area_t *) leaf->value[i]; |
||
1139 | |||
1048 | jermar | 1140 | if (a == avoid_area) |
1141 | continue; |
||
1147 | jermar | 1142 | |
1380 | jermar | 1143 | mutex_lock(&a->lock); |
1147 | jermar | 1144 | if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
1380 | jermar | 1145 | mutex_unlock(&a->lock); |
1147 | jermar | 1146 | return false; |
1147 | } |
||
1380 | jermar | 1148 | mutex_unlock(&a->lock); |
1048 | jermar | 1149 | } |
1150 | |||
1070 | jermar | 1151 | /* |
1152 | * So far, the area does not conflict with other areas. |
||
1153 | * Check if it doesn't conflict with kernel address space. |
||
1154 | */ |
||
1155 | if (!KERNEL_ADDRESS_SPACE_SHADOWED) { |
||
1156 | return !overlaps(va, size, |
||
1157 | KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START); |
||
1158 | } |
||
1159 | |||
1048 | jermar | 1160 | return true; |
1161 | } |
||
1235 | jermar | 1162 | |
1380 | jermar | 1163 | /** Return size of the address space area with given base. */ |
1780 | jermar | 1164 | size_t as_get_size(uintptr_t base) |
1329 | palkovsky | 1165 | { |
1166 | ipl_t ipl; |
||
1167 | as_area_t *src_area; |
||
1168 | size_t size; |
||
1169 | |||
1170 | ipl = interrupts_disable(); |
||
1171 | src_area = find_area_and_lock(AS, base); |
||
1172 | if (src_area){ |
||
1173 | size = src_area->pages * PAGE_SIZE; |
||
1380 | jermar | 1174 | mutex_unlock(&src_area->lock); |
1329 | palkovsky | 1175 | } else { |
1176 | size = 0; |
||
1177 | } |
||
1178 | interrupts_restore(ipl); |
||
1179 | return size; |
||
1180 | } |
||
1181 | |||
1387 | jermar | 1182 | /** Mark portion of address space area as used. |
1183 | * |
||
1184 | * The address space area must be already locked. |
||
1185 | * |
||
1186 | * @param a Address space area. |
||
1187 | * @param page First page to be marked. |
||
1188 | * @param count Number of page to be marked. |
||
1189 | * |
||
1190 | * @return 0 on failure and 1 on success. |
||
1191 | */ |
||
1780 | jermar | 1192 | int used_space_insert(as_area_t *a, uintptr_t page, count_t count) |
1387 | jermar | 1193 | { |
1194 | btree_node_t *leaf, *node; |
||
1195 | count_t pages; |
||
1196 | int i; |
||
1197 | |||
1198 | ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); |
||
1199 | ASSERT(count); |
||
1200 | |||
1201 | pages = (count_t) btree_search(&a->used_space, page, &leaf); |
||
1202 | if (pages) { |
||
1203 | /* |
||
1204 | * We hit the beginning of some used space. |
||
1205 | */ |
||
1206 | return 0; |
||
1207 | } |
||
1208 | |||
1437 | jermar | 1209 | if (!leaf->keys) { |
1210 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1211 | return 1; |
||
1212 | } |
||
1213 | |||
1387 | jermar | 1214 | node = btree_leaf_node_left_neighbour(&a->used_space, leaf); |
1215 | if (node) { |
||
1780 | jermar | 1216 | uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0]; |
1387 | jermar | 1217 | count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0]; |
1218 | |||
1219 | /* |
||
1220 | * Examine the possibility that the interval fits |
||
1221 | * somewhere between the rightmost interval of |
||
1222 | * the left neigbour and the first interval of the leaf. |
||
1223 | */ |
||
1224 | |||
1225 | if (page >= right_pg) { |
||
1226 | /* Do nothing. */ |
||
1227 | } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1228 | /* The interval intersects with the left interval. */ |
||
1229 | return 0; |
||
1230 | } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1231 | /* The interval intersects with the right interval. */ |
||
1232 | return 0; |
||
1233 | } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) { |
||
1234 | /* The interval can be added by merging the two already present intervals. */ |
||
1403 | jermar | 1235 | node->value[node->keys - 1] += count + right_cnt; |
1387 | jermar | 1236 | btree_remove(&a->used_space, right_pg, leaf); |
1237 | return 1; |
||
1238 | } else if (page == left_pg + left_cnt*PAGE_SIZE) { |
||
1239 | /* The interval can be added by simply growing the left interval. */ |
||
1403 | jermar | 1240 | node->value[node->keys - 1] += count; |
1387 | jermar | 1241 | return 1; |
1242 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1243 | /* |
||
1244 | * The interval can be addded by simply moving base of the right |
||
1245 | * interval down and increasing its size accordingly. |
||
1246 | */ |
||
1403 | jermar | 1247 | leaf->value[0] += count; |
1387 | jermar | 1248 | leaf->key[0] = page; |
1249 | return 1; |
||
1250 | } else { |
||
1251 | /* |
||
1252 | * The interval is between both neigbouring intervals, |
||
1253 | * but cannot be merged with any of them. |
||
1254 | */ |
||
1255 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1256 | return 1; |
||
1257 | } |
||
1258 | } else if (page < leaf->key[0]) { |
||
1780 | jermar | 1259 | uintptr_t right_pg = leaf->key[0]; |
1387 | jermar | 1260 | count_t right_cnt = (count_t) leaf->value[0]; |
1261 | |||
1262 | /* |
||
1263 | * Investigate the border case in which the left neighbour does not |
||
1264 | * exist but the interval fits from the left. |
||
1265 | */ |
||
1266 | |||
1267 | if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1268 | /* The interval intersects with the right interval. */ |
||
1269 | return 0; |
||
1270 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1271 | /* |
||
1272 | * The interval can be added by moving the base of the right interval down |
||
1273 | * and increasing its size accordingly. |
||
1274 | */ |
||
1275 | leaf->key[0] = page; |
||
1403 | jermar | 1276 | leaf->value[0] += count; |
1387 | jermar | 1277 | return 1; |
1278 | } else { |
||
1279 | /* |
||
1280 | * The interval doesn't adjoin with the right interval. |
||
1281 | * It must be added individually. |
||
1282 | */ |
||
1283 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1284 | return 1; |
||
1285 | } |
||
1286 | } |
||
1287 | |||
1288 | node = btree_leaf_node_right_neighbour(&a->used_space, leaf); |
||
1289 | if (node) { |
||
1780 | jermar | 1290 | uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0]; |
1387 | jermar | 1291 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0]; |
1292 | |||
1293 | /* |
||
1294 | * Examine the possibility that the interval fits |
||
1295 | * somewhere between the leftmost interval of |
||
1296 | * the right neigbour and the last interval of the leaf. |
||
1297 | */ |
||
1298 | |||
1299 | if (page < left_pg) { |
||
1300 | /* Do nothing. */ |
||
1301 | } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1302 | /* The interval intersects with the left interval. */ |
||
1303 | return 0; |
||
1304 | } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1305 | /* The interval intersects with the right interval. */ |
||
1306 | return 0; |
||
1307 | } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) { |
||
1308 | /* The interval can be added by merging the two already present intervals. */ |
||
1403 | jermar | 1309 | leaf->value[leaf->keys - 1] += count + right_cnt; |
1387 | jermar | 1310 | btree_remove(&a->used_space, right_pg, node); |
1311 | return 1; |
||
1312 | } else if (page == left_pg + left_cnt*PAGE_SIZE) { |
||
1313 | /* The interval can be added by simply growing the left interval. */ |
||
1403 | jermar | 1314 | leaf->value[leaf->keys - 1] += count; |
1387 | jermar | 1315 | return 1; |
1316 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1317 | /* |
||
1318 | * The interval can be addded by simply moving base of the right |
||
1319 | * interval down and increasing its size accordingly. |
||
1320 | */ |
||
1403 | jermar | 1321 | node->value[0] += count; |
1387 | jermar | 1322 | node->key[0] = page; |
1323 | return 1; |
||
1324 | } else { |
||
1325 | /* |
||
1326 | * The interval is between both neigbouring intervals, |
||
1327 | * but cannot be merged with any of them. |
||
1328 | */ |
||
1329 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1330 | return 1; |
||
1331 | } |
||
1332 | } else if (page >= leaf->key[leaf->keys - 1]) { |
||
1780 | jermar | 1333 | uintptr_t left_pg = leaf->key[leaf->keys - 1]; |
1387 | jermar | 1334 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1]; |
1335 | |||
1336 | /* |
||
1337 | * Investigate the border case in which the right neighbour does not |
||
1338 | * exist but the interval fits from the right. |
||
1339 | */ |
||
1340 | |||
1341 | if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1403 | jermar | 1342 | /* The interval intersects with the left interval. */ |
1387 | jermar | 1343 | return 0; |
1344 | } else if (left_pg + left_cnt*PAGE_SIZE == page) { |
||
1345 | /* The interval can be added by growing the left interval. */ |
||
1403 | jermar | 1346 | leaf->value[leaf->keys - 1] += count; |
1387 | jermar | 1347 | return 1; |
1348 | } else { |
||
1349 | /* |
||
1350 | * The interval doesn't adjoin with the left interval. |
||
1351 | * It must be added individually. |
||
1352 | */ |
||
1353 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1354 | return 1; |
||
1355 | } |
||
1356 | } |
||
1357 | |||
1358 | /* |
||
1359 | * Note that if the algorithm made it thus far, the interval can fit only |
||
1360 | * between two other intervals of the leaf. The two border cases were already |
||
1361 | * resolved. |
||
1362 | */ |
||
1363 | for (i = 1; i < leaf->keys; i++) { |
||
1364 | if (page < leaf->key[i]) { |
||
1780 | jermar | 1365 | uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i]; |
1387 | jermar | 1366 | count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i]; |
1367 | |||
1368 | /* |
||
1369 | * The interval fits between left_pg and right_pg. |
||
1370 | */ |
||
1371 | |||
1372 | if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1373 | /* The interval intersects with the left interval. */ |
||
1374 | return 0; |
||
1375 | } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1376 | /* The interval intersects with the right interval. */ |
||
1377 | return 0; |
||
1378 | } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) { |
||
1379 | /* The interval can be added by merging the two already present intervals. */ |
||
1403 | jermar | 1380 | leaf->value[i - 1] += count + right_cnt; |
1387 | jermar | 1381 | btree_remove(&a->used_space, right_pg, leaf); |
1382 | return 1; |
||
1383 | } else if (page == left_pg + left_cnt*PAGE_SIZE) { |
||
1384 | /* The interval can be added by simply growing the left interval. */ |
||
1403 | jermar | 1385 | leaf->value[i - 1] += count; |
1387 | jermar | 1386 | return 1; |
1387 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1388 | /* |
||
1389 | * The interval can be added by simply moving the base of the right |
||
1390 | * interval down and increasing its size accordingly. |
||
1391 | */ |
||
1403 | jermar | 1392 | leaf->value[i] += count; |
1387 | jermar | 1393 | leaf->key[i] = page; |
1394 | return 1; |
||
1395 | } else { |
||
1396 | /* |
||
1397 | * The interval is between both neighbouring intervals, |
||
1398 | * but cannot be merged with any of them. |
||
1399 | */ |
||
1400 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1401 | return 1; |
||
1402 | } |
||
1403 | } |
||
1404 | } |
||
1405 | |||
1735 | decky | 1406 | panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page); |
1387 | jermar | 1407 | } |
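To make the branches above easier to follow, here is a worked trace (illustrative only, not part of the original file; it assumes 4 KiB pages). Suppose the leaf records two used intervals, [0x10000, 2 pages] and [0x14000, 1 page]; each call below starts from that same initial state:

/*
 * used_space_add(a, 0x12000, 2): the new interval adjoins both neighbours,
 *     so the left interval grows by count + right_cnt and the right key is
 *     removed, leaving a single interval [0x10000, 5 pages].
 * used_space_add(a, 0x13000, 1): the new interval adjoins only the right
 *     neighbour, so that interval's base moves down to 0x13000 and its
 *     count becomes 2.
 * used_space_add(a, 0x11000, 1): the new interval overlaps the left
 *     neighbour, so the function returns 0 and nothing is changed.
 */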
1408 | |||
1409 | /** Mark a portion of an address space area as unused. |
||
1410 | * |
||
1411 | * The address space area must already be locked. |
||
1412 | * |
||
1413 | * @param a Address space area. |
||
1414 | * @param page First page to be marked. |
||
1415 | * @param count Number of pages to be marked. |
||
1416 | * |
||
1417 | * @return 0 on failure and 1 on success. |
||
1418 | */ |
||
1780 | jermar | 1419 | int used_space_remove(as_area_t *a, uintptr_t page, count_t count) |
1387 | jermar | 1420 | { |
1421 | btree_node_t *leaf, *node; |
||
1422 | count_t pages; |
||
1423 | int i; |
||
1424 | |||
1425 | ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); |
||
1426 | ASSERT(count); |
||
1427 | |||
1428 | pages = (count_t) btree_search(&a->used_space, page, &leaf); |
||
1429 | if (pages) { |
||
1430 | /* |
||
1431 | * We are lucky; page is the beginning of some interval. |
||
1432 | */ |
||
1433 | if (count > pages) { |
||
1434 | return 0; |
||
1435 | } else if (count == pages) { |
||
1436 | btree_remove(&a->used_space, page, leaf); |
||
1403 | jermar | 1437 | return 1; |
1387 | jermar | 1438 | } else { |
1439 | /* |
||
1440 | * Find the respective interval. |
||
1441 | * Decrease its size and relocate its start address. |
||
1442 | */ |
||
1443 | for (i = 0; i < leaf->keys; i++) { |
||
1444 | if (leaf->key[i] == page) { |
||
1445 | leaf->key[i] += count*PAGE_SIZE; |
||
1403 | jermar | 1446 | leaf->value[i] -= count; |
1387 | jermar | 1447 | return 1; |
1448 | } |
||
1449 | } |
||
1450 | goto error; |
||
1451 | } |
||
1452 | } |
||
1453 | |||
1454 | node = btree_leaf_node_left_neighbour(&a->used_space, leaf); |
||
1455 | if (node && page < leaf->key[0]) { |
||
1780 | jermar | 1456 | uintptr_t left_pg = node->key[node->keys - 1]; |
1387 | jermar | 1457 | count_t left_cnt = (count_t) node->value[node->keys - 1]; |
1458 | |||
1459 | if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) { |
||
1460 | if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) { |
||
1461 | /* |
||
1462 | * The interval is contained in the rightmost interval |
||
1463 | * of the left neighbour and can be removed by |
||
1464 | * updating the size of the bigger interval. |
||
1465 | */ |
||
1403 | jermar | 1466 | node->value[node->keys - 1] -= count; |
1387 | jermar | 1467 | return 1; |
1468 | } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) { |
||
1403 | jermar | 1469 | count_t new_cnt; |
1387 | jermar | 1470 | |
1471 | /* |
||
1472 | * The interval is contained in the rightmost interval |
||
1473 | * of the left neighbour but its removal requires |
||
1474 | * both updating the size of the original interval and |
||
1475 | * also inserting a new interval. |
||
1476 | */ |
||
1403 | jermar | 1477 | new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH; |
1478 | node->value[node->keys - 1] -= count + new_cnt; |
||
1387 | jermar | 1479 | btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf); |
1480 | return 1; |
||
1481 | } |
||
1482 | } |
||
1483 | return 0; |
||
1484 | } else if (page < leaf->key[0]) { |
||
1485 | return 0; |
||
1486 | } |
||
1487 | |||
1488 | if (page > leaf->key[leaf->keys - 1]) { |
||
1780 | jermar | 1489 | uintptr_t left_pg = leaf->key[leaf->keys - 1]; |
1387 | jermar | 1490 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1]; |
1491 | |||
1492 | if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) { |
||
1493 | if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) { |
||
1494 | /* |
||
1495 | * The interval is contained in the rightmost interval |
||
1496 | * of the leaf and can be removed by updating the size |
||
1497 | * of the bigger interval. |
||
1498 | */ |
||
1403 | jermar | 1499 | leaf->value[leaf->keys - 1] -= count; |
1387 | jermar | 1500 | return 1; |
1501 | } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) { |
||
1403 | jermar | 1502 | count_t new_cnt; |
1387 | jermar | 1503 | |
1504 | /* |
||
1505 | * The interval is contained in the rightmost interval |
||
1506 | * of the leaf but its removal requires both updating |
||
1507 | * the size of the original interval and |
||
1508 | * also inserting a new interval. |
||
1509 | */ |
||
1403 | jermar | 1510 | new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH; |
1511 | leaf->value[leaf->keys - 1] -= count + new_cnt; |
||
1387 | jermar | 1512 | btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf); |
1513 | return 1; |
||
1514 | } |
||
1515 | } |
||
1516 | return 0; |
||
1517 | } |
||
1518 | |||
1519 | /* |
||
1520 | * The border cases have already been resolved. |
||
1521 | * Now the interval can only lie between intervals of the leaf. |
||
1522 | */ |
||
1523 | for (i = 1; i < leaf->keys - 1; i++) { |
||
1524 | if (page < leaf->key[i]) { |
||
1780 | jermar | 1525 | uintptr_t left_pg = leaf->key[i - 1]; |
1387 | jermar | 1526 | count_t left_cnt = (count_t) leaf->value[i - 1]; |
1527 | |||
1528 | /* |
||
1529 | * Now the interval is between intervals corresponding to (i - 1) and i. |
||
1530 | */ |
||
1531 | if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) { |
||
1532 | if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) { |
||
1533 | /* |
||
1534 | * The interval is contained in the interval (i - 1) |
||
1535 | * of the leaf and can be removed by updating the size |
||
1536 | * of the bigger interval. |
||
1537 | */ |
||
1403 | jermar | 1538 | leaf->value[i - 1] -= count; |
1387 | jermar | 1539 | return 1; |
1540 | } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) { |
||
1403 | jermar | 1541 | count_t new_cnt; |
1387 | jermar | 1542 | |
1543 | /* |
||
1544 | * The interval is contained in the interval (i - 1) |
||
1545 | * of the leaf but its removal requires both updating |
||
1546 | * the size of the original interval and |
||
1547 | * also inserting a new interval. |
||
1548 | */ |
||
1403 | jermar | 1549 | new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH; |
1550 | leaf->value[i - 1] -= count + new_cnt; |
||
1387 | jermar | 1551 | btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf); |
1552 | return 1; |
||
1553 | } |
||
1554 | } |
||
1555 | return 0; |
||
1556 | } |
||
1557 | } |
||
1558 | |||
1559 | error: |
||
1735 | decky | 1560 | panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page); |
1387 | jermar | 1561 | } |
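A symmetric trace for removal may help (illustrative only; 4 KiB pages, each call starting from a leaf that records a single used interval [0x10000, 4 pages]):

/*
 * used_space_remove(a, 0x10000, 4): btree_search() finds the key and count
 *     equals the recorded size, so the whole interval is removed.
 * used_space_remove(a, 0x10000, 1): the key is found but only part of the
 *     interval goes away; the key shifts to 0x11000 and the count drops to 3.
 * used_space_remove(a, 0x11000, 2): the removed range sits inside the
 *     interval, so the interval shrinks to [0x10000, 1 page] and a new
 *     interval [0x13000, 1 page] is inserted for the remaining tail.
 * used_space_remove(a, 0x10000, 8): count exceeds the recorded size, so the
 *     function returns 0.
 */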
1562 | |||
1409 | jermar | 1563 | /** Remove reference to address space area share info. |
1564 | * |
||
1565 | * If the reference count drops to 0, the sh_info is deallocated. |
||
1566 | * |
||
1567 | * @param sh_info Pointer to address space area share info. |
||
1568 | */ |
||
1569 | void sh_info_remove_reference(share_info_t *sh_info) |
||
1570 | { |
||
1571 | bool dealloc = false; |
||
1572 | |||
1573 | mutex_lock(&sh_info->lock); |
||
1574 | ASSERT(sh_info->refcount); |
||
1575 | if (--sh_info->refcount == 0) { |
||
1576 | dealloc = true; |
||
1495 | jermar | 1577 | link_t *cur; |
1409 | jermar | 1578 | |
1579 | /* |
||
1580 | * Now carefully walk the pagemap B+tree and free/remove the |
||
1581 | * reference from all frames found there. |
||
1582 | */ |
||
1495 | jermar | 1583 | for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) { |
1409 | jermar | 1584 | btree_node_t *node; |
1495 | jermar | 1585 | int i; |
1409 | jermar | 1586 | |
1495 | jermar | 1587 | node = list_get_instance(cur, btree_node_t, leaf_link); |
1588 | for (i = 0; i < node->keys; i++) |
||
1780 | jermar | 1589 | frame_free((uintptr_t) node->value[i]); |
1409 | jermar | 1590 | } |
1591 | |||
1592 | } |
||
1593 | mutex_unlock(&sh_info->lock); |
||
1594 | |||
1595 | if (dealloc) { |
||
1596 | btree_destroy(&sh_info->pagemap); |
||
1597 | free(sh_info); |
||
1598 | } |
||
1599 | } |
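The matching reference acquisition is not shown in this part of the file; a minimal sketch, assuming the caller follows the same locking discipline, would be:

	/* Hand out one more reference to an already existing sh_info. */
	mutex_lock(&sh_info->lock);
	sh_info->refcount++;
	mutex_unlock(&sh_info->lock);

Keeping both the increment and the decrement under sh_info->lock is what makes the refcount test in sh_info_remove_reference() a safe place to decide that the pagemap and the structure itself can be torn down.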
||
1600 | |||
1235 | jermar | 1601 | /* |
1602 | * Address space related syscalls. |
||
1603 | */ |
||
1604 | |||
1605 | /** Wrapper for as_area_create(). */ |
||
1780 | jermar | 1606 | unative_t sys_as_area_create(uintptr_t address, size_t size, int flags) |
1235 | jermar | 1607 | { |
1424 | jermar | 1608 | if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL)) |
1780 | jermar | 1609 | return (unative_t) address; |
1235 | jermar | 1610 | else |
1780 | jermar | 1611 | return (unative_t) -1; |
1235 | jermar | 1612 | } |
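For illustration (hypothetical values; the AS_AREA_READ/AS_AREA_WRITE flag names are assumed to come from mm/as.h), a call such as

	sys_as_area_create(0x70000000, 16 * PAGE_SIZE, AS_AREA_READ | AS_AREA_WRITE);

returns the base address back as a unative_t on success and (unative_t) -1 on failure; note that the wrapper always ORs in AS_AREA_CACHEABLE and creates the area with the anonymous backend.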
1613 | |||
1793 | jermar | 1614 | /** Wrapper for as_area_resize(). */ |
1780 | jermar | 1615 | unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags) |
1235 | jermar | 1616 | { |
1780 | jermar | 1617 | return (unative_t) as_area_resize(AS, address, size, 0); |
1235 | jermar | 1618 | } |
1619 | |||
1793 | jermar | 1620 | /** Wrapper for as_area_destroy(). */ |
1780 | jermar | 1621 | unative_t sys_as_area_destroy(uintptr_t address) |
1306 | jermar | 1622 | { |
1780 | jermar | 1623 | return (unative_t) as_area_destroy(AS, address); |
1306 | jermar | 1624 | } |
1702 | cejka | 1625 | |
1914 | jermar | 1626 | /** Print out information about address space. |
1627 | * |
||
1628 | * @param as Address space. |
||
1629 | */ |
||
1630 | void as_print(as_t *as) |
||
1631 | { |
||
1632 | ipl_t ipl; |
||
1633 | |||
1634 | ipl = interrupts_disable(); |
||
1635 | mutex_lock(&as->lock); |
||
1636 | |||
1637 | /* Print out info about address space areas. */ |
||
1638 | link_t *cur; |
||
1639 | for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) { |
||
1915 | jermar | 1640 | btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link); |
1914 | jermar | 1641 | |
1642 | int i; |
||
1643 | for (i = 0; i < node->keys; i++) { |
||
1915 | jermar | 1644 | as_area_t *area = node->value[i]; |
1914 | jermar | 1645 | |
1646 | mutex_lock(&area->lock); |
||
1647 | printf("as_area: %p, base=%p, pages=%d (%p - %p)\n", |
||
1648 | area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE); |
||
1649 | mutex_unlock(&area->lock); |
||
1650 | } |
||
1651 | } |
||
1652 | |||
1653 | mutex_unlock(&as->lock); |
||
1654 | interrupts_restore(ipl); |
||
1655 | } |
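With hypothetical values (and depending on how the kernel printf renders %p), each iteration of the loop above prints one line of the form:

	as_area: 0xdeadbeef, base=0x70000000, pages=16 (0x70000000 - 0x70010000)

i.e. the area structure's kernel address, its base, its size in pages and the corresponding address range.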
||
1656 | |||
1757 | jermar | 1657 | /** @} |
1702 | cejka | 1658 | */ |