Rev | Author | Line No. | Line |
---|---|---|---|
703 | jermar | 1 | /* |
2 | * Copyright (C) 2001-2006 Jakub Jermar |
3 | * All rights reserved. |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
28 | |||
1757 | jermar | 29 | /** @addtogroup genericmm |
1702 | cejka | 30 | * @{ |
31 | */ |
32 | |||
1248 | jermar | 33 | /** |
1702 | cejka | 34 | * @file |
1248 | jermar | 35 | * @brief Address space related functions. |
36 | * |
703 | jermar | 37 | * This file contains address space manipulation functions. |
38 | * Roughly speaking, this is a higher-level client of |
39 | * the Virtual Address Translation (VAT) subsystem. |
1248 | jermar | 40 | * |
41 | * Functionality provided by this file allows one to |
1757 | jermar | 42 | * create address spaces and create, resize and share |
1248 | jermar | 43 | * address space areas. |
44 | * |
45 | * @see page.c |
46 | * |
703 | jermar | 47 | */ |
48 | |||
49 | #include <mm/as.h> |
||
756 | jermar | 50 | #include <arch/mm/as.h> |
703 | jermar | 51 | #include <mm/page.h> |
52 | #include <mm/frame.h> |
||
814 | palkovsky | 53 | #include <mm/slab.h> |
703 | jermar | 54 | #include <mm/tlb.h> |
55 | #include <arch/mm/page.h> |
||
56 | #include <genarch/mm/page_pt.h> |
||
1108 | jermar | 57 | #include <genarch/mm/page_ht.h> |
727 | jermar | 58 | #include <mm/asid.h> |
703 | jermar | 59 | #include <arch/mm/asid.h> |
60 | #include <synch/spinlock.h> |
||
1380 | jermar | 61 | #include <synch/mutex.h> |
788 | jermar | 62 | #include <adt/list.h> |
1147 | jermar | 63 | #include <adt/btree.h> |
1235 | jermar | 64 | #include <proc/task.h> |
1288 | jermar | 65 | #include <proc/thread.h> |
1235 | jermar | 66 | #include <arch/asm.h> |
703 | jermar | 67 | #include <panic.h> |
68 | #include <debug.h> |
||
1235 | jermar | 69 | #include <print.h> |
703 | jermar | 70 | #include <memstr.h> |
1070 | jermar | 71 | #include <macros.h> |
703 | jermar | 72 | #include <arch.h> |
1235 | jermar | 73 | #include <errno.h> |
74 | #include <config.h> |
||
1387 | jermar | 75 | #include <align.h> |
1235 | jermar | 76 | #include <arch/types.h> |
77 | #include <typedefs.h> |
||
1288 | jermar | 78 | #include <syscall/copy.h> |
79 | #include <arch/interrupt.h> |
||
703 | jermar | 80 | |
1757 | jermar | 81 | /** |
82 | * Each architecture decides what functions will be used to carry out |
||
83 | * address space operations such as creating or locking page tables. |
||
84 | */ |
||
756 | jermar | 85 | as_operations_t *as_operations = NULL; |
703 | jermar | 86 | |
1890 | jermar | 87 | /** |
88 | * Slab for as_t objects. |
||
89 | */ |
||
90 | static slab_cache_t *as_slab; |
||
91 | |||
1415 | jermar | 92 | /** This lock protects the inactive_as_with_asid_head list. It must be acquired before the as_t mutex. */ |
93 | SPINLOCK_INITIALIZE(inactive_as_with_asid_lock); |
||
823 | jermar | 94 | |
95 | /** |
||
96 | * This list contains address spaces that are not active on any |
||
97 | * processor and that have a valid ASID. |
||
98 | */ |
||
99 | LIST_INITIALIZE(inactive_as_with_asid_head); |
||
100 | |||
757 | jermar | 101 | /** Kernel address space. */ |
102 | as_t *AS_KERNEL = NULL; |
||
103 | |||
1235 | jermar | 104 | static int area_flags_to_page_flags(int aflags); |
1780 | jermar | 105 | static as_area_t *find_area_and_lock(as_t *as, uintptr_t va); |
106 | static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area); |
||
1409 | jermar | 107 | static void sh_info_remove_reference(share_info_t *sh_info); |
703 | jermar | 108 | |
756 | jermar | 109 | /** Initialize address space subsystem. */ |
110 | void as_init(void) |
||
111 | { |
||
112 | as_arch_init(); |
||
1890 | jermar | 113 | |
114 | as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); |
||
115 | |||
789 | palkovsky | 116 | AS_KERNEL = as_create(FLAG_AS_KERNEL); |
1383 | decky | 117 | if (!AS_KERNEL) |
118 | panic("can't create kernel address space\n"); |
||
119 | |||
756 | jermar | 120 | } |
121 | |||
757 | jermar | 122 | /** Create address space. |
123 | * |
||
124 | * @param flags Flags that influence the way in which the address space is created. |
||
125 | */ |
||
756 | jermar | 126 | as_t *as_create(int flags) |
703 | jermar | 127 | { |
128 | as_t *as; |
||
129 | |||
1890 | jermar | 130 | as = (as_t *) slab_alloc(as_slab, 0); |
823 | jermar | 131 | link_initialize(&as->inactive_as_with_asid_link); |
1380 | jermar | 132 | mutex_initialize(&as->lock); |
1147 | jermar | 133 | btree_create(&as->as_area_btree); |
822 | palkovsky | 134 | |
135 | if (flags & FLAG_AS_KERNEL) |
||
136 | as->asid = ASID_KERNEL; |
||
137 | else |
||
138 | as->asid = ASID_INVALID; |
||
139 | |||
1468 | jermar | 140 | as->refcount = 0; |
1415 | jermar | 141 | as->cpu_refcount = 0; |
822 | palkovsky | 142 | as->page_table = page_table_create(flags); |
703 | jermar | 143 | |
144 | return as; |
||
145 | } |
||
146 | |||
1468 | jermar | 147 | /** Destroy address space. |
148 | * |
||
149 | * When there are no tasks referencing this address space (i.e. its refcount is zero), |
||
150 | * the address space can be destroyed. |
||
151 | */ |
||
152 | void as_destroy(as_t *as) |
||
973 | palkovsky | 153 | { |
1468 | jermar | 154 | ipl_t ipl; |
1594 | jermar | 155 | bool cond; |
973 | palkovsky | 156 | |
1468 | jermar | 157 | ASSERT(as->refcount == 0); |
158 | |||
159 | /* |
||
160 | * Since there is no reference to this address space, |
||
161 | * it is safe not to lock its mutex. |
||
162 | */ |
||
163 | ipl = interrupts_disable(); |
||
164 | spinlock_lock(&inactive_as_with_asid_lock); |
||
1587 | jermar | 165 | if (as->asid != ASID_INVALID && as != AS_KERNEL) { |
1594 | jermar | 166 | if (as != AS && as->cpu_refcount == 0) |
1587 | jermar | 167 | list_remove(&as->inactive_as_with_asid_link); |
1468 | jermar | 168 | asid_put(as->asid); |
169 | } |
||
170 | spinlock_unlock(&inactive_as_with_asid_lock); |
||
171 | |||
172 | /* |
||
173 | * Destroy address space areas of the address space. |
||
1594 | jermar | 174 | * The B+tree must be walked carefully because it is |
175 | * also being destroyed. |
||
1468 | jermar | 176 | */ |
1594 | jermar | 177 | for (cond = true; cond; ) { |
1468 | jermar | 178 | btree_node_t *node; |
1594 | jermar | 179 | |
180 | ASSERT(!list_empty(&as->as_area_btree.leaf_head)); |
||
181 | node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link); |
||
182 | |||
183 | if ((cond = node->keys)) { |
||
184 | as_area_destroy(as, node->key[0]); |
||
185 | } |
||
1468 | jermar | 186 | } |
1495 | jermar | 187 | |
1483 | jermar | 188 | btree_destroy(&as->as_area_btree); |
1468 | jermar | 189 | page_table_destroy(as->page_table); |
190 | |||
191 | interrupts_restore(ipl); |
||
192 | |||
1890 | jermar | 193 | slab_free(as_slab, as); |
973 | palkovsky | 194 | } |
195 | |||
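Editorial illustration (not part of the annotated listing above): the create/destroy pair as this subsystem expects it to be used. Passing 0 for flags to denote an ordinary, non-kernel address space is an assumption here; only FLAG_AS_KERNEL is named in this file.

/* Hedged sketch of the as_create()/as_destroy() lifecycle. */
as_t *as = as_create(0);        /* assumption: 0 means "not FLAG_AS_KERNEL" */
if (!as)
	panic("can't create address space\n");
/* ... attach the address space to a task and use it ... */
ASSERT(as->refcount == 0);      /* as_destroy() requires that no task references it */
as_destroy(as);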
703 | jermar | 196 | /** Create address space area of common attributes. |
197 | * |
||
198 | * The created address space area is added to the target address space. |
||
199 | * |
||
200 | * @param as Target address space. |
||
1239 | jermar | 201 | * @param flags Flags of the area memory. |
1048 | jermar | 202 | * @param size Size of area. |
703 | jermar | 203 | * @param base Base address of area. |
1239 | jermar | 204 | * @param attrs Attributes of the area. |
1409 | jermar | 205 | * @param backend Address space area backend. NULL if no backend is used. |
206 | * @param backend_data NULL or a pointer to an array holding two void *. |
||
703 | jermar | 207 | * |
208 | * @return Address space area on success or NULL on failure. |
||
209 | */ |
||
1780 | jermar | 210 | as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs, |
1424 | jermar | 211 | mem_backend_t *backend, mem_backend_data_t *backend_data) |
703 | jermar | 212 | { |
213 | ipl_t ipl; |
||
214 | as_area_t *a; |
||
215 | |||
216 | if (base % PAGE_SIZE) |
||
1048 | jermar | 217 | return NULL; |
218 | |||
1233 | jermar | 219 | if (!size) |
220 | return NULL; |
||
221 | |||
1048 | jermar | 222 | /* Writeable executable areas are not supported. */ |
223 | if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) |
||
224 | return NULL; |
||
703 | jermar | 225 | |
226 | ipl = interrupts_disable(); |
||
1380 | jermar | 227 | mutex_lock(&as->lock); |
703 | jermar | 228 | |
1048 | jermar | 229 | if (!check_area_conflicts(as, base, size, NULL)) { |
1380 | jermar | 230 | mutex_unlock(&as->lock); |
1048 | jermar | 231 | interrupts_restore(ipl); |
232 | return NULL; |
||
233 | } |
||
703 | jermar | 234 | |
822 | palkovsky | 235 | a = (as_area_t *) malloc(sizeof(as_area_t), 0); |
703 | jermar | 236 | |
1380 | jermar | 237 | mutex_initialize(&a->lock); |
822 | palkovsky | 238 | |
1424 | jermar | 239 | a->as = as; |
1026 | jermar | 240 | a->flags = flags; |
1239 | jermar | 241 | a->attributes = attrs; |
1048 | jermar | 242 | a->pages = SIZE2FRAMES(size); |
822 | palkovsky | 243 | a->base = base; |
1409 | jermar | 244 | a->sh_info = NULL; |
245 | a->backend = backend; |
||
1424 | jermar | 246 | if (backend_data) |
247 | a->backend_data = *backend_data; |
||
248 | else |
||
1780 | jermar | 249 | memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0); |
1424 | jermar | 250 | |
1387 | jermar | 251 | btree_create(&a->used_space); |
822 | palkovsky | 252 | |
1147 | jermar | 253 | btree_insert(&as->as_area_btree, base, (void *) a, NULL); |
822 | palkovsky | 254 | |
1380 | jermar | 255 | mutex_unlock(&as->lock); |
703 | jermar | 256 | interrupts_restore(ipl); |
704 | jermar | 257 | |
703 | jermar | 258 | return a; |
259 | } |
||
260 | |||
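Editorial illustration of a typical as_area_create() call. The anon_backend symbol and the AS_AREA_ATTR_NONE attribute are assumptions (only phys_backend and AS_AREA_ATTR_PARTIAL appear in this file); base stands for a caller-chosen, page-aligned virtual address.

/* Hedged sketch: create four anonymous, cacheable, read/write pages at base. */
as_area_t *area;

area = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	4 * PAGE_SIZE, base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
if (!area) {
	/* NULL means a conflict with another area, an unaligned base or zero size */
}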
1235 | jermar | 261 | /** Find address space area and change it. |
262 | * |
||
263 | * @param as Address space. |
||
264 | * @param address Virtual address belonging to the area to be changed. Must be page-aligned. |
||
265 | * @param size New size of the virtual memory block starting at address. |
||
266 | * @param flags Flags influencing the remap operation. Currently unused. |
||
267 | * |
||
1306 | jermar | 268 | * @return Zero on success or a value from @ref errno.h otherwise. |
1235 | jermar | 269 | */ |
1780 | jermar | 270 | int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags) |
1235 | jermar | 271 | { |
1306 | jermar | 272 | as_area_t *area; |
1235 | jermar | 273 | ipl_t ipl; |
274 | size_t pages; |
||
275 | |||
276 | ipl = interrupts_disable(); |
||
1380 | jermar | 277 | mutex_lock(&as->lock); |
1235 | jermar | 278 | |
279 | /* |
||
280 | * Locate the area. |
||
281 | */ |
||
282 | area = find_area_and_lock(as, address); |
||
283 | if (!area) { |
||
1380 | jermar | 284 | mutex_unlock(&as->lock); |
1235 | jermar | 285 | interrupts_restore(ipl); |
1306 | jermar | 286 | return ENOENT; |
1235 | jermar | 287 | } |
288 | |||
1424 | jermar | 289 | if (area->backend == &phys_backend) { |
1235 | jermar | 290 | /* |
291 | * Remapping of address space areas associated |
||
292 | * with memory mapped devices is not supported. |
||
293 | */ |
||
1380 | jermar | 294 | mutex_unlock(&area->lock); |
295 | mutex_unlock(&as->lock); |
||
1235 | jermar | 296 | interrupts_restore(ipl); |
1306 | jermar | 297 | return ENOTSUP; |
1235 | jermar | 298 | } |
1409 | jermar | 299 | if (area->sh_info) { |
300 | /* |
||
301 | * Remapping of shared address space areas |
||
302 | * is not supported. |
||
303 | */ |
||
304 | mutex_unlock(&area->lock); |
||
305 | mutex_unlock(&as->lock); |
||
306 | interrupts_restore(ipl); |
||
307 | return ENOTSUP; |
||
308 | } |
||
1235 | jermar | 309 | |
310 | pages = SIZE2FRAMES((address - area->base) + size); |
||
311 | if (!pages) { |
||
312 | /* |
||
313 | * Zero size address space areas are not allowed. |
||
314 | */ |
||
1380 | jermar | 315 | mutex_unlock(&area->lock); |
316 | mutex_unlock(&as->lock); |
||
1235 | jermar | 317 | interrupts_restore(ipl); |
1306 | jermar | 318 | return EPERM; |
1235 | jermar | 319 | } |
320 | |||
321 | if (pages < area->pages) { |
||
1403 | jermar | 322 | bool cond; |
1780 | jermar | 323 | uintptr_t start_free = area->base + pages*PAGE_SIZE; |
1235 | jermar | 324 | |
325 | /* |
||
326 | * Shrinking the area. |
||
327 | * No need to check for overlaps. |
||
328 | */ |
||
1403 | jermar | 329 | |
330 | /* |
||
1436 | jermar | 331 | * Start TLB shootdown sequence. |
332 | */ |
||
333 | tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages); |
||
334 | |||
335 | /* |
||
1403 | jermar | 336 | * Remove frames belonging to used space starting from |
337 | * the highest addresses downwards until an overlap with |
||
338 | * the resized address space area is found. Note that this |
||
339 | * is also the right way to remove part of the used_space |
||
340 | * B+tree leaf list. |
||
341 | */ |
||
342 | for (cond = true; cond;) { |
||
343 | btree_node_t *node; |
||
344 | |||
345 | ASSERT(!list_empty(&area->used_space.leaf_head)); |
||
346 | node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link); |
||
347 | if ((cond = (bool) node->keys)) { |
||
1780 | jermar | 348 | uintptr_t b = node->key[node->keys - 1]; |
1403 | jermar | 349 | count_t c = (count_t) node->value[node->keys - 1]; |
350 | int i = 0; |
||
1235 | jermar | 351 | |
1403 | jermar | 352 | if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) { |
353 | |||
354 | if (b + c*PAGE_SIZE <= start_free) { |
||
355 | /* |
||
356 | * The whole interval fits completely |
||
357 | * in the resized address space area. |
||
358 | */ |
||
359 | break; |
||
360 | } |
||
361 | |||
362 | /* |
||
363 | * Part of the interval corresponding to b and c |
||
364 | * overlaps with the resized address space area. |
||
365 | */ |
||
366 | |||
367 | cond = false; /* we are almost done */ |
||
368 | i = (start_free - b) >> PAGE_WIDTH; |
||
369 | if (!used_space_remove(area, start_free, c - i)) |
||
1889 | jermar | 370 | panic("Could not remove used space.\n"); |
1403 | jermar | 371 | } else { |
372 | /* |
||
373 | * The interval of used space can be completely removed. |
||
374 | */ |
||
375 | if (!used_space_remove(area, b, c)) |
||
376 | panic("Could not remove used space.\n"); |
||
377 | } |
||
378 | |||
379 | for (; i < c; i++) { |
||
380 | pte_t *pte; |
||
381 | |||
382 | page_table_lock(as, false); |
||
383 | pte = page_mapping_find(as, b + i*PAGE_SIZE); |
||
384 | ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); |
||
1424 | jermar | 385 | if (area->backend && area->backend->frame_free) { |
386 | area->backend->frame_free(area, |
||
1409 | jermar | 387 | b + i*PAGE_SIZE, PTE_GET_FRAME(pte)); |
388 | } |
||
1403 | jermar | 389 | page_mapping_remove(as, b + i*PAGE_SIZE); |
390 | page_table_unlock(as, false); |
||
391 | } |
||
1235 | jermar | 392 | } |
393 | } |
||
1436 | jermar | 394 | |
1235 | jermar | 395 | /* |
1436 | jermar | 396 | * Finish TLB shootdown sequence. |
1235 | jermar | 397 | */ |
398 | tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages); |
||
399 | tlb_shootdown_finalize(); |
||
1889 | jermar | 400 | |
401 | /* |
||
402 | * Invalidate software translation caches (e.g. TSB on sparc64). |
||
403 | */ |
||
404 | as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages); |
||
1235 | jermar | 405 | } else { |
406 | /* |
||
407 | * Growing the area. |
||
408 | * Check for overlaps with other address space areas. |
||
409 | */ |
||
410 | if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) { |
||
1380 | jermar | 411 | mutex_unlock(&area->lock); |
412 | mutex_unlock(&as->lock); |
||
1235 | jermar | 413 | interrupts_restore(ipl); |
1306 | jermar | 414 | return EADDRNOTAVAIL; |
1235 | jermar | 415 | } |
416 | } |
||
417 | |||
418 | area->pages = pages; |
||
419 | |||
1380 | jermar | 420 | mutex_unlock(&area->lock); |
421 | mutex_unlock(&as->lock); |
||
1235 | jermar | 422 | interrupts_restore(ipl); |
423 | |||
1306 | jermar | 424 | return 0; |
1235 | jermar | 425 | } |
426 | |||
1306 | jermar | 427 | /** Destroy address space area. |
428 | * |
||
429 | * @param as Address space. |
||
430 | * @param address Address within the area to be deleted. |
||
431 | * |
||
432 | * @return Zero on success or a value from @ref errno.h on failure. |
||
433 | */ |
||
1780 | jermar | 434 | int as_area_destroy(as_t *as, uintptr_t address) |
1306 | jermar | 435 | { |
436 | as_area_t *area; |
||
1780 | jermar | 437 | uintptr_t base; |
1495 | jermar | 438 | link_t *cur; |
1306 | jermar | 439 | ipl_t ipl; |
440 | |||
441 | ipl = interrupts_disable(); |
||
1380 | jermar | 442 | mutex_lock(&as->lock); |
1306 | jermar | 443 | |
444 | area = find_area_and_lock(as, address); |
||
445 | if (!area) { |
||
1380 | jermar | 446 | mutex_unlock(&as->lock); |
1306 | jermar | 447 | interrupts_restore(ipl); |
448 | return ENOENT; |
||
449 | } |
||
450 | |||
1403 | jermar | 451 | base = area->base; |
452 | |||
1411 | jermar | 453 | /* |
1436 | jermar | 454 | * Start TLB shootdown sequence. |
455 | */ |
||
1889 | jermar | 456 | tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); |
1436 | jermar | 457 | |
458 | /* |
||
1411 | jermar | 459 | * Visit only the pages mapped by used_space B+tree. |
460 | */ |
||
1495 | jermar | 461 | for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) { |
1411 | jermar | 462 | btree_node_t *node; |
1495 | jermar | 463 | int i; |
1403 | jermar | 464 | |
1495 | jermar | 465 | node = list_get_instance(cur, btree_node_t, leaf_link); |
466 | for (i = 0; i < node->keys; i++) { |
||
1780 | jermar | 467 | uintptr_t b = node->key[i]; |
1495 | jermar | 468 | count_t j; |
1411 | jermar | 469 | pte_t *pte; |
1403 | jermar | 470 | |
1495 | jermar | 471 | for (j = 0; j < (count_t) node->value[i]; j++) { |
1411 | jermar | 472 | page_table_lock(as, false); |
1495 | jermar | 473 | pte = page_mapping_find(as, b + j*PAGE_SIZE); |
1411 | jermar | 474 | ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); |
1424 | jermar | 475 | if (area->backend && area->backend->frame_free) { |
476 | area->backend->frame_free(area, |
||
1495 | jermar | 477 | b + j*PAGE_SIZE, PTE_GET_FRAME(pte)); |
1403 | jermar | 478 | } |
1889 | jermar | 479 | page_mapping_remove(as, b + j*PAGE_SIZE); |
1411 | jermar | 480 | page_table_unlock(as, false); |
1306 | jermar | 481 | } |
482 | } |
||
483 | } |
||
1403 | jermar | 484 | |
1306 | jermar | 485 | /* |
1436 | jermar | 486 | * Finish TLB shootdown sequence. |
1306 | jermar | 487 | */ |
1889 | jermar | 488 | tlb_invalidate_pages(as->asid, area->base, area->pages); |
1306 | jermar | 489 | tlb_shootdown_finalize(); |
1436 | jermar | 490 | |
1889 | jermar | 491 | /* |
492 | * Invalidate potential software translation caches (e.g. TSB on sparc64). |
||
493 | */ |
||
494 | as_invalidate_translation_cache(as, area->base, area->pages); |
||
495 | |||
1436 | jermar | 496 | btree_destroy(&area->used_space); |
1306 | jermar | 497 | |
1309 | jermar | 498 | area->attributes |= AS_AREA_ATTR_PARTIAL; |
1409 | jermar | 499 | |
500 | if (area->sh_info) |
||
501 | sh_info_remove_reference(area->sh_info); |
||
502 | |||
1380 | jermar | 503 | mutex_unlock(&area->lock); |
1306 | jermar | 504 | |
505 | /* |
||
506 | * Remove the empty area from address space. |
||
507 | */ |
||
1889 | jermar | 508 | btree_remove(&as->as_area_btree, base, NULL); |
1306 | jermar | 509 | |
1309 | jermar | 510 | free(area); |
511 | |||
1889 | jermar | 512 | mutex_unlock(&as->lock); |
1306 | jermar | 513 | interrupts_restore(ipl); |
514 | return 0; |
||
515 | } |
||
516 | |||
1413 | jermar | 517 | /** Share address space area with another or the same address space. |
1235 | jermar | 518 | * |
1424 | jermar | 519 | * Address space area mapping is shared with a new address space area. |
520 | * If the source address space area has not been shared so far, |
||
521 | * a new sh_info is created. The new address space area simply gets the |
||
522 | * sh_info of the source area. The process of duplicating the |
||
523 | * mapping is done through the backend share function. |
||
1413 | jermar | 524 | * |
1417 | jermar | 525 | * @param src_as Pointer to source address space. |
1239 | jermar | 526 | * @param src_base Base address of the source address space area. |
1417 | jermar | 527 | * @param acc_size Expected size of the source area. |
1428 | palkovsky | 528 | * @param dst_as Pointer to destination address space. |
1417 | jermar | 529 | * @param dst_base Target base address. |
530 | * @param dst_flags_mask Destination address space area flags mask. |
||
1235 | jermar | 531 | * |
1306 | jermar | 532 | * @return Zero on success or ENOENT if there is no such task or |
1235 | jermar | 533 | * if there is no such address space area, |
534 | * EPERM if there was a problem in accepting the area or |
||
535 | * ENOMEM if there was a problem in allocating destination |
||
1413 | jermar | 536 | * address space area. ENOTSUP is returned if an attempt |
537 | * to share non-anonymous address space area is detected. |
||
1235 | jermar | 538 | */ |
1780 | jermar | 539 | int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, |
540 | as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) |
||
1235 | jermar | 541 | { |
542 | ipl_t ipl; |
||
1239 | jermar | 543 | int src_flags; |
544 | size_t src_size; |
||
545 | as_area_t *src_area, *dst_area; |
||
1413 | jermar | 546 | share_info_t *sh_info; |
1424 | jermar | 547 | mem_backend_t *src_backend; |
548 | mem_backend_data_t src_backend_data; |
||
1434 | palkovsky | 549 | |
1235 | jermar | 550 | ipl = interrupts_disable(); |
1380 | jermar | 551 | mutex_lock(&src_as->lock); |
1329 | palkovsky | 552 | src_area = find_area_and_lock(src_as, src_base); |
1239 | jermar | 553 | if (!src_area) { |
1238 | jermar | 554 | /* |
555 | * Could not find the source address space area. |
||
556 | */ |
||
1380 | jermar | 557 | mutex_unlock(&src_as->lock); |
1238 | jermar | 558 | interrupts_restore(ipl); |
559 | return ENOENT; |
||
560 | } |
||
1413 | jermar | 561 | |
1424 | jermar | 562 | if (!src_area->backend || !src_area->backend->share) { |
1413 | jermar | 563 | /* |
1851 | jermar | 564 | * There is no backend or the backend does not |
1424 | jermar | 565 | * know how to share the area. |
1413 | jermar | 566 | */ |
567 | mutex_unlock(&src_area->lock); |
||
568 | mutex_unlock(&src_as->lock); |
||
569 | interrupts_restore(ipl); |
||
570 | return ENOTSUP; |
||
571 | } |
||
572 | |||
1239 | jermar | 573 | src_size = src_area->pages * PAGE_SIZE; |
574 | src_flags = src_area->flags; |
||
1424 | jermar | 575 | src_backend = src_area->backend; |
576 | src_backend_data = src_area->backend_data; |
||
1544 | palkovsky | 577 | |
578 | /* Share the cacheable flag from the original mapping */ |
||
579 | if (src_flags & AS_AREA_CACHEABLE) |
||
580 | dst_flags_mask |= AS_AREA_CACHEABLE; |
||
581 | |||
1461 | palkovsky | 582 | if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) { |
1413 | jermar | 583 | mutex_unlock(&src_area->lock); |
584 | mutex_unlock(&src_as->lock); |
||
1235 | jermar | 585 | interrupts_restore(ipl); |
586 | return EPERM; |
||
587 | } |
||
1413 | jermar | 588 | |
1235 | jermar | 589 | /* |
1413 | jermar | 590 | * Now we are committed to sharing the area. |
591 | * First prepare the area for sharing. |
||
592 | * Then it will be safe to unlock it. |
||
593 | */ |
||
594 | sh_info = src_area->sh_info; |
||
595 | if (!sh_info) { |
||
596 | sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0); |
||
597 | mutex_initialize(&sh_info->lock); |
||
598 | sh_info->refcount = 2; |
||
599 | btree_create(&sh_info->pagemap); |
||
600 | src_area->sh_info = sh_info; |
||
601 | } else { |
||
602 | mutex_lock(&sh_info->lock); |
||
603 | sh_info->refcount++; |
||
604 | mutex_unlock(&sh_info->lock); |
||
605 | } |
||
606 | |||
1424 | jermar | 607 | src_area->backend->share(src_area); |
1413 | jermar | 608 | |
609 | mutex_unlock(&src_area->lock); |
||
610 | mutex_unlock(&src_as->lock); |
||
611 | |||
612 | /* |
||
1239 | jermar | 613 | * Create copy of the source address space area. |
614 | * The destination area is created with AS_AREA_ATTR_PARTIAL |
||
615 | * attribute set which prevents race condition with |
||
616 | * preliminary as_page_fault() calls. |
||
1417 | jermar | 617 | * The flags of the source area are masked against dst_flags_mask |
618 | * to support sharing in less privileged mode. |
||
1235 | jermar | 619 | */ |
1461 | palkovsky | 620 | dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base, |
1424 | jermar | 621 | AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); |
1239 | jermar | 622 | if (!dst_area) { |
1235 | jermar | 623 | /* |
624 | * Destination address space area could not be created. |
||
625 | */ |
||
1413 | jermar | 626 | sh_info_remove_reference(sh_info); |
627 | |||
1235 | jermar | 628 | interrupts_restore(ipl); |
629 | return ENOMEM; |
||
630 | } |
||
631 | |||
632 | /* |
||
1239 | jermar | 633 | * Now the destination address space area has been |
634 | * fully initialized. Clear the AS_AREA_ATTR_PARTIAL |
||
1413 | jermar | 635 | * attribute and set the sh_info. |
1239 | jermar | 636 | */ |
1380 | jermar | 637 | mutex_lock(&dst_area->lock); |
1239 | jermar | 638 | dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; |
1413 | jermar | 639 | dst_area->sh_info = sh_info; |
1380 | jermar | 640 | mutex_unlock(&dst_area->lock); |
1235 | jermar | 641 | |
642 | interrupts_restore(ipl); |
||
643 | |||
644 | return 0; |
||
645 | } |
||
646 | |||
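Editorial illustration of a typical as_area_share() call, using only names defined or documented in this file; dst_as and dst_base are placeholders chosen by the caller.

/* Hedged sketch: share the area at src_base into dst_as with read-only rights. */
int rc;

rc = as_area_share(AS, src_base, as_get_size(src_base),
	dst_as, dst_base, AS_AREA_READ);
if (rc != 0) {
	/* ENOENT, EPERM, ENOMEM or ENOTSUP -- see the return values documented above */
}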
1423 | jermar | 647 | /** Check access mode for address space area. |
648 | * |
||
649 | * The address space area must be locked prior to this call. |
||
650 | * |
||
651 | * @param area Address space area. |
||
652 | * @param access Access mode. |
||
653 | * |
||
654 | * @return False if access violates area's permissions, true otherwise. |
||
655 | */ |
||
656 | bool as_area_check_access(as_area_t *area, pf_access_t access) |
||
657 | { |
||
658 | int flagmap[] = { |
||
659 | [PF_ACCESS_READ] = AS_AREA_READ, |
||
660 | [PF_ACCESS_WRITE] = AS_AREA_WRITE, |
||
661 | [PF_ACCESS_EXEC] = AS_AREA_EXEC |
||
662 | }; |
||
663 | |||
664 | if (!(area->flags & flagmap[access])) |
||
665 | return false; |
||
666 | |||
667 | return true; |
||
668 | } |
||
669 | |||
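Editorial illustration: how a backend page-fault handler might consult as_area_check_access() before servicing a write fault. As the comment above requires, the area is assumed to be already locked (as_page_fault() holds it when calling into the backend).

/* Hedged sketch -- refuse faults that violate the area's permissions. */
if (!as_area_check_access(area, PF_ACCESS_WRITE))
	return AS_PF_FAULT;
/* otherwise establish the mapping and return AS_PF_OK */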
703 | jermar | 670 | /** Handle page fault within the current address space. |
671 | * |
||
1409 | jermar | 672 | * This is the high-level page fault handler. It decides |
673 | * whether the page fault can be resolved by any backend |
||
674 | * and if so, it invokes the backend to resolve the page |
||
675 | * fault. |
||
676 | * |
||
703 | jermar | 677 | * Interrupts are assumed disabled. |
678 | * |
||
679 | * @param page Faulting page. |
||
1411 | jermar | 680 | * @param access Access mode that caused the fault (i.e. read/write/exec). |
1288 | jermar | 681 | * @param istate Pointer to interrupted state. |
703 | jermar | 682 | * |
1409 | jermar | 683 | * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the |
684 | * fault was caused by copy_to_uspace() or copy_from_uspace(). |
||
703 | jermar | 685 | */ |
1780 | jermar | 686 | int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) |
703 | jermar | 687 | { |
1044 | jermar | 688 | pte_t *pte; |
977 | jermar | 689 | as_area_t *area; |
703 | jermar | 690 | |
1380 | jermar | 691 | if (!THREAD) |
1409 | jermar | 692 | return AS_PF_FAULT; |
1380 | jermar | 693 | |
703 | jermar | 694 | ASSERT(AS); |
1044 | jermar | 695 | |
1380 | jermar | 696 | mutex_lock(&AS->lock); |
977 | jermar | 697 | area = find_area_and_lock(AS, page); |
703 | jermar | 698 | if (!area) { |
699 | /* |
||
700 | * No area contained mapping for 'page'. |
||
701 | * Signal page fault to low-level handler. |
||
702 | */ |
||
1380 | jermar | 703 | mutex_unlock(&AS->lock); |
1288 | jermar | 704 | goto page_fault; |
703 | jermar | 705 | } |
706 | |||
1239 | jermar | 707 | if (area->attributes & AS_AREA_ATTR_PARTIAL) { |
708 | /* |
||
709 | * The address space area is not fully initialized. |
||
710 | * Avoid possible race by returning error. |
||
711 | */ |
||
1380 | jermar | 712 | mutex_unlock(&area->lock); |
713 | mutex_unlock(&AS->lock); |
||
1288 | jermar | 714 | goto page_fault; |
1239 | jermar | 715 | } |
716 | |||
1424 | jermar | 717 | if (!area->backend || !area->backend->page_fault) { |
1409 | jermar | 718 | /* |
719 | * The address space area is not backed by any backend |
||
720 | * or the backend cannot handle page faults. |
||
721 | */ |
||
722 | mutex_unlock(&area->lock); |
||
723 | mutex_unlock(&AS->lock); |
||
724 | goto page_fault; |
||
725 | } |
||
1179 | jermar | 726 | |
1044 | jermar | 727 | page_table_lock(AS, false); |
728 | |||
703 | jermar | 729 | /* |
1044 | jermar | 730 | * To avoid race condition between two page faults |
731 | * on the same address, we need to make sure |
||
732 | * the mapping has not been already inserted. |
||
733 | */ |
||
734 | if ((pte = page_mapping_find(AS, page))) { |
||
735 | if (PTE_PRESENT(pte)) { |
||
1423 | jermar | 736 | if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) || |
737 | (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) || |
||
738 | (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) { |
||
739 | page_table_unlock(AS, false); |
||
740 | mutex_unlock(&area->lock); |
||
741 | mutex_unlock(&AS->lock); |
||
742 | return AS_PF_OK; |
||
743 | } |
||
1044 | jermar | 744 | } |
745 | } |
||
1409 | jermar | 746 | |
1044 | jermar | 747 | /* |
1409 | jermar | 748 | * Resort to the backend page fault handler. |
703 | jermar | 749 | */ |
1424 | jermar | 750 | if (area->backend->page_fault(area, page, access) != AS_PF_OK) { |
1409 | jermar | 751 | page_table_unlock(AS, false); |
752 | mutex_unlock(&area->lock); |
||
753 | mutex_unlock(&AS->lock); |
||
754 | goto page_fault; |
||
755 | } |
||
703 | jermar | 756 | |
1044 | jermar | 757 | page_table_unlock(AS, false); |
1380 | jermar | 758 | mutex_unlock(&area->lock); |
759 | mutex_unlock(&AS->lock); |
||
1288 | jermar | 760 | return AS_PF_OK; |
761 | |||
762 | page_fault: |
||
763 | if (THREAD->in_copy_from_uspace) { |
||
764 | THREAD->in_copy_from_uspace = false; |
||
1780 | jermar | 765 | istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address); |
1288 | jermar | 766 | } else if (THREAD->in_copy_to_uspace) { |
767 | THREAD->in_copy_to_uspace = false; |
||
1780 | jermar | 768 | istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address); |
1288 | jermar | 769 | } else { |
770 | return AS_PF_FAULT; |
||
771 | } |
||
772 | |||
773 | return AS_PF_DEFER; |
||
703 | jermar | 774 | } |
775 | |||
823 | jermar | 776 | /** Switch address spaces. |
703 | jermar | 777 | * |
1380 | jermar | 778 | * Note that this function cannot sleep as it is essentially a part of |
1415 | jermar | 779 | * scheduling. Sleeping here would lead to deadlock on wakeup. |
1380 | jermar | 780 | * |
823 | jermar | 781 | * @param old Old address space or NULL. |
782 | * @param new New address space. |
||
703 | jermar | 783 | */ |
823 | jermar | 784 | void as_switch(as_t *old, as_t *new) |
703 | jermar | 785 | { |
786 | ipl_t ipl; |
||
823 | jermar | 787 | bool needs_asid = false; |
703 | jermar | 788 | |
789 | ipl = interrupts_disable(); |
||
1415 | jermar | 790 | spinlock_lock(&inactive_as_with_asid_lock); |
703 | jermar | 791 | |
792 | /* |
||
823 | jermar | 793 | * First, take care of the old address space. |
794 | */ |
||
795 | if (old) { |
||
1380 | jermar | 796 | mutex_lock_active(&old->lock); |
1415 | jermar | 797 | ASSERT(old->cpu_refcount); |
798 | if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) { |
||
823 | jermar | 799 | /* |
800 | * The old address space is no longer active on |
||
801 | * any processor. It can be appended to the |
||
802 | * list of inactive address spaces with assigned |
||
803 | * ASID. |
||
804 | */ |
||
805 | ASSERT(old->asid != ASID_INVALID); |
||
806 | list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head); |
||
807 | } |
||
1380 | jermar | 808 | mutex_unlock(&old->lock); |
1890 | jermar | 809 | |
810 | /* |
||
811 | * Perform architecture-specific tasks when the address space |
||
812 | * is being removed from the CPU. |
||
813 | */ |
||
814 | as_deinstall_arch(old); |
||
823 | jermar | 815 | } |
816 | |||
817 | /* |
||
818 | * Second, prepare the new address space. |
||
819 | */ |
||
1380 | jermar | 820 | mutex_lock_active(&new->lock); |
1415 | jermar | 821 | if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) { |
823 | jermar | 822 | if (new->asid != ASID_INVALID) |
823 | list_remove(&new->inactive_as_with_asid_link); |
||
824 | else |
||
825 | needs_asid = true; /* defer call to asid_get() until new->lock is released */ |
||
826 | } |
||
827 | SET_PTL0_ADDRESS(new->page_table); |
||
1380 | jermar | 828 | mutex_unlock(&new->lock); |
823 | jermar | 829 | |
830 | if (needs_asid) { |
||
831 | /* |
||
832 | * Allocation of new ASID was deferred |
||
833 | * until now in order to avoid deadlock. |
||
834 | */ |
||
835 | asid_t asid; |
||
836 | |||
837 | asid = asid_get(); |
||
1380 | jermar | 838 | mutex_lock_active(&new->lock); |
823 | jermar | 839 | new->asid = asid; |
1380 | jermar | 840 | mutex_unlock(&new->lock); |
823 | jermar | 841 | } |
1415 | jermar | 842 | spinlock_unlock(&inactive_as_with_asid_lock); |
823 | jermar | 843 | interrupts_restore(ipl); |
844 | |||
845 | /* |
||
703 | jermar | 846 | * Perform architecture-specific steps. |
727 | jermar | 847 | * (e.g. write ASID to hardware register etc.) |
703 | jermar | 848 | */ |
823 | jermar | 849 | as_install_arch(new); |
703 | jermar | 850 | |
823 | jermar | 851 | AS = new; |
703 | jermar | 852 | } |
754 | jermar | 853 | |
1235 | jermar | 854 | /** Convert address space area flags to page flags. |
754 | jermar | 855 | * |
1235 | jermar | 856 | * @param aflags Flags of some address space area. |
754 | jermar | 857 | * |
1235 | jermar | 858 | * @return Flags to be passed to page_mapping_insert(). |
754 | jermar | 859 | */ |
1235 | jermar | 860 | int area_flags_to_page_flags(int aflags) |
754 | jermar | 861 | { |
862 | int flags; |
||
863 | |||
1178 | jermar | 864 | flags = PAGE_USER | PAGE_PRESENT; |
754 | jermar | 865 | |
1235 | jermar | 866 | if (aflags & AS_AREA_READ) |
1026 | jermar | 867 | flags |= PAGE_READ; |
868 | |||
1235 | jermar | 869 | if (aflags & AS_AREA_WRITE) |
1026 | jermar | 870 | flags |= PAGE_WRITE; |
871 | |||
1235 | jermar | 872 | if (aflags & AS_AREA_EXEC) |
1026 | jermar | 873 | flags |= PAGE_EXEC; |
874 | |||
1424 | jermar | 875 | if (aflags & AS_AREA_CACHEABLE) |
1178 | jermar | 876 | flags |= PAGE_CACHEABLE; |
877 | |||
754 | jermar | 878 | return flags; |
879 | } |
||
756 | jermar | 880 | |
1235 | jermar | 881 | /** Compute flags for virtual address translation subsystem. |
882 | * |
||
883 | * The address space area must be locked. |
||
884 | * Interrupts must be disabled. |
||
885 | * |
||
886 | * @param a Address space area. |
||
887 | * |
||
888 | * @return Flags to be used in page_mapping_insert(). |
||
889 | */ |
||
1409 | jermar | 890 | int as_area_get_flags(as_area_t *a) |
1235 | jermar | 891 | { |
892 | return area_flags_to_page_flags(a->flags); |
||
893 | } |
||
894 | |||
756 | jermar | 895 | /** Create page table. |
896 | * |
||
897 | * Depending on architecture, create either address space |
||
898 | * private or global page table. |
||
899 | * |
||
900 | * @param flags Flags saying whether the page table is for kernel address space. |
||
901 | * |
||
902 | * @return First entry of the page table. |
||
903 | */ |
||
904 | pte_t *page_table_create(int flags) |
||
905 | { |
||
906 | ASSERT(as_operations); |
||
907 | ASSERT(as_operations->page_table_create); |
||
908 | |||
909 | return as_operations->page_table_create(flags); |
||
910 | } |
||
977 | jermar | 911 | |
1468 | jermar | 912 | /** Destroy page table. |
913 | * |
||
914 | * Destroy page table in architecture specific way. |
||
915 | * |
||
916 | * @param page_table Physical address of PTL0. |
||
917 | */ |
||
918 | void page_table_destroy(pte_t *page_table) |
||
919 | { |
||
920 | ASSERT(as_operations); |
||
921 | ASSERT(as_operations->page_table_destroy); |
||
922 | |||
923 | as_operations->page_table_destroy(page_table); |
||
924 | } |
||
925 | |||
1044 | jermar | 926 | /** Lock page table. |
927 | * |
||
928 | * This function should be called before any page_mapping_insert(), |
||
929 | * page_mapping_remove() and page_mapping_find(). |
||
930 | * |
||
931 | * Locking order is such that address space areas must be locked |
||
932 | * prior to this call. Address space can be locked prior to this |
||
933 | * call in which case the lock argument is false. |
||
934 | * |
||
935 | * @param as Address space. |
||
1248 | jermar | 936 | * @param lock If false, do not attempt to lock as->lock. |
1044 | jermar | 937 | */ |
938 | void page_table_lock(as_t *as, bool lock) |
||
939 | { |
||
940 | ASSERT(as_operations); |
||
941 | ASSERT(as_operations->page_table_lock); |
||
942 | |||
943 | as_operations->page_table_lock(as, lock); |
||
944 | } |
||
945 | |||
946 | /** Unlock page table. |
||
947 | * |
||
948 | * @param as Address space. |
||
1248 | jermar | 949 | * @param unlock If false, do not attempt to unlock as->lock. |
1044 | jermar | 950 | */ |
951 | void page_table_unlock(as_t *as, bool unlock) |
||
952 | { |
||
953 | ASSERT(as_operations); |
||
954 | ASSERT(as_operations->page_table_unlock); |
||
955 | |||
956 | as_operations->page_table_unlock(as, unlock); |
||
957 | } |
||
958 | |||
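Editorial illustration of the locking order documented above: the address space area is locked first and the page table lock is taken with lock == false, because in this sketch the caller is assumed to already hold as->lock.

/* Hedged sketch: inspect one mapping while honouring the locking order. */
mutex_lock(&area->lock);
page_table_lock(as, false);             /* as->lock assumed to be held already */
pte = page_mapping_find(as, page);
if (pte && PTE_VALID(pte) && PTE_PRESENT(pte)) {
	/* the translation for 'page' is present and may be examined here */
}
page_table_unlock(as, false);
mutex_unlock(&area->lock);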
977 | jermar | 959 | |
960 | /** Find address space area and lock it. |
||
961 | * |
||
962 | * The address space must be locked and interrupts must be disabled. |
||
963 | * |
||
964 | * @param as Address space. |
||
965 | * @param va Virtual address. |
||
966 | * |
||
967 | * @return Locked address space area containing va on success or NULL on failure. |
||
968 | */ |
||
1780 | jermar | 969 | as_area_t *find_area_and_lock(as_t *as, uintptr_t va) |
977 | jermar | 970 | { |
971 | as_area_t *a; |
||
1147 | jermar | 972 | btree_node_t *leaf, *lnode; |
973 | int i; |
||
977 | jermar | 974 | |
1147 | jermar | 975 | a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); |
976 | if (a) { |
||
977 | /* va is the base address of an address space area */ |
||
1380 | jermar | 978 | mutex_lock(&a->lock); |
1147 | jermar | 979 | return a; |
980 | } |
||
981 | |||
982 | /* |
||
1150 | jermar | 983 | * Search the leaf node and the rightmost record of its left neighbour |
1147 | jermar | 984 | * to find out whether this is a miss or va belongs to an address |
985 | * space area found there. |
||
986 | */ |
||
987 | |||
988 | /* First, search the leaf node itself. */ |
||
989 | for (i = 0; i < leaf->keys; i++) { |
||
990 | a = (as_area_t *) leaf->value[i]; |
||
1380 | jermar | 991 | mutex_lock(&a->lock); |
1147 | jermar | 992 | if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) { |
993 | return a; |
||
994 | } |
||
1380 | jermar | 995 | mutex_unlock(&a->lock); |
1147 | jermar | 996 | } |
977 | jermar | 997 | |
1147 | jermar | 998 | /* |
1150 | jermar | 999 | * Second, locate the left neighbour and test its last record. |
1148 | jermar | 1000 | * Because of its position in the B+tree, it must have base < va. |
1147 | jermar | 1001 | */ |
1150 | jermar | 1002 | if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { |
1147 | jermar | 1003 | a = (as_area_t *) lnode->value[lnode->keys - 1]; |
1380 | jermar | 1004 | mutex_lock(&a->lock); |
1147 | jermar | 1005 | if (va < a->base + a->pages * PAGE_SIZE) { |
1048 | jermar | 1006 | return a; |
1147 | jermar | 1007 | } |
1380 | jermar | 1008 | mutex_unlock(&a->lock); |
977 | jermar | 1009 | } |
1010 | |||
1011 | return NULL; |
||
1012 | } |
||
1048 | jermar | 1013 | |
1014 | /** Check area conflicts with other areas. |
||
1015 | * |
||
1016 | * The address space must be locked and interrupts must be disabled. |
||
1017 | * |
||
1018 | * @param as Address space. |
||
1019 | * @param va Starting virtual address of the area being tested. |
||
1020 | * @param size Size of the area being tested. |
||
1021 | * @param avoid_area Do not touch this area. |
||
1022 | * |
||
1023 | * @return True if there is no conflict, false otherwise. |
||
1024 | */ |
||
1780 | jermar | 1025 | bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area) |
1048 | jermar | 1026 | { |
1027 | as_area_t *a; |
||
1147 | jermar | 1028 | btree_node_t *leaf, *node; |
1029 | int i; |
||
1048 | jermar | 1030 | |
1070 | jermar | 1031 | /* |
1032 | * We don't want any area to have conflicts with NULL page. |
||
1033 | */ |
||
1034 | if (overlaps(va, size, NULL, PAGE_SIZE)) |
||
1035 | return false; |
||
1036 | |||
1147 | jermar | 1037 | /* |
1038 | * The leaf node is found in O(log n), where n is proportional to |
||
1039 | * the number of address space areas belonging to as. |
||
1040 | * The check for conflicts is then attempted on the rightmost |
||
1150 | jermar | 1041 | * record in the left neighbour, the leftmost record in the right |
1042 | * neighbour and all records in the leaf node itself. |
||
1147 | jermar | 1043 | */ |
1048 | jermar | 1044 | |
1147 | jermar | 1045 | if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) { |
1046 | if (a != avoid_area) |
||
1047 | return false; |
||
1048 | } |
||
1049 | |||
1050 | /* First, check the two border cases. */ |
||
1150 | jermar | 1051 | if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { |
1147 | jermar | 1052 | a = (as_area_t *) node->value[node->keys - 1]; |
1380 | jermar | 1053 | mutex_lock(&a->lock); |
1147 | jermar | 1054 | if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
1380 | jermar | 1055 | mutex_unlock(&a->lock); |
1147 | jermar | 1056 | return false; |
1057 | } |
||
1380 | jermar | 1058 | mutex_unlock(&a->lock); |
1147 | jermar | 1059 | } |
1150 | jermar | 1060 | if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) { |
1147 | jermar | 1061 | a = (as_area_t *) node->value[0]; |
1380 | jermar | 1062 | mutex_lock(&a->lock); |
1147 | jermar | 1063 | if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
1380 | jermar | 1064 | mutex_unlock(&a->lock); |
1147 | jermar | 1065 | return false; |
1066 | } |
||
1380 | jermar | 1067 | mutex_unlock(&a->lock); |
1147 | jermar | 1068 | } |
1069 | |||
1070 | /* Second, check the leaf node. */ |
||
1071 | for (i = 0; i < leaf->keys; i++) { |
||
1072 | a = (as_area_t *) leaf->value[i]; |
||
1073 | |||
1048 | jermar | 1074 | if (a == avoid_area) |
1075 | continue; |
||
1147 | jermar | 1076 | |
1380 | jermar | 1077 | mutex_lock(&a->lock); |
1147 | jermar | 1078 | if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
1380 | jermar | 1079 | mutex_unlock(&a->lock); |
1147 | jermar | 1080 | return false; |
1081 | } |
||
1380 | jermar | 1082 | mutex_unlock(&a->lock); |
1048 | jermar | 1083 | } |
1084 | |||
1070 | jermar | 1085 | /* |
1086 | * So far, the area does not conflict with other areas. |
||
1087 | * Check if it doesn't conflict with kernel address space. |
||
1088 | */ |
||
1089 | if (!KERNEL_ADDRESS_SPACE_SHADOWED) { |
||
1090 | return !overlaps(va, size, |
||
1091 | KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START); |
||
1092 | } |
||
1093 | |||
1048 | jermar | 1094 | return true; |
1095 | } |
||
1235 | jermar | 1096 | |
1380 | jermar | 1097 | /** Return size of the address space area with given base. */ |
1780 | jermar | 1098 | size_t as_get_size(uintptr_t base) |
1329 | palkovsky | 1099 | { |
1100 | ipl_t ipl; |
||
1101 | as_area_t *src_area; |
||
1102 | size_t size; |
||
1103 | |||
1104 | ipl = interrupts_disable(); |
||
1105 | src_area = find_area_and_lock(AS, base); |
||
1106 | if (src_area){ |
||
1107 | size = src_area->pages * PAGE_SIZE; |
||
1380 | jermar | 1108 | mutex_unlock(&src_area->lock); |
1329 | palkovsky | 1109 | } else { |
1110 | size = 0; |
||
1111 | } |
||
1112 | interrupts_restore(ipl); |
||
1113 | return size; |
||
1114 | } |
||
1115 | |||
1387 | jermar | 1116 | /** Mark portion of address space area as used. |
1117 | * |
||
1118 | * The address space area must be already locked. |
||
1119 | * |
||
1120 | * @param a Address space area. |
||
1121 | * @param page First page to be marked. |
||
1122 | * @param count Number of pages to be marked. |
||
1123 | * |
||
1124 | * @return 0 on failure and 1 on success. |
||
1125 | */ |
||
1780 | jermar | 1126 | int used_space_insert(as_area_t *a, uintptr_t page, count_t count) |
1387 | jermar | 1127 | { |
1128 | btree_node_t *leaf, *node; |
||
1129 | count_t pages; |
||
1130 | int i; |
||
1131 | |||
1132 | ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); |
||
1133 | ASSERT(count); |
||
1134 | |||
1135 | pages = (count_t) btree_search(&a->used_space, page, &leaf); |
||
1136 | if (pages) { |
||
1137 | /* |
||
1138 | * We hit the beginning of some used space. |
||
1139 | */ |
||
1140 | return 0; |
||
1141 | } |
||
1142 | |||
1437 | jermar | 1143 | if (!leaf->keys) { |
1144 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1145 | return 1; |
||
1146 | } |
||
1147 | |||
1387 | jermar | 1148 | node = btree_leaf_node_left_neighbour(&a->used_space, leaf); |
1149 | if (node) { |
||
1780 | jermar | 1150 | uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0]; |
1387 | jermar | 1151 | count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0]; |
1152 | |||
1153 | /* |
||
1154 | * Examine the possibility that the interval fits |
||
1155 | * somewhere between the rightmost interval of |
||
1156 | * the left neighbour and the first interval of the leaf. |
||
1157 | */ |
||
1158 | |||
1159 | if (page >= right_pg) { |
||
1160 | /* Do nothing. */ |
||
1161 | } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1162 | /* The interval intersects with the left interval. */ |
||
1163 | return 0; |
||
1164 | } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1165 | /* The interval intersects with the right interval. */ |
||
1166 | return 0; |
||
1167 | } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) { |
||
1168 | /* The interval can be added by merging the two already present intervals. */ |
||
1403 | jermar | 1169 | node->value[node->keys - 1] += count + right_cnt; |
1387 | jermar | 1170 | btree_remove(&a->used_space, right_pg, leaf); |
1171 | return 1; |
||
1172 | } else if (page == left_pg + left_cnt*PAGE_SIZE) { |
||
1173 | /* The interval can be added by simply growing the left interval. */ |
||
1403 | jermar | 1174 | node->value[node->keys - 1] += count; |
1387 | jermar | 1175 | return 1; |
1176 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1177 | /* |
||
1178 | * The interval can be added by simply moving the base of the right |
||
1179 | * interval down and increasing its size accordingly. |
||
1180 | */ |
||
1403 | jermar | 1181 | leaf->value[0] += count; |
1387 | jermar | 1182 | leaf->key[0] = page; |
1183 | return 1; |
||
1184 | } else { |
||
1185 | /* |
||
1186 | * The interval is between both neighbouring intervals, |
||
1187 | * but cannot be merged with any of them. |
||
1188 | */ |
||
1189 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1190 | return 1; |
||
1191 | } |
||
1192 | } else if (page < leaf->key[0]) { |
||
1780 | jermar | 1193 | uintptr_t right_pg = leaf->key[0]; |
1387 | jermar | 1194 | count_t right_cnt = (count_t) leaf->value[0]; |
1195 | |||
1196 | /* |
||
1197 | * Investigate the border case in which the left neighbour does not |
||
1198 | * exist but the interval fits from the left. |
||
1199 | */ |
||
1200 | |||
1201 | if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1202 | /* The interval intersects with the right interval. */ |
||
1203 | return 0; |
||
1204 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1205 | /* |
||
1206 | * The interval can be added by moving the base of the right interval down |
||
1207 | * and increasing its size accordingly. |
||
1208 | */ |
||
1209 | leaf->key[0] = page; |
||
1403 | jermar | 1210 | leaf->value[0] += count; |
1387 | jermar | 1211 | return 1; |
1212 | } else { |
||
1213 | /* |
||
1214 | * The interval doesn't adjoin with the right interval. |
||
1215 | * It must be added individually. |
||
1216 | */ |
||
1217 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1218 | return 1; |
||
1219 | } |
||
1220 | } |
||
1221 | |||
1222 | node = btree_leaf_node_right_neighbour(&a->used_space, leaf); |
||
1223 | if (node) { |
||
1780 | jermar | 1224 | uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0]; |
1387 | jermar | 1225 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0]; |
1226 | |||
1227 | /* |
||
1228 | * Examine the possibility that the interval fits |
||
1229 | * somewhere between the leftmost interval of |
||
1230 | * the right neighbour and the last interval of the leaf. |
||
1231 | */ |
||
1232 | |||
1233 | if (page < left_pg) { |
||
1234 | /* Do nothing. */ |
||
1235 | } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1236 | /* The interval intersects with the left interval. */ |
||
1237 | return 0; |
||
1238 | } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1239 | /* The interval intersects with the right interval. */ |
||
1240 | return 0; |
||
1241 | } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) { |
||
1242 | /* The interval can be added by merging the two already present intervals. */ |
||
1403 | jermar | 1243 | leaf->value[leaf->keys - 1] += count + right_cnt; |
1387 | jermar | 1244 | btree_remove(&a->used_space, right_pg, node); |
1245 | return 1; |
||
1246 | } else if (page == left_pg + left_cnt*PAGE_SIZE) { |
||
1247 | /* The interval can be added by simply growing the left interval. */ |
||
1403 | jermar | 1248 | leaf->value[leaf->keys - 1] += count; |
1387 | jermar | 1249 | return 1; |
1250 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1251 | /* |
||
1252 | * The interval can be added by simply moving the base of the right |
||
1253 | * interval down and increasing its size accordingly. |
||
1254 | */ |
||
1403 | jermar | 1255 | node->value[0] += count; |
1387 | jermar | 1256 | node->key[0] = page; |
1257 | return 1; |
||
1258 | } else { |
||
1259 | /* |
||
1260 | * The interval is between both neighbouring intervals, |
||
1261 | * but cannot be merged with any of them. |
||
1262 | */ |
||
1263 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1264 | return 1; |
||
1265 | } |
||
1266 | } else if (page >= leaf->key[leaf->keys - 1]) { |
||
1780 | jermar | 1267 | uintptr_t left_pg = leaf->key[leaf->keys - 1]; |
1387 | jermar | 1268 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1]; |
1269 | |||
1270 | /* |
||
1271 | * Investigate the border case in which the right neighbour does not |
||
1272 | * exist but the interval fits from the right. |
||
1273 | */ |
||
1274 | |||
1275 | if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1403 | jermar | 1276 | /* The interval intersects with the left interval. */ |
1387 | jermar | 1277 | return 0; |
1278 | } else if (left_pg + left_cnt*PAGE_SIZE == page) { |
||
1279 | /* The interval can be added by growing the left interval. */ |
||
1403 | jermar | 1280 | leaf->value[leaf->keys - 1] += count; |
1387 | jermar | 1281 | return 1; |
1282 | } else { |
||
1283 | /* |
||
1284 | * The interval doesn't adjoin with the left interval. |
||
1285 | * It must be added individually. |
||
1286 | */ |
||
1287 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1288 | return 1; |
||
1289 | } |
||
1290 | } |
||
1291 | |||
1292 | /* |
||
1293 | * Note that if the algorithm made it thus far, the interval can fit only |
||
1294 | * between two other intervals of the leaf. The two border cases were already |
||
1295 | * resolved. |
||
1296 | */ |
||
1297 | for (i = 1; i < leaf->keys; i++) { |
||
1298 | if (page < leaf->key[i]) { |
||
1780 | jermar | 1299 | uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i]; |
1387 | jermar | 1300 | count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i]; |
1301 | |||
1302 | /* |
||
1303 | * The interval fits between left_pg and right_pg. |
||
1304 | */ |
||
1305 | |||
1306 | if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1307 | /* The interval intersects with the left interval. */ |
||
1308 | return 0; |
||
1309 | } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1310 | /* The interval intersects with the right interval. */ |
||
1311 | return 0; |
||
1312 | } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) { |
||
1313 | /* The interval can be added by merging the two already present intervals. */ |
||
1403 | jermar | 1314 | leaf->value[i - 1] += count + right_cnt; |
1387 | jermar | 1315 | btree_remove(&a->used_space, right_pg, leaf); |
1316 | return 1; |
||
1317 | } else if (page == left_pg + left_cnt*PAGE_SIZE) { |
||
1318 | /* The interval can be added by simply growing the left interval. */ |
||
1403 | jermar | 1319 | leaf->value[i - 1] += count; |
1387 | jermar | 1320 | return 1; |
1321 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1322 | /* |
||
1323 | * The interval can be added by simply moving the base of the right |
||
1324 | * interval down and increasing its size accordingly. |
||
1325 | */ |
||
1403 | jermar | 1326 | leaf->value[i] += count; |
1387 | jermar | 1327 | leaf->key[i] = page; |
1328 | return 1; |
||
1329 | } else { |
||
1330 | /* |
||
1331 | * The interval is between both neighbouring intervals, |
||
1332 | * but cannot be merged with any of them. |
||
1333 | */ |
||
1334 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1335 | return 1; |
||
1336 | } |
||
1337 | } |
||
1338 | } |
||
1339 | |||
1735 | decky | 1340 | panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page); |
1387 | jermar | 1341 | } |
1342 | |||
1343 | /** Mark portion of address space area as unused. |
||
1344 | * |
||
1345 | * The address space area must be already locked. |
||
1346 | * |
||
1347 | * @param a Address space area. |
||
1348 | * @param page First page to be marked. |
||
1349 | * @param count Number of pages to be marked. |
||
1350 | * |
||
1351 | * @return 0 on failure and 1 on success. |
||
1352 | */ |
||
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count*PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		uintptr_t left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour and can be removed by
				 * updating the size of the bigger interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour but its removal requires
				 * both updating the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf and can be removed by updating the size
				 * of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf but its removal requires both updating
				 * the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have already been resolved.
	 * Now the interval can only lie between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
				if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf and can be removed by updating the size
					 * of the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf but its removal requires both updating
					 * the size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
}
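
/*
 * Illustrative sketch, not part of the original file: the splitting case of
 * used_space_remove() on an already locked address space area. The page
 * addresses and the helper name used_space_remove_example() are made up, and
 * PAGE_SIZE is assumed to be 4 KiB. Kept under #if 0 so that it does not
 * affect the build.
 */
#if 0
static void used_space_remove_example(as_area_t *a)
{
	/* Track a single interval [0x20000, 8 pages], i.e. 0x20000..0x27fff. */
	used_space_insert(a, 0x20000, 8);

	/*
	 * Punch a two-page hole in the middle. The tracked interval shrinks to
	 * [0x20000, 3 pages] and a new interval [0x25000, 3 pages] is inserted:
	 * new_cnt = ((0x20000 + 8*PAGE_SIZE) - (0x23000 + 2*PAGE_SIZE)) >> PAGE_WIDTH == 3.
	 */
	used_space_remove(a, 0x23000, 2);

	/* 0x20000 now begins a 3-page interval; removing all three pages
	 * deletes that key from the B+tree altogether. */
	used_space_remove(a, 0x20000, 3);
}
#endif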

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		dealloc = true;
		link_t *cur;

		/*
		 * Now carefully walk the pagemap B+tree and free/remove the
		 * reference from all frames found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free((uintptr_t) node->value[i]);
		}

	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}
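
/*
 * Illustrative sketch, not part of the original file: the "decide under the
 * lock, deallocate after the lock is released" pattern used by
 * sh_info_remove_reference() above, shown on a made-up reference-counted
 * type example_ref_t. Kept under #if 0 so that it does not affect the build.
 */
#if 0
typedef struct {
	mutex_t lock;
	count_t refcount;
} example_ref_t;

static void example_ref_put(example_ref_t *ref)
{
	bool dealloc = false;

	mutex_lock(&ref->lock);
	ASSERT(ref->refcount);
	if (--ref->refcount == 0)
		dealloc = true;
	mutex_unlock(&ref->lock);

	/*
	 * Freeing is deferred until after mutex_unlock() so that the mutex
	 * embedded in the object is never freed while it is still held.
	 */
	if (dealloc)
		free(ref);
}
#endif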

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (unative_t) address;
	else
		return (unative_t) -1;
}

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
	return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
	return (unative_t) as_area_destroy(AS, address);
}
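
/*
 * Illustrative sketch, not part of the original file: how a caller can tell
 * success and failure of the wrappers above apart. sys_as_area_create()
 * returns the base address of the new area on success and (unative_t) -1 on
 * failure, so the result has to be compared against -1 rather than tested
 * for being non-zero. The helper name and the AS_AREA_READ | AS_AREA_WRITE
 * flag combination are assumptions made for this example only. Kept under
 * #if 0 so that it does not affect the build.
 */
#if 0
static int example_create_rw_area(uintptr_t base, size_t size)
{
	unative_t rc;

	rc = sys_as_area_create(base, size, AS_AREA_READ | AS_AREA_WRITE);
	if (rc == (unative_t) -1)
		return -1;	/* the area could not be created */

	return 0;		/* the area now exists at 'base' */
}
#endif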

/** @}
 */