/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;

/** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

static int as_constructor(void *obj, int flags)
{
    as_t *as = (as_t *) obj;
    int rc;

    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock);

    rc = as_constructor_arch(as, flags);

    return rc;
}

static int as_destructor(void *obj)
{
    as_t *as = (as_t *) obj;

    return as_destructor_arch(as);
}

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();

    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) slab_alloc(as_slab, 0);
    (void) as_create_arch(as, 0);

    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->cpu_refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is zero),
 * the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
    ipl_t ipl;
    bool cond;

    ASSERT(as->refcount == 0);

    /*
     * Since there is no reference to this address space,
     * it is safe not to lock its mutex.
     */
    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
        if (as != AS && as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);
        asid_put(as->asid);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    for (cond = true; cond; ) {
        btree_node_t *node;

        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
        node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);

        if ((cond = node->keys)) {
            as_area_destroy(as, node->key[0]);
        }
    }

    btree_destroy(&as->as_area_btree);
    page_table_destroy(as->page_table);

    interrupts_restore(ipl);

    slab_free(as_slab, as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
    mem_backend_t *backend, mem_backend_data_t *backend_data)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&a->lock);

    a->as = as;
    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;
    a->sh_info = NULL;
    a->backend = backend;
    if (backend_data)
        a->backend_data = *backend_data;
    else
        memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);

    btree_create(&a->used_space);

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

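/*
 * Illustrative usage sketch, not part of the original file: creating an
 * anonymous, cacheable, read/write area in the current address space. The
 * anon_backend identifier and the 'base' address are assumptions made for
 * this example only.
 *
 *     as_area_t *a;
 *
 *     a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
 *         4 * PAGE_SIZE, base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *     if (!a) {
 *         // NULL means an unaligned base, zero size, W+X flags or a conflict.
 *     }
 */
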
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->backend == &phys_backend) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    if (pages < area->pages) {
        bool cond;
        uintptr_t start_free = area->base + pages*PAGE_SIZE;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        /*
         * Start TLB shootdown sequence.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        for (cond = true; cond;) {
            btree_node_t *node;

            ASSERT(!list_empty(&area->used_space.leaf_head));
            node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                uintptr_t b = node->key[node->keys - 1];
                count_t c = (count_t) node->value[node->keys - 1];
                int i = 0;

                if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {

                    if (b + c*PAGE_SIZE <= start_free) {
                        /*
                         * The whole interval fits completely
                         * in the resized address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval corresponding to b and c
                     * overlaps with the resized address space area.
                     */

                    cond = false;    /* we are almost done */
                    i = (start_free - b) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free, c - i))
                        panic("Could not remove used space.\n");
                } else {
                    /*
                     * The interval of used space can be completely removed.
                     */
                    if (!used_space_remove(area, b, c))
                        panic("Could not remove used space.\n");
                }

                for (; i < c; i++) {
                    pte_t *pte;

                    page_table_lock(as, false);
                    pte = page_mapping_find(as, b + i*PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                    if (area->backend && area->backend->frame_free) {
                        area->backend->frame_free(area,
                            b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                    }
                    page_mapping_remove(as, b + i*PAGE_SIZE);
                    page_table_unlock(as, false);
                }
            }
        }

        /*
         * Finish TLB shootdown sequence.
         */
        tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();

        /*
         * Invalidate software translation caches (e.g. TSB on sparc64).
         */
        as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages);
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}

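/*
 * Illustrative sketch (an assumption, not original code). Note that 'size'
 * is the new size measured from 'address', not a delta, so the new page
 * count is SIZE2FRAMES((address - base) + size). Shrinking an area starting
 * at 'base' down to a single page could look like:
 *
 *     int rc;
 *
 *     rc = as_area_resize(AS, base, 1 * PAGE_SIZE, 0);
 *     if (rc == ENOTSUP) {
 *         // shared and device-backed (phys_backend) areas cannot be resized
 *     }
 */
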
/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
    as_area_t *area;
    uintptr_t base;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    base = area->base;

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /*
     * Visit only the pages mapped by used_space B+tree.
     */
    for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;
            pte_t *pte;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j*PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                if (area->backend && area->backend->frame_free) {
                    area->backend->frame_free(area,
                        b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
                }
                page_mapping_remove(as, b + j*PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */
    tlb_invalidate_pages(as->asid, area->base, area->pages);
    tlb_shootdown_finalize();

    /*
     * Invalidate potential software translation caches (e.g. TSB on sparc64).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
    return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success or ENOENT if there is no such task or if there is no
 * such address space area, EPERM if there was a problem in accepting the area
 * or ENOMEM if there was a problem in allocating destination address space
 * area. ENOTSUP is returned if the address space area backend does not support
 * sharing or if the kernel detects an attempt to create an illegal address
 * alias.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
    ipl_t ipl;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;
    share_info_t *sh_info;
    mem_backend_t *src_backend;
    mem_backend_data_t src_backend_data;

    ipl = interrupts_disable();
    mutex_lock(&src_as->lock);
    src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (!src_area->backend || !src_area->backend->share) {
        /*
         * There is no backend or the backend does not
         * know how to share the area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    src_backend = src_area->backend;
    src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

#ifdef CONFIG_VIRT_IDX_DCACHE
    if (!(dst_flags_mask & AS_AREA_EXEC)) {
        if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
            /*
             * Refuse to create an illegal address alias.
             */
            mutex_unlock(&src_area->lock);
            mutex_unlock(&src_as->lock);
            interrupts_restore(ipl);
            return ENOTSUP;
        }
    }
#endif /* CONFIG_VIRT_IDX_DCACHE */

    /*
     * Now we are committed to sharing the area.
     * First, prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    sh_info = src_area->sh_info;
    if (!sh_info) {
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&sh_info->lock);
        sh_info->refcount = 2;
        btree_create(&sh_info->pagemap);
        src_area->sh_info = sh_info;
    } else {
        mutex_lock(&sh_info->lock);
        sh_info->refcount++;
        mutex_unlock(&sh_info->lock);
    }

    src_area->backend->share(src_area);

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);

        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_as->lock);
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);

    interrupts_restore(ipl);

    return 0;
}

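/*
 * Illustrative sketch (assumption): sharing a read-only view of a source
 * area into another task's address space. src_task and dst_task are
 * hypothetical task_t pointers introduced for the example.
 *
 *     int rc;
 *
 *     rc = as_area_share(src_task->as, src_base, src_size, dst_task->as,
 *         dst_base, AS_AREA_READ | AS_AREA_CACHEABLE);
 *     if (rc != 0) {
 *         // e.g. ENOTSUP if the source area's backend cannot share
 *     }
 */
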
/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}

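/*
 * Worked example: for an area with flags AS_AREA_READ | AS_AREA_EXEC,
 * as_area_check_access(area, PF_ACCESS_WRITE) tests the AS_AREA_WRITE bit,
 * finds it clear and returns false, while PF_ACCESS_READ and PF_ACCESS_EXEC
 * both return true.
 */
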
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *         fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;

    if (!THREAD)
        return AS_PF_FAULT;

    ASSERT(AS);

    mutex_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (!area->backend || !area->backend->page_fault) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                page_table_unlock(AS, false);
                mutex_unlock(&area->lock);
                mutex_unlock(&AS->lock);
                return AS_PF_OK;
            }
        }
    }

    /*
     * Resort to the backend page fault handler.
     */
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}

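/*
 * Illustrative sketch (assumption): a low-level architecture fault handler
 * is expected to run with interrupts disabled, classify the access and
 * defer to this function, roughly as follows; 'badvaddr' is a hypothetical
 * name for the faulting virtual address.
 *
 *     if (as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE), PF_ACCESS_READ,
 *         istate) == AS_PF_FAULT)
 *         panic("unresolved page fault at %p\n", badvaddr);
 */
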
/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        mutex_lock_active(&old->lock);
        ASSERT(old->cpu_refcount);
        if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        mutex_unlock(&old->lock);

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old);
    }

    /*
     * Second, prepare the new address space.
     */
    mutex_lock_active(&new->lock);
    if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;    /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    mutex_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        mutex_lock_active(&new->lock);
        new->asid = asid;
        mutex_unlock(&new->lock);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}

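/*
 * Illustrative sketch (assumption): the scheduler performs the switch during
 * context switch, when the incoming thread's task owns a different address
 * space than the one currently installed:
 *
 *     if (TASK->as != AS)
 *         as_switch(AS, TASK->as);
 */
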
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}

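/*
 * Worked example: area_flags_to_page_flags(AS_AREA_READ | AS_AREA_CACHEABLE)
 * yields PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_CACHEABLE; PAGE_USER
 * and PAGE_PRESENT are always set and the remaining bits map one to one.
 */
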
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check if it doesn't conflict with kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(uintptr_t base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}

1387 | jermar | 1158 | /** Mark portion of address space area as used. |
1159 | * |
||
1160 | * The address space area must be already locked. |
||
1161 | * |
||
1162 | * @param a Address space area. |
||
1163 | * @param page First page to be marked. |
||
1164 | * @param count Number of page to be marked. |
||
1165 | * |
||
1166 | * @return 0 on failure and 1 on success. |
||
1167 | */ |
||
1780 | jermar | 1168 | int used_space_insert(as_area_t *a, uintptr_t page, count_t count) |
1387 | jermar | 1169 | { |
1170 | btree_node_t *leaf, *node; |
||
1171 | count_t pages; |
||
1172 | int i; |
||
1173 | |||
1174 | ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); |
||
1175 | ASSERT(count); |
||
1176 | |||
1177 | pages = (count_t) btree_search(&a->used_space, page, &leaf); |
||
1178 | if (pages) { |
||
1179 | /* |
||
1180 | * We hit the beginning of some used space. |
||
1181 | */ |
||
1182 | return 0; |
||
1183 | } |
||
1184 | |||
1437 | jermar | 1185 | if (!leaf->keys) { |
1186 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1187 | return 1; |
||
1188 | } |
||
1189 | |||
1387 | jermar | 1190 | node = btree_leaf_node_left_neighbour(&a->used_space, leaf); |
1191 | if (node) { |
||
1780 | jermar | 1192 | uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0]; |
1387 | jermar | 1193 | count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0]; |
1194 | |||
1195 | /* |
||
1196 | * Examine the possibility that the interval fits |
||
1197 | * somewhere between the rightmost interval of |
||
1198 | * the left neigbour and the first interval of the leaf. |
||
1199 | */ |
||
1200 | |||
1201 | if (page >= right_pg) { |
||
1202 | /* Do nothing. */ |
||
1203 | } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1204 | /* The interval intersects with the left interval. */ |
||
1205 | return 0; |
||
1206 | } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1207 | /* The interval intersects with the right interval. */ |
||
1208 | return 0; |
||
1209 | } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) { |
||
1210 | /* The interval can be added by merging the two already present intervals. */ |
||
1403 | jermar | 1211 | node->value[node->keys - 1] += count + right_cnt; |
1387 | jermar | 1212 | btree_remove(&a->used_space, right_pg, leaf); |
1213 | return 1; |
||
1214 | } else if (page == left_pg + left_cnt*PAGE_SIZE) { |
||
1215 | /* The interval can be added by simply growing the left interval. */ |
||
1403 | jermar | 1216 | node->value[node->keys - 1] += count; |
1387 | jermar | 1217 | return 1; |
1218 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1219 | /* |
||
1220 | * The interval can be addded by simply moving base of the right |
||
1221 | * interval down and increasing its size accordingly. |
||
1222 | */ |
||
1403 | jermar | 1223 | leaf->value[0] += count; |
1387 | jermar | 1224 | leaf->key[0] = page; |
1225 | return 1; |
||
1226 | } else { |
||
1227 | /* |
||
1228 | * The interval is between both neigbouring intervals, |
||
1229 | * but cannot be merged with any of them. |
||
1230 | */ |
||
1231 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1232 | return 1; |
||
1233 | } |
||
1234 | } else if (page < leaf->key[0]) { |
||
1780 | jermar | 1235 | uintptr_t right_pg = leaf->key[0]; |
1387 | jermar | 1236 | count_t right_cnt = (count_t) leaf->value[0]; |
1237 | |||
1238 | /* |
||
1239 | * Investigate the border case in which the left neighbour does not |
||
1240 | * exist but the interval fits from the left. |
||
1241 | */ |
||
1242 | |||
1243 | if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1244 | /* The interval intersects with the right interval. */ |
||
1245 | return 0; |
||
1246 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1247 | /* |
||
1248 | * The interval can be added by moving the base of the right interval down |
||
1249 | * and increasing its size accordingly. |
||
1250 | */ |
||
1251 | leaf->key[0] = page; |
||
1403 | jermar | 1252 | leaf->value[0] += count; |
1387 | jermar | 1253 | return 1; |
1254 | } else { |
||
1255 | /* |
||
1256 | * The interval doesn't adjoin with the right interval. |
||
1257 | * It must be added individually. |
||
1258 | */ |
||
1259 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1260 | return 1; |
||
1261 | } |
||
1262 | } |
||
1263 | |||
1264 | node = btree_leaf_node_right_neighbour(&a->used_space, leaf); |
||
1265 | if (node) { |
||
1780 | jermar | 1266 | uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0]; |
1387 | jermar | 1267 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0]; |
1268 | |||
1269 | /* |
||
1270 | * Examine the possibility that the interval fits |
||
1271 | * somewhere between the leftmost interval of |
||
1272 | * the right neigbour and the last interval of the leaf. |
||
1273 | */ |
||
1274 | |||
1275 | if (page < left_pg) { |
||
1276 | /* Do nothing. */ |
||
1277 | } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1278 | /* The interval intersects with the left interval. */ |
||
1279 | return 0; |
||
1280 | } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1281 | /* The interval intersects with the right interval. */ |
||
1282 | return 0; |
||
1283 | } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) { |
||
1284 | /* The interval can be added by merging the two already present intervals. */ |
||
1403 | jermar | 1285 | leaf->value[leaf->keys - 1] += count + right_cnt; |
1387 | jermar | 1286 | btree_remove(&a->used_space, right_pg, node); |
1287 | return 1; |
||
1288 | } else if (page == left_pg + left_cnt*PAGE_SIZE) { |
||
1289 | /* The interval can be added by simply growing the left interval. */ |
||
1403 | jermar | 1290 | leaf->value[leaf->keys - 1] += count; |
1387 | jermar | 1291 | return 1; |
1292 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1293 | /* |
||
			 * The interval can be added by simply moving the base of the right
			 * interval down and increasing its size accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with either of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour does not
		 * exist but the interval fits from the right.
		 */

		if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt*PAGE_SIZE == page) {
			/* The interval can be added by growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit only
	 * between two other intervals of the leaf. The two border cases were already
	 * resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
				/* The interval intersects with the left interval. */
				return 0;
			} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
				/* The interval intersects with the right interval. */
				return 0;
			} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
				/* The interval can be added by merging the two already present intervals. */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt*PAGE_SIZE) {
				/* The interval can be added by simply growing the left interval. */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count*PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving the base of the right
				 * interval down and increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring intervals,
				 * but cannot be merged with either of them.
				 */
				btree_insert(&a->used_space, page, (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
}

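/*
 * A minimal usage sketch (illustrative only, not part of this file's call
 * graph): a page-fault style caller records one page of used space after
 * establishing a mapping. The identifiers `area', `va' and `frame' are
 * hypothetical; the area is assumed to be already locked by the caller.
 *
 *	page_mapping_insert(AS, va, frame, as_area_get_flags(area));
 *	if (!used_space_insert(area, ALIGN_DOWN(va, PAGE_SIZE), 1))
 *		panic("Could not insert used space.\n");
 */
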
/** Mark portion of address space area as unused.
 *
 * The address space area must already be locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count*PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		uintptr_t left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour and can be removed by
				 * updating the size of the bigger interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour but its removal requires
				 * both updating the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf and can be removed by updating the size
				 * of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf but its removal requires both updating
				 * the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have already been resolved.
	 * Now the interval can only be between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
				if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf and can be removed by updating the size
					 * of the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf but its removal requires both updating
					 * the size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
}

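/*
 * A worked example of the simplest case above: if the tree records the
 * interval (page = P, count = 4), then used_space_remove(a, P, 1) finds
 * the interval via btree_search() (P is its first page) and shrinks it
 * in place to (P + PAGE_SIZE, 3). Calling used_space_remove(a, P, 4)
 * would instead remove the B+tree key altogether.
 */
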
/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		dealloc = true;
		link_t *cur;

		/*
		 * Now carefully walk the pagemap B+tree and free the
		 * reference to every frame found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free((uintptr_t) node->value[i]);
		}
	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}
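
/*
 * For illustration, the counterpart operation (a sketch; actual reference
 * acquisition is done by the sharing code, e.g. as_area_share()) simply
 * increments the count under the same lock:
 *
 *	mutex_lock(&sh_info->lock);
 *	sh_info->refcount++;
 *	mutex_unlock(&sh_info->lock);
 */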

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (unative_t) address;
	else
		return (unative_t) -1;
}
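
/*
 * A hypothetical invocation (sketch only; ADDR is a placeholder for a
 * valid page-aligned userspace address): create an anonymous, readable
 * and writable area in the current task's address space. On failure,
 * (unative_t) -1 is returned.
 *
 *	if (sys_as_area_create(ADDR, PAGE_SIZE, AS_AREA_READ | AS_AREA_WRITE) == (unative_t) -1)
 *		printf("Area creation failed.\n");
 */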

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
	return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
	return (unative_t) as_area_destroy(AS, address);
}

/** Print out information about address space.
 *
 * @param as Address space.
 */
void as_print(as_t *as)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/* Print out info about address space areas. */
	link_t *cur;
	for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
		btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);

		int i;
		for (i = 0; i < node->keys; i++) {
			as_area_t *area = node->value[i];

			mutex_lock(&area->lock);
			printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
				area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE);
			mutex_unlock(&area->lock);
		}
	}

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
}
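
/*
 * Illustrative use (a sketch; e.g. from a kernel console command):
 * dump the areas of the current address space.
 *
 *	as_print(AS);
 */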

/** @}
 */