/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */
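
/*
 * An illustrative sketch of the lifecycle this interface provides,
 * assuming the anonymous memory backend (anon_backend) and an example,
 * page-aligned base address; a real caller would check every return
 * value:
 *
 * @code
 * as_t *as = as_create(0);
 * as_area_t *area = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE |
 *     AS_AREA_CACHEABLE, 16 * PAGE_SIZE, 0x8000000, 0, &anon_backend,
 *     NULL);
 * (void) as_area_resize(as, 0x8000000, 32 * PAGE_SIZE, 0);
 * (void) as_area_destroy(as, 0x8000000);
 * as_destroy(as);  // legal only once as->refcount has dropped to zero
 * @endcode
 */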

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <preemption.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

#ifndef __OBJC__
/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;
#endif

/**
 * This lock serializes access to the ASID subsystem.
 * It protects:
 * - inactive_as_with_asid_head list
 * - as->asid for each as of the as_t type
 * - asids_allocated counter
 */
SPINLOCK_INITIALIZE(asidlock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

#ifndef __OBJC__
static int as_constructor(void *obj, int flags)
{
    as_t *as = (as_t *) obj;
    int rc;

    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock, MUTEX_PASSIVE);

    rc = as_constructor_arch(as, flags);

    return rc;
}

static int as_destructor(void *obj)
{
    as_t *as = (as_t *) obj;

    return as_destructor_arch(as);
}
#endif

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();

#ifndef __OBJC__
    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
#endif

    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is
 *     created.
 */
as_t *as_create(int flags)
{
    as_t *as;

#ifdef __OBJC__
    as = [as_t new];
    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock, MUTEX_PASSIVE);
    (void) as_constructor_arch(as, flags);
#else
    as = (as_t *) slab_alloc(as_slab, 0);
#endif
    (void) as_create_arch(as, 0);

    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    atomic_set(&as->refcount, 0);
    as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
    as->genarch.page_table = page_table_create(flags);
#else
    page_table_create(flags);
#endif

    return as;
}

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 *
 * We know that we don't hold any spinlock.
 */
void as_destroy(as_t *as)
{
    ipl_t ipl;
    bool cond;
    DEADLOCK_PROBE_INIT(p_asidlock);

    ASSERT(atomic_get(&as->refcount) == 0);

    /*
     * Since there is no reference to this address space,
     * it is safe not to lock its mutex.
     */

    /*
     * We need to avoid deadlock between TLB shootdown and asidlock.
     * We therefore try to take asidlock conditionally and if we don't
     * succeed, we enable interrupts and try again. This is done while
     * preemption is disabled to prevent nested context switches. We also
     * depend on the fact that so far no spinlocks are held.
     */
    preemption_disable();
    ipl = interrupts_read();
retry:
    interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }
    preemption_enable();    /* Interrupts disabled, enable preemption */
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
        if (as != AS && as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);
        asid_put(as->asid);
    }
    spinlock_unlock(&asidlock);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    for (cond = true; cond; ) {
        btree_node_t *node;

        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
        node = list_get_instance(as->as_area_btree.leaf_head.next,
            btree_node_t, leaf_link);

        if ((cond = node->keys)) {
            as_area_destroy(as, node->key[0]);
        }
    }

    btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
    page_table_destroy(as->genarch.page_table);
#else
    page_table_destroy(NULL);
#endif

    interrupts_restore(ipl);

#ifdef __OBJC__
    [as free];
#else
    slab_free(as_slab, as);
#endif
}
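
/*
 * A sketch of the destruction contract stated above, as a task-cleanup
 * path might observe it (atomic_predec() returns the new value of the
 * counter; the surrounding cleanup code is an assumption):
 *
 * @code
 * if (atomic_predec(&as->refcount) == 0)
 *     as_destroy(as);  // we held the last reference
 * @endcode
 */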

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
    mem_backend_t *backend, mem_backend_data_t *backend_data)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&a->lock, MUTEX_PASSIVE);

    a->as = as;
    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;
    a->sh_info = NULL;
    a->backend = backend;
    if (backend_data)
        a->backend_data = *backend_data;
    else
        memsetb(&a->backend_data, sizeof(a->backend_data), 0);

    btree_create(&a->used_space);

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be
 *     page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->backend == &phys_backend) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    if (pages < area->pages) {
        bool cond;
        uintptr_t start_free = area->base + pages * PAGE_SIZE;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        /*
         * Start TLB shootdown sequence.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
            pages * PAGE_SIZE, area->pages - pages);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        for (cond = true; cond;) {
            btree_node_t *node;

            ASSERT(!list_empty(&area->used_space.leaf_head));
            node = list_get_instance(area->used_space.leaf_head.prev,
                btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                uintptr_t b = node->key[node->keys - 1];
                count_t c = (count_t) node->value[node->keys - 1];
                unsigned int i = 0;

                if (overlaps(b, c * PAGE_SIZE, area->base,
                    pages * PAGE_SIZE)) {

                    if (b + c * PAGE_SIZE <= start_free) {
                        /*
                         * The whole interval fits
                         * completely in the resized
                         * address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval corresponding
                     * to b and c overlaps with the resized
                     * address space area.
                     */

                    cond = false;   /* we are almost done */
                    i = (start_free - b) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free, c - i))
                        panic("Could not remove used space.\n");
                } else {
                    /*
                     * The interval of used space can be
                     * completely removed.
                     */
                    if (!used_space_remove(area, b, c))
                        panic("Could not remove used space.\n");
                }

                for (; i < c; i++) {
                    pte_t *pte;

                    page_table_lock(as, false);
                    pte = page_mapping_find(as, b + i * PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) &&
                        PTE_PRESENT(pte));
                    if (area->backend &&
                        area->backend->frame_free) {
                        area->backend->frame_free(area,
                            b + i * PAGE_SIZE,
                            PTE_GET_FRAME(pte));
                    }
                    page_mapping_remove(as, b + i * PAGE_SIZE);
                    page_table_unlock(as, false);
                }
            }
        }

        /*
         * Finish TLB shootdown sequence.
         */

        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
            area->pages - pages);
        /*
         * Invalidate software translation caches (e.g. TSB on sparc64).
         */
        as_invalidate_translation_cache(as, area->base +
            pages * PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();

    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
            area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
    as_area_t *area;
    uintptr_t base;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    base = area->base;

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /*
     * Visit only the pages mapped by used_space B+tree.
     */
    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;
            pte_t *pte;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                if (area->backend &&
                    area->backend->frame_free) {
                    area->backend->frame_free(area, b +
                        j * PAGE_SIZE, PTE_GET_FRAME(pte));
                }
                page_mapping_remove(as, b + j * PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);
    /*
     * Invalidate potential software translation caches (e.g. TSB on
     * sparc64).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize();

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
    return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success or ENOENT if there is no such task or if there is no
 * such address space area, EPERM if there was a problem in accepting the area
 * or ENOMEM if there was a problem in allocating destination address space
 * area. ENOTSUP is returned if the address space area backend does not support
 * sharing.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
    ipl_t ipl;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;
    share_info_t *sh_info;
    mem_backend_t *src_backend;
    mem_backend_data_t src_backend_data;

    ipl = interrupts_disable();
    mutex_lock(&src_as->lock);
    src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (!src_area->backend || !src_area->backend->share) {
        /*
         * There is no backend or the backend does not
         * know how to share the area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    src_backend = src_area->backend;
    src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if (src_size != acc_size ||
        (src_flags & dst_flags_mask) != dst_flags_mask) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    /*
     * Now we are committed to sharing the area.
     * First, prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    sh_info = src_area->sh_info;
    if (!sh_info) {
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
        sh_info->refcount = 2;
        btree_create(&sh_info->pagemap);
        src_area->sh_info = sh_info;
        /*
         * Call the backend to setup sharing.
         */
        src_area->backend->share(src_area);
    } else {
        mutex_lock(&sh_info->lock);
        sh_info->refcount++;
        mutex_unlock(&sh_info->lock);
    }

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);

        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_as->lock);
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);

    interrupts_restore(ipl);

    return 0;
}
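
/*
 * A sketch of invoking the sharing operation, e.g. from IPC code that
 * accepts a memory area from another task (SRC_BASE, DST_BASE, the size
 * and the task pointers are illustrative assumptions; both areas must
 * use a backend with a share function, such as the anonymous backend):
 *
 * @code
 * int rc = as_area_share(src_task->as, SRC_BASE, 16 * PAGE_SIZE,
 *     dst_task->as, DST_BASE, AS_AREA_READ | AS_AREA_CACHEABLE);
 * if (rc != 0) {
 *     // ENOENT, ENOTSUP, EPERM or ENOMEM, as documented above
 * }
 * @endcode
 */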

/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}

/** Change address space area flags.
 *
 * The idea is to have the same data, but with a different access mode.
 * This is needed e.g. for writing code into memory and then executing it.
 * In order for this to work properly, this may copy the data
 * into private anonymous memory (unless it's already there).
 *
 * @param as Address space.
 * @param flags Flags of the area memory.
 * @param address Address within the area to be changed.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_change_flags(as_t *as, int flags, uintptr_t address)
{
    as_area_t *area;
    uintptr_t base;
    link_t *cur;
    ipl_t ipl;
    int page_flags;
    uintptr_t *old_frame;
    index_t frame_idx;
    count_t used_pages;

    /* Flags for the new memory mapping */
    page_flags = area_flags_to_page_flags(flags);

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->sh_info || area->backend != &anon_backend) {
        /* Copying shared areas not supported yet */
        /* Copying non-anonymous memory not supported yet */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    base = area->base;

    /*
     * Compute total number of used pages in the used_space B+tree
     */
    used_pages = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            used_pages += (count_t) node->value[i];
        }
    }

    /* An array for storing frame numbers */
    old_frame = malloc(used_pages * sizeof(uintptr_t), 0);

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /*
     * Remove used pages from page tables and remember their frame
     * numbers.
     */
    frame_idx = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;
            pte_t *pte;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                old_frame[frame_idx++] = PTE_GET_FRAME(pte);

                /* Remove old mapping */
                page_mapping_remove(as, b + j * PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);
    /*
     * Invalidate potential software translation caches (e.g. TSB on
     * sparc64).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize();

    /*
     * Map pages back in with new flags. This step is kept separate
     * so that there's no instant when the memory area could be
     * accessed with both the old and the new flags at once.
     */
    frame_idx = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);

                /* Insert the new mapping */
                page_mapping_insert(as, b + j * PAGE_SIZE,
                    old_frame[frame_idx++], page_flags);

                page_table_unlock(as, false);
            }
        }
    }

    free(old_frame);

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;

    if (!THREAD)
        return AS_PF_FAULT;

    ASSERT(AS);

    mutex_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (!area->backend || !area->backend->page_fault) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                page_table_unlock(AS, false);
                mutex_unlock(&area->lock);
                mutex_unlock(&AS->lock);
                return AS_PF_OK;
            }
        }
    }

    /*
     * Resort to the backend page fault handler.
     */
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}
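
/*
 * An illustrative sketch of the expected call site: an architecture's
 * low-level fault handler passes the faulting page, the access type and
 * the interrupted state here (the handler name and its arguments are
 * assumptions; the pattern follows the return codes documented above):
 *
 * @code
 * void arch_data_fault(uintptr_t badvaddr, istate_t *istate)
 * {
 *     if (as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE), PF_ACCESS_READ,
 *         istate) == AS_PF_FAULT)
 *         panic("unresolved page fault\n");
 *     // AS_PF_OK and AS_PF_DEFER both resume the interrupted context
 * }
 * @endcode
 */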

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
 * thing which is forbidden in this context is locking the address space.
 *
 * When this function is entered, no spinlocks may be held.
 *
 * @param old_as Old address space or NULL.
 * @param new_as New address space.
 */
void as_switch(as_t *old_as, as_t *new_as)
{
    DEADLOCK_PROBE_INIT(p_asidlock);
    preemption_disable();
retry:
    (void) interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        /*
         * Avoid deadlock with TLB shootdown.
         * We can enable interrupts here because
         * preemption is disabled. We should not be
         * holding any other lock.
         */
        (void) interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }
    preemption_enable();

    /*
     * First, take care of the old address space.
     */
    if (old_as) {
        ASSERT(old_as->cpu_refcount);
        if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old_as->asid != ASID_INVALID);
            list_append(&old_as->inactive_as_with_asid_link,
                &inactive_as_with_asid_head);
        }

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old_as);
    }

    /*
     * Second, prepare the new address space.
     */
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
        if (new_as->asid != ASID_INVALID)
            list_remove(&new_as->inactive_as_with_asid_link);
        else
            new_as->asid = asid_get();
    }
#ifdef AS_PAGE_TABLE
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new_as);

    spinlock_unlock(&asidlock);

    AS = new_as;
}

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}
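
/*
 * A worked example of the conversion above: a readable, writable and
 * cacheable area translates to the page flags that page_mapping_insert()
 * eventually receives:
 *
 * @code
 * int pflags = area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE |
 *     AS_AREA_CACHEABLE);
 * // pflags == PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE |
 * //     PAGE_CACHEABLE
 * @endcode
 */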

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

||
1157 | * Depending on architecture, create either address space |
||
1158 | * private or global page table. |
||
1159 | * |
||
1160 | * @param flags Flags saying whether the page table is for kernel address space. |
||
1161 | * |
||
1162 | * @return First entry of the page table. |
||
1163 | */ |
||
1164 | pte_t *page_table_create(int flags) |
||
1165 | { |
||
2125 | decky | 1166 | #ifdef __OBJC__ |
1167 | return [as_t page_table_create: flags]; |
||
1168 | #else |
||
1169 | ASSERT(as_operations); |
||
1170 | ASSERT(as_operations->page_table_create); |
||
1171 | |||
1172 | return as_operations->page_table_create(flags); |
||
1173 | #endif |
||
756 | jermar | 1174 | } |
977 | jermar | 1175 | |
/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
#ifdef __OBJC__
    return [as_t page_table_destroy: page_table];
#else
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
#endif
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
#ifdef __OBJC__
    [as page_table_lock: lock];
#else
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
#endif
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
#ifdef __OBJC__
    [as page_table_unlock: unlock];
#else
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
#endif
}
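
/*
 * A minimal sketch of the locking protocol described above, as followed
 * by the fault and resize paths in this file: the address space and/or
 * the area are locked first, then the page table, and only then are the
 * mappings inspected:
 *
 * @code
 * page_table_lock(as, false);  // false: as->lock is already held
 * pte = page_mapping_find(as, page);
 * if (pte && PTE_PRESENT(pte)) {
 *     // examine or modify the mapping
 * }
 * page_table_unlock(as, false);
 * @endcode
 */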

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on
 *     failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    unsigned int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
    if (lnode) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    unsigned int i;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
    if (node) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check if it doesn't conflict with kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START,
            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/** Return size of the address space area with given base.
 *
 * @param base Arbitrary address inside the address space area.
 *
 * @return Size of the address space area in bytes or zero if it
 *     does not exist.
 */
size_t as_area_get_size(uintptr_t base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}

1387 | jermar | 1403 | /** Mark portion of address space area as used. |
1404 | * |
||
1405 | * The address space area must be already locked. |
||
1406 | * |
||
1407 | * @param a Address space area. |
||
1408 | * @param page First page to be marked. |
||
1409 | * @param count Number of page to be marked. |
||
1410 | * |
||
1411 | * @return 0 on failure and 1 on success. |
||
1412 | */ |
||
1780 | jermar | 1413 | int used_space_insert(as_area_t *a, uintptr_t page, count_t count) |
1387 | jermar | 1414 | { |
1415 | btree_node_t *leaf, *node; |
||
1416 | count_t pages; |
||
2745 | decky | 1417 | unsigned int i; |
1387 | jermar | 1418 | |
1419 | ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); |
||
1420 | ASSERT(count); |
||
1421 | |||
1422 | pages = (count_t) btree_search(&a->used_space, page, &leaf); |
||
1423 | if (pages) { |
||
1424 | /* |
||
1425 | * We hit the beginning of some used space. |
||
1426 | */ |
||
1427 | return 0; |
||
1428 | } |
||
1429 | |||
1437 | jermar | 1430 | if (!leaf->keys) { |
1431 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1432 | return 1; |
||
1433 | } |
||
1434 | |||
1387 | jermar | 1435 | node = btree_leaf_node_left_neighbour(&a->used_space, leaf); |
1436 | if (node) { |
||
2087 | jermar | 1437 | uintptr_t left_pg = node->key[node->keys - 1]; |
1438 | uintptr_t right_pg = leaf->key[0]; |
||
1439 | count_t left_cnt = (count_t) node->value[node->keys - 1]; |
||
1440 | count_t right_cnt = (count_t) leaf->value[0]; |
||
1387 | jermar | 1441 | |
1442 | /* |
||
1443 | * Examine the possibility that the interval fits |
||
1444 | * somewhere between the rightmost interval of |
||
1445 | * the left neighbour and the first interval of the leaf. |
||
1446 | */ |
||
1447 | |||
1448 | if (page >= right_pg) { |
||
1449 | /* Do nothing. */ |
||
2087 | jermar | 1450 | } else if (overlaps(page, count * PAGE_SIZE, left_pg, |
1451 | left_cnt * PAGE_SIZE)) { |
||
1387 | jermar | 1452 | /* The interval intersects with the left interval. */ |
1453 | return 0; |
||
2087 | jermar | 1454 | } else if (overlaps(page, count * PAGE_SIZE, right_pg, |
1455 | right_cnt * PAGE_SIZE)) { |
||
1387 | jermar | 1456 | /* The interval intersects with the right interval. */ |
1457 | return 0; |
||
2087 | jermar | 1458 | } else if ((page == left_pg + left_cnt * PAGE_SIZE) && |
1459 | (page + count * PAGE_SIZE == right_pg)) { |
||
1460 | /* |
||
1461 | * The interval can be added by merging the two already |
||
1462 | * present intervals. |
||
1463 | */ |
||
1403 | jermar | 1464 | node->value[node->keys - 1] += count + right_cnt; |
1387 | jermar | 1465 | btree_remove(&a->used_space, right_pg, leaf); |
1466 | return 1; |
||
2087 | jermar | 1467 | } else if (page == left_pg + left_cnt * PAGE_SIZE) { |
1468 | /* |
||
1469 | * The interval can be added by simply growing the left |
||
1470 | * interval. |
||
1471 | */ |
||
1403 | jermar | 1472 | node->value[node->keys - 1] += count; |
1387 | jermar | 1473 | return 1; |
2087 | jermar | 1474 | } else if (page + count * PAGE_SIZE == right_pg) { |
1387 | jermar | 1475 | /* |
2087 | jermar | 1476 | * The interval can be added by simply moving the base of |
1477 | * the right interval down and increasing its size |
||
1478 | * accordingly. |
||
1387 | jermar | 1479 | */ |
1403 | jermar | 1480 | leaf->value[0] += count; |
1387 | jermar | 1481 | leaf->key[0] = page; |
1482 | return 1; |
||
1483 | } else { |
||
1484 | /* |
||
1485 | * The interval is between both neighbouring intervals, |
||
1486 | * but cannot be merged with any of them. |
||
1487 | */ |
||
2087 | jermar | 1488 | btree_insert(&a->used_space, page, (void *) count, |
1489 | leaf); |
||
1387 | jermar | 1490 | return 1; |
1491 | } |
||
1492 | } else if (page < leaf->key[0]) { |
||
1780 | jermar | 1493 | uintptr_t right_pg = leaf->key[0]; |
1387 | jermar | 1494 | count_t right_cnt = (count_t) leaf->value[0]; |
1495 | |||
1496 | /* |
||
2087 | jermar | 1497 | * Investigate the border case in which the left neighbour does |
1498 | * not exist but the interval fits from the left. |
||
1387 | jermar | 1499 | */ |
1500 | |||
2087 | jermar | 1501 | if (overlaps(page, count * PAGE_SIZE, right_pg, |
1502 | right_cnt * PAGE_SIZE)) { |
||
1387 | jermar | 1503 | /* The interval intersects with the right interval. */ |
1504 | return 0; |
||
2087 | jermar | 1505 | } else if (page + count * PAGE_SIZE == right_pg) { |
1387 | jermar | 1506 | /* |
2087 | jermar | 1507 | * The interval can be added by moving the base of the |
1508 | * right interval down and increasing its size |
||
1509 | * accordingly. |
||
1387 | jermar | 1510 | */ |
1511 | leaf->key[0] = page; |
||
1403 | jermar | 1512 | leaf->value[0] += count; |
1387 | jermar | 1513 | return 1; |
1514 | } else { |
||
1515 | /* |
||
1516 | * The interval does not adjoin the right interval. |
||
1517 | * It must be added individually. |
||
1518 | */ |
||
2087 | jermar | 1519 | btree_insert(&a->used_space, page, (void *) count, |
1520 | leaf); |
||
1387 | jermar | 1521 | return 1; |
1522 | } |
||
1523 | } |
||
1524 | |||
1525 | node = btree_leaf_node_right_neighbour(&a->used_space, leaf); |
||
1526 | if (node) { |
||
2087 | jermar | 1527 | uintptr_t left_pg = leaf->key[leaf->keys - 1]; |
1528 | uintptr_t right_pg = node->key[0]; |
||
1529 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1]; |
||
1530 | count_t right_cnt = (count_t) node->value[0]; |
||
1387 | jermar | 1531 | |
1532 | /* |
||
1533 | * Examine the possibility that the interval fits |
||
1534 | * somewhere between the leftmost interval of |
||
1535 | * the right neighbour and the last interval of the leaf. |
||
1536 | */ |
||
1537 | |||
1538 | if (page < left_pg) { |
||
1539 | /* Do nothing. */ |
||
2087 | jermar | 1540 | } else if (overlaps(page, count * PAGE_SIZE, left_pg, |
1541 | left_cnt * PAGE_SIZE)) { |
||
1387 | jermar | 1542 | /* The interval intersects with the left interval. */ |
1543 | return 0; |
||
2087 | jermar | 1544 | } else if (overlaps(page, count * PAGE_SIZE, right_pg, |
1545 | right_cnt * PAGE_SIZE)) { |
||
1387 | jermar | 1546 | /* The interval intersects with the right interval. */ |
1547 | return 0; |
||
2087 | jermar | 1548 | } else if ((page == left_pg + left_cnt * PAGE_SIZE) && |
1549 | (page + count * PAGE_SIZE == right_pg)) { |
||
1550 | /* |
||
1551 | * The interval can be added by merging the two already |
||
1552 | * present intervals. |
||
1553 | */ |
||
1403 | jermar | 1554 | leaf->value[leaf->keys - 1] += count + right_cnt; |
1387 | jermar | 1555 | btree_remove(&a->used_space, right_pg, node); |
1556 | return 1; |
||
2087 | jermar | 1557 | } else if (page == left_pg + left_cnt * PAGE_SIZE) { |
1558 | /* |
||
1559 | * The interval can be added by simply growing the left |
||
1560 | * interval. |
||
1561 | */ |
||
1403 | jermar | 1562 | leaf->value[leaf->keys - 1] += count; |
1387 | jermar | 1563 | return 1; |
2087 | jermar | 1564 | } else if (page + count * PAGE_SIZE == right_pg) { |
1387 | jermar | 1565 | /* |
2087 | jermar | 1566 | * The interval can be added by simply moving the base of |
1567 | * the right interval down and increasing its size |
||
1568 | * accordingly. |
||
1387 | jermar | 1569 | */ |
1403 | jermar | 1570 | node->value[0] += count; |
1387 | jermar | 1571 | node->key[0] = page; |
1572 | return 1; |
||
1573 | } else { |
||
1574 | /* |
||
1575 | * The interval is between both neighbouring intervals, |
||
1576 | * but cannot be merged with any of them. |
||
1577 | */ |
||
2087 | jermar | 1578 | btree_insert(&a->used_space, page, (void *) count, |
1579 | leaf); |
||
1387 | jermar | 1580 | return 1; |
1581 | } |
||
1582 | } else if (page >= leaf->key[leaf->keys - 1]) { |
||
1780 | jermar | 1583 | uintptr_t left_pg = leaf->key[leaf->keys - 1]; |
1387 | jermar | 1584 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1]; |
1585 | |||
1586 | /* |
||
2087 | jermar | 1587 | * Investigate the border case in which the right neighbour |
1588 | * does not exist but the interval fits from the right. |
||
1387 | jermar | 1589 | */ |
1590 | |||
2087 | jermar | 1591 | if (overlaps(page, count * PAGE_SIZE, left_pg, |
1592 | left_cnt * PAGE_SIZE)) { |
||
1403 | jermar | 1593 | /* The interval intersects with the left interval. */ |
1387 | jermar | 1594 | return 0; |
2087 | jermar | 1595 | } else if (left_pg + left_cnt * PAGE_SIZE == page) { |
1596 | /* |
||
1597 | * The interval can be added by growing the left |
||
1598 | * interval. |
||
1599 | */ |
||
1403 | jermar | 1600 | leaf->value[leaf->keys - 1] += count; |
1387 | jermar | 1601 | return 1; |
1602 | } else { |
||
1603 | /* |
||
1604 | * The interval does not adjoin the left interval. |
||
1605 | * It must be added individually. |
||
1606 | */ |
||
2087 | jermar | 1607 | btree_insert(&a->used_space, page, (void *) count, |
1608 | leaf); |
||
1387 | jermar | 1609 | return 1; |
1610 | } |
||
1611 | } |
||
1612 | |||
1613 | /* |
||
2087 | jermar | 1614 | * Note that if the algorithm made it thus far, the interval can fit |
1615 | * only between two other intervals of the leaf. The two border cases |
||
1616 | * were already resolved. |
||
1387 | jermar | 1617 | */ |
1618 | for (i = 1; i < leaf->keys; i++) { |
||
1619 | if (page < leaf->key[i]) { |
||
2087 | jermar | 1620 | uintptr_t left_pg = leaf->key[i - 1]; |
1621 | uintptr_t right_pg = leaf->key[i]; |
||
1622 | count_t left_cnt = (count_t) leaf->value[i - 1]; |
||
1623 | count_t right_cnt = (count_t) leaf->value[i]; |
||
1387 | jermar | 1624 | |
1625 | /* |
||
1626 | * The interval fits between left_pg and right_pg. |
||
1627 | */ |
||
1628 | |||
2087 | jermar | 1629 | if (overlaps(page, count * PAGE_SIZE, left_pg, |
1630 | left_cnt * PAGE_SIZE)) { |
||
1631 | /* |
||
1632 | * The interval intersects with the left |
||
1633 | * interval. |
||
1634 | */ |
||
1387 | jermar | 1635 | return 0; |
2087 | jermar | 1636 | } else if (overlaps(page, count * PAGE_SIZE, right_pg, |
1637 | right_cnt * PAGE_SIZE)) { |
||
1638 | /* |
||
1639 | * The interval intersects with the right |
||
1640 | * interval. |
||
1641 | */ |
||
1387 | jermar | 1642 | return 0; |
2087 | jermar | 1643 | } else if ((page == left_pg + left_cnt * PAGE_SIZE) && |
1644 | (page + count * PAGE_SIZE == right_pg)) { |
||
1645 | /* |
||
1646 | * The interval can be added by merging the two |
||
1647 | * already present intervals. |
||
1648 | */ |
||
1403 | jermar | 1649 | leaf->value[i - 1] += count + right_cnt; |
1387 | jermar | 1650 | btree_remove(&a->used_space, right_pg, leaf); |
1651 | return 1; |
||
2087 | jermar | 1652 | } else if (page == left_pg + left_cnt * PAGE_SIZE) { |
1653 | /* |
||
1654 | * The interval can be added by simply growing |
||
1655 | * the left interval. |
||
1656 | */ |
||
1403 | jermar | 1657 | leaf->value[i - 1] += count; |
1387 | jermar | 1658 | return 1; |
2087 | jermar | 1659 | } else if (page + count * PAGE_SIZE == right_pg) { |
1387 | jermar | 1660 | /* |
2087 | jermar | 1661 | * The interval can be added by simply moving the |
1662 | * base of the right interval down and |
||
1663 | * increasing its size accordingly. |
||
1387 | jermar | 1664 | */ |
1403 | jermar | 1665 | leaf->value[i] += count; |
1387 | jermar | 1666 | leaf->key[i] = page; |
1667 | return 1; |
||
1668 | } else { |
||
1669 | /* |
||
2087 | jermar | 1670 | * The interval is between both neighbouring |
1671 | * intervals, but cannot be merged with any of |
||
1672 | * them. |
||
1387 | jermar | 1673 | */ |
2087 | jermar | 1674 | btree_insert(&a->used_space, page, |
1675 | (void *) count, leaf); |
||
1387 | jermar | 1676 | return 1; |
1677 | } |
||
1678 | } |
||
1679 | } |
||
1680 | |||
3057 | decky | 1681 | panic("Inconsistency detected while adding %" PRIc " pages of used space at " |
2087 | jermar | 1682 | "%p.\n", count, page); |
1387 | jermar | 1683 | } |
1684 | |||
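/*
 * Worked example of the merge case above (illustrative only, assuming
 * 4 KiB pages): suppose a leaf of the used_space tree records the
 * intervals [0x10000, 3 pages] and [0x14000, 2 pages]. Then
 *
 *   used_space_insert(a, 0x13000, 1)
 *
 * satisfies page == left_pg + left_cnt * PAGE_SIZE (0x10000 + 3 * 0x1000)
 * and page + count * PAGE_SIZE == right_pg (0x13000 + 0x1000 == 0x14000),
 * so the two records merge into a single [0x10000, 6 pages] interval and
 * the right key is removed from the B+tree.
 */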
1685 | /** Mark portion of address space area as unused. |
||
1686 | * |
||
1687 | * The address space area must already be locked. |
||
1688 | * |
||
1689 | * @param a Address space area. |
||
1690 | * @param page First page to be marked. |
||
1691 | * @param count Number of pages to be marked. |
||
1692 | * |
||
1693 | * @return 0 on failure and 1 on success. |
||
1694 | */ |
||
1780 | jermar | 1695 | int used_space_remove(as_area_t *a, uintptr_t page, count_t count) |
1387 | jermar | 1696 | { |
1697 | btree_node_t *leaf, *node; |
||
1698 | count_t pages; |
||
2745 | decky | 1699 | unsigned int i; |
1387 | jermar | 1700 | |
1701 | ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); |
||
1702 | ASSERT(count); |
||
1703 | |||
1704 | pages = (count_t) btree_search(&a->used_space, page, &leaf); |
||
1705 | if (pages) { |
||
1706 | /* |
||
1707 | * We are lucky, page is the beginning of some interval. |
||
1708 | */ |
||
1709 | if (count > pages) { |
||
1710 | return 0; |
||
1711 | } else if (count == pages) { |
||
1712 | btree_remove(&a->used_space, page, leaf); |
||
1403 | jermar | 1713 | return 1; |
1387 | jermar | 1714 | } else { |
1715 | /* |
||
1716 | * Find the respective interval. |
||
1717 | * Decrease its size and relocate its start address. |
||
1718 | */ |
||
1719 | for (i = 0; i < leaf->keys; i++) { |
||
1720 | if (leaf->key[i] == page) { |
||
2087 | jermar | 1721 | leaf->key[i] += count * PAGE_SIZE; |
1403 | jermar | 1722 | leaf->value[i] -= count; |
1387 | jermar | 1723 | return 1; |
1724 | } |
||
1725 | } |
||
1726 | goto error; |
||
1727 | } |
||
1728 | } |
||
1729 | |||
1730 | node = btree_leaf_node_left_neighbour(&a->used_space, leaf); |
||
1731 | if (node && page < leaf->key[0]) { |
||
1780 | jermar | 1732 | uintptr_t left_pg = node->key[node->keys - 1]; |
1387 | jermar | 1733 | count_t left_cnt = (count_t) node->value[node->keys - 1]; |
1734 | |||
2087 | jermar | 1735 | if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, |
1736 | count * PAGE_SIZE)) { |
||
1737 | if (page + count * PAGE_SIZE == |
||
1738 | left_pg + left_cnt * PAGE_SIZE) { |
||
1387 | jermar | 1739 | /* |
2087 | jermar | 1740 | * The interval is contained in the rightmost |
1741 | * interval of the left neighbour and can be |
||
1742 | * removed by updating the size of the bigger |
||
1743 | * interval. |
||
1387 | jermar | 1744 | */ |
1403 | jermar | 1745 | node->value[node->keys - 1] -= count; |
1387 | jermar | 1746 | return 1; |
2087 | jermar | 1747 | } else if (page + count * PAGE_SIZE < |
1748 | left_pg + left_cnt * PAGE_SIZE) { |
||
1403 | jermar | 1749 | count_t new_cnt; |
1387 | jermar | 1750 | |
1751 | /* |
||
2087 | jermar | 1752 | * The interval is contained in the rightmost |
1753 | * interval of the left neighbour but its |
||
1754 | * removal requires both updating the size of |
||
1755 | * the original interval and also inserting a |
||
1756 | * new interval. |
||
1387 | jermar | 1757 | */ |
2087 | jermar | 1758 | new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - |
1759 | (page + count * PAGE_SIZE)) >> PAGE_WIDTH; |
||
1403 | jermar | 1760 | node->value[node->keys - 1] -= count + new_cnt; |
2087 | jermar | 1761 | btree_insert(&a->used_space, page + |
1762 | count * PAGE_SIZE, (void *) new_cnt, leaf); |
||
1387 | jermar | 1763 | return 1; |
1764 | } |
||
1765 | } |
||
1766 | return 0; |
||
1767 | } else if (page < leaf->key[0]) { |
||
1768 | return 0; |
||
1769 | } |
||
1770 | |||
1771 | if (page > leaf->key[leaf->keys - 1]) { |
||
1780 | jermar | 1772 | uintptr_t left_pg = leaf->key[leaf->keys - 1]; |
1387 | jermar | 1773 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1]; |
1774 | |||
2087 | jermar | 1775 | if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, |
1776 | count * PAGE_SIZE)) { |
||
1777 | if (page + count * PAGE_SIZE == |
||
1778 | left_pg + left_cnt * PAGE_SIZE) { |
||
1387 | jermar | 1779 | /* |
2087 | jermar | 1780 | * The interval is contained in the rightmost |
1781 | * interval of the leaf and can be removed by |
||
1782 | * updating the size of the bigger interval. |
||
1387 | jermar | 1783 | */ |
1403 | jermar | 1784 | leaf->value[leaf->keys - 1] -= count; |
1387 | jermar | 1785 | return 1; |
2087 | jermar | 1786 | } else if (page + count * PAGE_SIZE < left_pg + |
1787 | left_cnt * PAGE_SIZE) { |
||
1403 | jermar | 1788 | count_t new_cnt; |
1387 | jermar | 1789 | |
1790 | /* |
||
2087 | jermar | 1791 | * The interval is contained in the rightmost |
1792 | * interval of the leaf but its removal |
||
1793 | * requires both updating the size of the |
||
1794 | * original interval and also inserting a new |
||
1795 | * interval. |
||
1387 | jermar | 1796 | */ |
2087 | jermar | 1797 | new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - |
1798 | (page + count * PAGE_SIZE)) >> PAGE_WIDTH; |
||
1403 | jermar | 1799 | leaf->value[leaf->keys - 1] -= count + new_cnt; |
2087 | jermar | 1800 | btree_insert(&a->used_space, page + |
1801 | count * PAGE_SIZE, (void *) new_cnt, leaf); |
||
1387 | jermar | 1802 | return 1; |
1803 | } |
||
1804 | } |
||
1805 | return 0; |
||
1806 | } |
||
1807 | |||
1808 | /* |
||
1809 | * The border cases have been already resolved. |
||
1810 | * Now the interval can be only between intervals of the leaf. |
||
1811 | */ |
||
1812 | for (i = 1; i < leaf->keys; i++) { |
||
1813 | if (page < leaf->key[i]) { |
||
1780 | jermar | 1814 | uintptr_t left_pg = leaf->key[i - 1]; |
1387 | jermar | 1815 | count_t left_cnt = (count_t) leaf->value[i - 1]; |
1816 | |||
1817 | /* |
||
2087 | jermar | 1818 | * Now the interval is between intervals corresponding |
1819 | * to (i - 1) and i. |
||
1387 | jermar | 1820 | */ |
2087 | jermar | 1821 | if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, |
1822 | count * PAGE_SIZE)) { |
||
1823 | if (page + count * PAGE_SIZE == |
||
1824 | left_pg + left_cnt * PAGE_SIZE) { |
||
1387 | jermar | 1825 | /* |
2087 | jermar | 1826 | * The interval is contained in the |
1827 | * interval (i - 1) of the leaf and can |
||
1828 | * be removed by updating the size of |
||
1829 | * the bigger interval. |
||
1387 | jermar | 1830 | */ |
1403 | jermar | 1831 | leaf->value[i - 1] -= count; |
1387 | jermar | 1832 | return 1; |
2087 | jermar | 1833 | } else if (page + count * PAGE_SIZE < |
1834 | left_pg + left_cnt * PAGE_SIZE) { |
||
1403 | jermar | 1835 | count_t new_cnt; |
1387 | jermar | 1836 | |
1837 | /* |
||
2087 | jermar | 1838 | * The interval is contained in the |
1839 | * interval (i - 1) of the leaf but its |
||
1840 | * removal requires both updating the |
||
1841 | * size of the original interval and |
||
1387 | jermar | 1842 | * also inserting a new interval. |
1843 | */ |
||
2087 | jermar | 1844 | new_cnt = ((left_pg + |
1845 | left_cnt * PAGE_SIZE) - |
||
1846 | (page + count * PAGE_SIZE)) >> |
||
1847 | PAGE_WIDTH; |
||
1403 | jermar | 1848 | leaf->value[i - 1] -= count + new_cnt; |
2087 | jermar | 1849 | btree_insert(&a->used_space, page + |
1850 | count * PAGE_SIZE, (void *) new_cnt, |
||
1851 | leaf); |
||
1387 | jermar | 1852 | return 1; |
1853 | } |
||
1854 | } |
||
1855 | return 0; |
||
1856 | } |
||
1857 | } |
||
1858 | |||
1859 | error: |
||
3057 | decky | 1860 | panic("Inconsistency detected while removing %" PRIc " pages of used space " |
2087 | jermar | 1861 | "from %p.\n", count, page); |
1387 | jermar | 1862 | } |
1863 | |||
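/*
 * Worked example of the split case above (illustrative only, assuming
 * 4 KiB pages): removing one page at page = 0x11000 from the middle of a
 * recorded interval [left_pg = 0x10000, left_cnt = 4] gives
 *
 *   new_cnt = ((0x10000 + 4 * 0x1000) - (0x11000 + 1 * 0x1000))
 *             >> PAGE_WIDTH = 2,
 *
 * so the original record shrinks by count + new_cnt = 3 pages (down to
 * one page, [0x10000, 1]) and a new interval [0x12000, 2 pages] is
 * inserted just after the hole.
 */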
1409 | jermar | 1864 | /** Remove reference to address space area share info. |
1865 | * |
||
1866 | * If the reference count drops to 0, the sh_info is deallocated. |
||
1867 | * |
||
1868 | * @param sh_info Pointer to address space area share info. |
||
1869 | */ |
||
1870 | void sh_info_remove_reference(share_info_t *sh_info) |
||
1871 | { |
||
1872 | bool dealloc = false; |
||
1873 | |||
1874 | mutex_lock(&sh_info->lock); |
||
1875 | ASSERT(sh_info->refcount); |
||
1876 | if (--sh_info->refcount == 0) { |
||
1877 | dealloc = true; |
||
1495 | jermar | 1878 | link_t *cur; |
1409 | jermar | 1879 | |
1880 | /* |
||
1881 | * Now carefully walk the pagemap B+tree and free/remove the |
||
1882 | * reference from all frames found there. |
||
1883 | */ |
||
2087 | jermar | 1884 | for (cur = sh_info->pagemap.leaf_head.next; |
1885 | cur != &sh_info->pagemap.leaf_head; cur = cur->next) { |
||
1409 | jermar | 1886 | btree_node_t *node; |
2745 | decky | 1887 | unsigned int i; |
1409 | jermar | 1888 | |
1495 | jermar | 1889 | node = list_get_instance(cur, btree_node_t, leaf_link); |
1890 | for (i = 0; i < node->keys; i++) |
||
1780 | jermar | 1891 | frame_free((uintptr_t) node->value[i]); |
1409 | jermar | 1892 | } |
1893 | |||
1894 | } |
||
1895 | mutex_unlock(&sh_info->lock); |
||
1896 | |||
1897 | if (dealloc) { |
||
1898 | btree_destroy(&sh_info->pagemap); |
||
1899 | free(sh_info); |
||
1900 | } |
||
1901 | } |
||
1902 | |||
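/*
 * Sketch of the matching acquire side (hypothetical helper, not part of
 * this file): the reference count is always manipulated under
 * sh_info->lock, so a new holder of the share info simply locks,
 * increments and unlocks.
 */
static void sh_info_add_reference_sketch(share_info_t *sh_info)
{
	mutex_lock(&sh_info->lock);
	sh_info->refcount++;
	mutex_unlock(&sh_info->lock);
}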
1235 | jermar | 1903 | /* |
1904 | * Address space related syscalls. |
||
1905 | */ |
||
1906 | |||
1907 | /** Wrapper for as_area_create(). */ |
||
1780 | jermar | 1908 | unative_t sys_as_area_create(uintptr_t address, size_t size, int flags) |
1235 | jermar | 1909 | { |
2087 | jermar | 1910 | if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, |
1911 | AS_AREA_ATTR_NONE, &anon_backend, NULL)) |
||
1780 | jermar | 1912 | return (unative_t) address; |
1235 | jermar | 1913 | else |
1780 | jermar | 1914 | return (unative_t) -1; |
1235 | jermar | 1915 | } |
1916 | |||
1793 | jermar | 1917 | /** Wrapper for as_area_resize(). */ |
1780 | jermar | 1918 | unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags) |
1235 | jermar | 1919 | { |
1780 | jermar | 1920 | return (unative_t) as_area_resize(AS, address, size, 0); |
1235 | jermar | 1921 | } |
1922 | |||
3222 | svoboda | 1923 | /** Wrapper for as_area_change_flags(). */ |
1924 | unative_t sys_as_area_change_flags(uintptr_t address, int flags) |
||
1925 | { |
||
1926 | return (unative_t) as_area_change_flags(AS, flags, address); |
||
1927 | } |
||
1928 | |||
1793 | jermar | 1929 | /** Wrapper for as_area_destroy(). */ |
1780 | jermar | 1930 | unative_t sys_as_area_destroy(uintptr_t address) |
1306 | jermar | 1931 | { |
1780 | jermar | 1932 | return (unative_t) as_area_destroy(AS, address); |
1306 | jermar | 1933 | } |
1702 | cejka | 1934 | |
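/*
 * Hypothetical caller's view of the wrappers above (illustrative only;
 * the base address and the AS_AREA_* flag names are assumptions taken
 * from the kernel headers): sys_as_area_create() returns the area base
 * on success and (unative_t) -1 on failure, so the result can be tested
 * directly.
 */
static void sys_as_area_example(void)
{
	unative_t rc;

	rc = sys_as_area_create(0x80000000, 4 * PAGE_SIZE,
	    AS_AREA_READ | AS_AREA_WRITE);
	if (rc == (unative_t) -1)
		return;		/* Creation failed. */

	/* Grow the area to eight pages, then tear it down again. */
	(void) sys_as_area_resize((uintptr_t) rc, 8 * PAGE_SIZE, 0);
	(void) sys_as_area_destroy((uintptr_t) rc);
}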
1914 | jermar | 1935 | /** Print out information about address space. |
1936 | * |
||
1937 | * @param as Address space. |
||
1938 | */ |
||
1939 | void as_print(as_t *as) |
||
1940 | { |
||
1941 | ipl_t ipl; |
||
1942 | |||
1943 | ipl = interrupts_disable(); |
||
1944 | mutex_lock(&as->lock); |
||
1945 | |||
1946 | /* Print out info about address space areas. */ |
||
1947 | link_t *cur; |
||
2087 | jermar | 1948 | for (cur = as->as_area_btree.leaf_head.next; |
1949 | cur != &as->as_area_btree.leaf_head; cur = cur->next) { |
||
1950 | btree_node_t *node; |
||
1914 | jermar | 1951 | |
2087 | jermar | 1952 | node = list_get_instance(cur, btree_node_t, leaf_link); |
1953 | |||
2745 | decky | 1954 | unsigned int i; |
1914 | jermar | 1955 | for (i = 0; i < node->keys; i++) { |
1915 | jermar | 1956 | as_area_t *area = node->value[i]; |
1914 | jermar | 1957 | |
1958 | mutex_lock(&area->lock); |
||
3057 | decky | 1959 | printf("as_area: %p, base=%p, pages=%" PRIc " (%p - %p)\n", |
2087 | jermar | 1960 | area, area->base, area->pages, area->base, |
3057 | decky | 1961 | area->base + FRAMES2SIZE(area->pages)); |
1914 | jermar | 1962 | mutex_unlock(&area->lock); |
1963 | } |
||
1964 | } |
||
1965 | |||
1966 | mutex_unlock(&as->lock); |
||
1967 | interrupts_restore(ipl); |
||
1968 | } |
||
1969 | |||
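/*
 * Example of a line produced by as_print() above (all values are
 * illustrative; 16 pages of 4 KiB span 0x10000 bytes):
 *
 *   as_area: 0x8012f400, base=0x70000000, pages=16 (0x70000000 - 0x70010000)
 */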
1757 | jermar | 1970 | /** @} |
1702 | cejka | 1971 | */ |