/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <preemption.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;

/**
 * This lock serializes access to the ASID subsystem.
 * It protects:
 * - inactive_as_with_asid_head list
 * - as->asid for each as of the as_t type
 * - asids_allocated counter
 */
SPINLOCK_INITIALIZE(asidlock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int);
static as_area_t *find_area_and_lock(as_t *, uintptr_t);
static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *);
static void sh_info_remove_reference(share_info_t *);

static int as_constructor(void *obj, int flags)
{
    as_t *as = (as_t *) obj;
    int rc;

    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock, MUTEX_PASSIVE);

    rc = as_constructor_arch(as, flags);

    return rc;
}

static int as_destructor(void *obj)
{
    as_t *as = (as_t *) obj;

    return as_destructor_arch(as);
}

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();

    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags		Flags that influence the way in which the address space
 *			is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) slab_alloc(as_slab, 0);
    (void) as_create_arch(as, 0);

    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    atomic_set(&as->refcount, 0);
    as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
    as->genarch.page_table = page_table_create(flags);
#else
    page_table_create(flags);
#endif

    return as;
}

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 *
 * We know that we don't hold any spinlock.
 *
 * @param as		Address space to be destroyed.
 */
void as_destroy(as_t *as)
{
    ipl_t ipl;
    bool cond;
    DEADLOCK_PROBE_INIT(p_asidlock);

    ASSERT(atomic_get(&as->refcount) == 0);

    /*
     * Since there is no reference to this address space,
     * it is safe not to lock its mutex.
     */

    /*
     * We need to avoid deadlock between TLB shootdown and asidlock.
     * We therefore try to take asid conditionally and if we don't succeed,
     * we enable interrupts and try again. This is done while preemption is
     * disabled to prevent nested context switches. We also depend on the
     * fact that so far no spinlocks are held.
     */
    preemption_disable();
    ipl = interrupts_read();
retry:
    interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }
    preemption_enable();    /* Interrupts disabled, enable preemption */
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
        if (as != AS && as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);
        asid_put(as->asid);
    }
    spinlock_unlock(&asidlock);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    for (cond = true; cond; ) {
        btree_node_t *node;

        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
        node = list_get_instance(as->as_area_btree.leaf_head.next,
            btree_node_t, leaf_link);

        if ((cond = node->keys)) {
            as_area_destroy(as, node->key[0]);
        }
    }

    btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
    page_table_destroy(as->genarch.page_table);
#else
    page_table_destroy(NULL);
#endif

    interrupts_restore(ipl);

    slab_free(as_slab, as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as		Target address space.
 * @param flags		Flags of the area memory.
 * @param size		Size of area.
 * @param base		Base address of area.
 * @param attrs		Attributes of the area.
 * @param backend	Address space area backend. NULL if no backend is used.
 * @param backend_data	NULL or a pointer to an array holding two void *.
 *
 * @return		Address space area on success or NULL on failure.
 */
as_area_t *
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
    mem_backend_t *backend, mem_backend_data_t *backend_data)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&a->lock, MUTEX_PASSIVE);

    a->as = as;
    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;
    a->sh_info = NULL;
    a->backend = backend;
    if (backend_data)
        a->backend_data = *backend_data;
    else
        memsetb(&a->backend_data, sizeof(a->backend_data), 0);

    btree_create(&a->used_space);

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

/** Find address space area and change it.
 *
 * @param as		Address space.
 * @param address	Virtual address belonging to the area to be changed.
 *			Must be page-aligned.
 * @param size		New size of the virtual memory block starting at
 *			address.
 * @param flags		Flags influencing the remap operation. Currently unused.
 *
 * @return		Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->backend == &phys_backend) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    if (pages < area->pages) {
        bool cond;
        uintptr_t start_free = area->base + pages * PAGE_SIZE;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        /*
         * Start TLB shootdown sequence.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base +
            pages * PAGE_SIZE, area->pages - pages);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        for (cond = true; cond;) {
            btree_node_t *node;

            ASSERT(!list_empty(&area->used_space.leaf_head));
            node =
                list_get_instance(area->used_space.leaf_head.prev,
                btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                uintptr_t b = node->key[node->keys - 1];
                count_t c =
                    (count_t) node->value[node->keys - 1];
                unsigned int i = 0;

                if (overlaps(b, c * PAGE_SIZE, area->base,
                    pages * PAGE_SIZE)) {

                    if (b + c * PAGE_SIZE <= start_free) {
                        /*
                         * The whole interval fits
                         * completely in the resized
                         * address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval corresponding
                     * to b and c overlaps with the resized
                     * address space area.
                     */

                    cond = false;    /* we are almost done */
                    i = (start_free - b) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free,
                        c - i))
                        panic("Could not remove used "
                            "space.\n");
                } else {
                    /*
                     * The interval of used space can be
                     * completely removed.
                     */
                    if (!used_space_remove(area, b, c))
                        panic("Could not remove used "
                            "space.\n");
                }

                for (; i < c; i++) {
                    pte_t *pte;

                    page_table_lock(as, false);
                    pte = page_mapping_find(as, b +
                        i * PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) &&
                        PTE_PRESENT(pte));
                    if (area->backend &&
                        area->backend->frame_free) {
                        area->backend->frame_free(area,
                            b + i * PAGE_SIZE,
                            PTE_GET_FRAME(pte));
                    }
                    page_mapping_remove(as, b +
                        i * PAGE_SIZE);
                    page_table_unlock(as, false);
                }
            }
        }

        /*
         * Finish TLB shootdown sequence.
         */

        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
            area->pages - pages);
        /*
         * Invalidate software translation caches (e.g. TSB on sparc64).
         */
        as_invalidate_translation_cache(as, area->base +
            pages * PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();

    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
            area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}

/** Destroy address space area.
 *
 * @param as		Address space.
 * @param address	Address within the area to be deleted.
 *
 * @return		Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
    as_area_t *area;
    uintptr_t base;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    base = area->base;

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /*
     * Visit only the pages mapped by used_space B+tree.
     */
    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;
            pte_t *pte;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                if (area->backend &&
                    area->backend->frame_free) {
                    area->backend->frame_free(area, b +
                        j * PAGE_SIZE, PTE_GET_FRAME(pte));
                }
                page_mapping_remove(as, b + j * PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);
    /*
     * Invalidate potential software translation caches (e.g. TSB on
     * sparc64).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize();

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
    return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as	Pointer to source address space.
 * @param src_base	Base address of the source address space area.
 * @param acc_size	Expected size of the source area.
 * @param dst_as	Pointer to destination address space.
 * @param dst_base	Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return		Zero on success or ENOENT if there is no such task or if
 *			there is no such address space area, EPERM if there was
 *			a problem in accepting the area or ENOMEM if there was a
 *			problem in allocating destination address space area.
 *			ENOTSUP is returned if the address space area backend
 *			does not support sharing.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
    ipl_t ipl;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;
    share_info_t *sh_info;
    mem_backend_t *src_backend;
    mem_backend_data_t src_backend_data;

    ipl = interrupts_disable();
    mutex_lock(&src_as->lock);
    src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (!src_area->backend || !src_area->backend->share) {
        /*
         * There is no backend or the backend does not
         * know how to share the area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    src_backend = src_area->backend;
    src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if (src_size != acc_size ||
        (src_flags & dst_flags_mask) != dst_flags_mask) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    /*
     * Now we are committed to sharing the area.
     * First, prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    sh_info = src_area->sh_info;
    if (!sh_info) {
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
        sh_info->refcount = 2;
        btree_create(&sh_info->pagemap);
        src_area->sh_info = sh_info;
        /*
         * Call the backend to setup sharing.
         */
        src_area->backend->share(src_area);
    } else {
        mutex_lock(&sh_info->lock);
        sh_info->refcount++;
        mutex_unlock(&sh_info->lock);
    }

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);

        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_as->lock);
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);

    interrupts_restore(ipl);

    return 0;
}

/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area		Address space area.
 * @param access	Access mode.
 *
 * @return		False if access violates area's permissions, true
 *			otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}

/** Change address space area flags.
 *
 * The idea is to have the same data, but with a different access mode.
 * This is needed e.g. for writing code into memory and then executing it.
 * In order for this to work properly, this may copy the data
 * into private anonymous memory (unless it's already there).
 *
 * @param as		Address space.
 * @param flags		Flags of the area memory.
 * @param address	Address within the area to be changed.
 *
 * @return		Zero on success or a value from @ref errno.h on failure.
 */
int as_area_change_flags(as_t *as, int flags, uintptr_t address)
{
    as_area_t *area;
    uintptr_t base;
    link_t *cur;
    ipl_t ipl;
    int page_flags;
    uintptr_t *old_frame;
    index_t frame_idx;
    count_t used_pages;

    /* Flags for the new memory mapping */
    page_flags = area_flags_to_page_flags(flags);

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->sh_info || area->backend != &anon_backend) {
        /* Copying shared areas not supported yet */
        /* Copying non-anonymous memory not supported yet */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    base = area->base;

    /*
     * Compute total number of used pages in the used_space B+tree
     */
    used_pages = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            used_pages += (count_t) node->value[i];
        }
    }

    /* An array for storing frame numbers */
    old_frame = malloc(used_pages * sizeof(uintptr_t), 0);

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /*
     * Remove used pages from page tables and remember their frame
     * numbers.
     */
    frame_idx = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;
            pte_t *pte;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                old_frame[frame_idx++] = PTE_GET_FRAME(pte);

                /* Remove old mapping */
                page_mapping_remove(as, b + j * PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);
    /*
     * Invalidate potential software translation caches (e.g. TSB on
     * sparc64).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize();

    /*
     * Set the new flags.
     */
    area->flags = flags;

    /*
     * Map pages back in with new flags. This step is kept separate
     * so that the memory area cannot be accessed with both the old and
     * the new flags at once.
     */
    frame_idx = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);

                /* Insert the new mapping */
                page_mapping_insert(as, b + j * PAGE_SIZE,
                    old_frame[frame_idx++], page_flags);

                page_table_unlock(as, false);
            }
        }
    }

    free(old_frame);

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}

925 | * |
||
3384 | jermar | 926 | * This is the high-level page fault handler. It decides whether the page fault |
927 | * can be resolved by any backend and if so, it invokes the backend to resolve |
||
928 | * the page fault. |
||
1409 | jermar | 929 | * |
703 | jermar | 930 | * Interrupts are assumed disabled. |
931 | * |
||
3384 | jermar | 932 | * @param page Faulting page. |
933 | * @param access Access mode that caused the page fault (i.e. |
||
934 | * read/write/exec). |
||
935 | * @param istate Pointer to the interrupted state. |
||
703 | jermar | 936 | * |
3384 | jermar | 937 | * @return AS_PF_FAULT on page fault, AS_PF_OK on success or |
938 | * AS_PF_DEFER if the fault was caused by copy_to_uspace() |
||
939 | * or copy_from_uspace(). |
||
703 | jermar | 940 | */ |
1780 | jermar | 941 | int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) |
703 | jermar | 942 | { |
1044 | jermar | 943 | pte_t *pte; |
977 | jermar | 944 | as_area_t *area; |
703 | jermar | 945 | |
1380 | jermar | 946 | if (!THREAD) |
1409 | jermar | 947 | return AS_PF_FAULT; |
1380 | jermar | 948 | |
703 | jermar | 949 | ASSERT(AS); |
1044 | jermar | 950 | |
1380 | jermar | 951 | mutex_lock(&AS->lock); |
977 | jermar | 952 | area = find_area_and_lock(AS, page); |
703 | jermar | 953 | if (!area) { |
954 | /* |
||
955 | * No area contained mapping for 'page'. |
||
956 | * Signal page fault to low-level handler. |
||
957 | */ |
||
1380 | jermar | 958 | mutex_unlock(&AS->lock); |
1288 | jermar | 959 | goto page_fault; |
703 | jermar | 960 | } |
961 | |||
1239 | jermar | 962 | if (area->attributes & AS_AREA_ATTR_PARTIAL) { |
963 | /* |
||
964 | * The address space area is not fully initialized. |
||
965 | * Avoid possible race by returning error. |
||
966 | */ |
||
1380 | jermar | 967 | mutex_unlock(&area->lock); |
968 | mutex_unlock(&AS->lock); |
||
1288 | jermar | 969 | goto page_fault; |
1239 | jermar | 970 | } |
971 | |||
1424 | jermar | 972 | if (!area->backend || !area->backend->page_fault) { |
1409 | jermar | 973 | /* |
974 | * The address space area is not backed by any backend |
||
975 | * or the backend cannot handle page faults. |
||
976 | */ |
||
977 | mutex_unlock(&area->lock); |
||
978 | mutex_unlock(&AS->lock); |
||
979 | goto page_fault; |
||
980 | } |
||
1179 | jermar | 981 | |
1044 | jermar | 982 | page_table_lock(AS, false); |
983 | |||
703 | jermar | 984 | /* |
3384 | jermar | 985 | * To avoid race condition between two page faults on the same address, |
986 | * we need to make sure the mapping has not been already inserted. |
||
1044 | jermar | 987 | */ |
988 | if ((pte = page_mapping_find(AS, page))) { |
||
989 | if (PTE_PRESENT(pte)) { |
||
1423 | jermar | 990 | if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) || |
2087 | jermar | 991 | (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) || |
992 | (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) { |
||
1423 | jermar | 993 | page_table_unlock(AS, false); |
994 | mutex_unlock(&area->lock); |
||
995 | mutex_unlock(&AS->lock); |
||
996 | return AS_PF_OK; |
||
997 | } |
||
1044 | jermar | 998 | } |
999 | } |
||
1409 | jermar | 1000 | |
1044 | jermar | 1001 | /* |
1409 | jermar | 1002 | * Resort to the backend page fault handler. |
703 | jermar | 1003 | */ |
1424 | jermar | 1004 | if (area->backend->page_fault(area, page, access) != AS_PF_OK) { |
1409 | jermar | 1005 | page_table_unlock(AS, false); |
1006 | mutex_unlock(&area->lock); |
||
1007 | mutex_unlock(&AS->lock); |
||
1008 | goto page_fault; |
||
1009 | } |
||
703 | jermar | 1010 | |
1044 | jermar | 1011 | page_table_unlock(AS, false); |
1380 | jermar | 1012 | mutex_unlock(&area->lock); |
1013 | mutex_unlock(&AS->lock); |
||
1288 | jermar | 1014 | return AS_PF_OK; |
1015 | |||
1016 | page_fault: |
||
1017 | if (THREAD->in_copy_from_uspace) { |
||
1018 | THREAD->in_copy_from_uspace = false; |
||
2087 | jermar | 1019 | istate_set_retaddr(istate, |
1020 | (uintptr_t) &memcpy_from_uspace_failover_address); |
||
1288 | jermar | 1021 | } else if (THREAD->in_copy_to_uspace) { |
1022 | THREAD->in_copy_to_uspace = false; |
||
2087 | jermar | 1023 | istate_set_retaddr(istate, |
1024 | (uintptr_t) &memcpy_to_uspace_failover_address); |
||
1288 | jermar | 1025 | } else { |
1026 | return AS_PF_FAULT; |
||
1027 | } |
||
1028 | |||
1029 | return AS_PF_DEFER; |
||
703 | jermar | 1030 | } |
1031 | |||
/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
 * thing which is forbidden in this context is locking the address space.
 *
 * When this function is entered, no spinlocks may be held.
 *
 * @param old_as	Old address space or NULL.
 * @param new_as	New address space.
 */
void as_switch(as_t *old_as, as_t *new_as)
{
    DEADLOCK_PROBE_INIT(p_asidlock);
    preemption_disable();
retry:
    (void) interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        /*
         * Avoid deadlock with TLB shootdown.
         * We can enable interrupts here because
         * preemption is disabled. We should not be
         * holding any other lock.
         */
        (void) interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }
    preemption_enable();

    /*
     * First, take care of the old address space.
     */
    if (old_as) {
        ASSERT(old_as->cpu_refcount);
        if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old_as->asid != ASID_INVALID);
            list_append(&old_as->inactive_as_with_asid_link,
                &inactive_as_with_asid_head);
        }

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old_as);
    }

    /*
     * Second, prepare the new address space.
     */
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
        if (new_as->asid != ASID_INVALID)
            list_remove(&new_as->inactive_as_with_asid_link);
        else
            new_as->asid = asid_get();
    }
#ifdef AS_PAGE_TABLE
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new_as);

    spinlock_unlock(&asidlock);

    AS = new_as;
}

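/*
 * A sketch of the caller's side (not part of the original file): the
 * scheduler, roughly, switches only when the incoming thread's task owns
 * a different address space than the one currently installed.
 *
 * @code
 *    as_t *new_as = THREAD->task->as;
 *    if (AS != new_as)
 *        as_switch(AS, new_as);
 * @endcode
 */
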
/** Convert address space area flags to page flags.
 *
 * @param aflags	Flags of some address space area.
 *
 * @return		Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}

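/*
 * For example (not part of the original file), a readable, executable and
 * cacheable area translates to:
 *
 * @code
 *    area_flags_to_page_flags(AS_AREA_READ | AS_AREA_EXEC |
 *        AS_AREA_CACHEABLE) ==
 *        (PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_EXEC |
 *        PAGE_CACHEABLE)
 * @endcode
 */
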
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a		Address space area.
 *
 * @return		Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either an address space private or a
 * global page table.
 *
 * @param flags		Flags saying whether the page table is for the kernel
 *			address space.
 *
 * @return		First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table	Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as		Address space.
 * @param lock		If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as		Address space.
 * @param unlock	If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as		Address space.
 * @param va		Virtual address.
 *
 * @return		Locked address space area containing va on success or
 *			NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    unsigned int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
    if (lnode) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as		Address space.
 * @param va		Starting virtual address of the area being tested.
 * @param size		Size of the area being tested.
 * @param avoid_area	Do not touch this area.
 *
 * @return		True if there is no conflict, false otherwise.
 */
bool
check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    unsigned int i;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
    if (node) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check if it doesn't conflict with kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START,
            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/** Return size of the address space area with given base.
 *
 * @param base		Arbitrary address inside the address space area.
 *
 * @return		Size of the address space area in bytes or zero if it
 *			does not exist.
 */
size_t as_area_get_size(uintptr_t base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}

1387 | jermar | 1384 | /** Mark portion of address space area as used. |
1385 | * |
||
1386 | * The address space area must be already locked. |
||
1387 | * |
||
3384 | jermar | 1388 | * @param a Address space area. |
1389 | * @param page First page to be marked. |
||
1390 | * @param count Number of page to be marked. |
||
1387 | jermar | 1391 | * |
3384 | jermar | 1392 | * @return Zero on failure and non-zero on success. |
1387 | jermar | 1393 | */ |
1780 | jermar | 1394 | int used_space_insert(as_area_t *a, uintptr_t page, count_t count) |
1387 | jermar | 1395 | { |
1396 | btree_node_t *leaf, *node; |
||
1397 | count_t pages; |
||
2745 | decky | 1398 | unsigned int i; |
1387 | jermar | 1399 | |
1400 | ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); |
||
1401 | ASSERT(count); |
||
1402 | |||
1403 | pages = (count_t) btree_search(&a->used_space, page, &leaf); |
||
1404 | if (pages) { |
||
1405 | /* |
||
1406 | * We hit the beginning of some used space. |
||
1407 | */ |
||
1408 | return 0; |
||
1409 | } |
||
1410 | |||
1437 | jermar | 1411 | if (!leaf->keys) { |
1412 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1413 | return 1; |
||
1414 | } |
||
1415 | |||
1387 | jermar | 1416 | node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1417 | if (node) {
2087 | jermar | 1418 | uintptr_t left_pg = node->key[node->keys - 1];
1419 | uintptr_t right_pg = leaf->key[0];
1420 | count_t left_cnt = (count_t) node->value[node->keys - 1];
1421 | count_t right_cnt = (count_t) leaf->value[0];
1387 | jermar | 1422 | |||
1423 | /*
1424 | * Examine the possibility that the interval fits
1425 | * somewhere between the rightmost interval of
1426 | * the left neighbour and the first interval of the leaf.
1427 | */
1428 | |||
1429 | if (page >= right_pg) {
1430 | /* Do nothing. */
2087 | jermar | 1431 | } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1432 | left_cnt * PAGE_SIZE)) {
1387 | jermar | 1433 | /* The interval intersects with the left interval. */
1434 | return 0;
2087 | jermar | 1435 | } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1436 | right_cnt * PAGE_SIZE)) {
1387 | jermar | 1437 | /* The interval intersects with the right interval. */
1438 | return 0;
2087 | jermar | 1439 | } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1440 | (page + count * PAGE_SIZE == right_pg)) {
1441 | /*
1442 | * The interval can be added by merging the two already
1443 | * present intervals.
1444 | */
1403 | jermar | 1445 | node->value[node->keys - 1] += count + right_cnt;
1387 | jermar | 1446 | btree_remove(&a->used_space, right_pg, leaf);
1447 | return 1;
2087 | jermar | 1448 | } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1449 | /*
1450 | * The interval can be added by simply growing the left
1451 | * interval.
1452 | */
1403 | jermar | 1453 | node->value[node->keys - 1] += count;
1387 | jermar | 1454 | return 1;
2087 | jermar | 1455 | } else if (page + count * PAGE_SIZE == right_pg) {
1387 | jermar | 1456 | /*
2087 | jermar | 1457 | * The interval can be added by simply moving the base of
1458 | * the right interval down and increasing its size
1459 | * accordingly.
1387 | jermar | 1460 | */
1403 | jermar | 1461 | leaf->value[0] += count;
1387 | jermar | 1462 | leaf->key[0] = page;
1463 | return 1;
1464 | } else {
1465 | /*
1466 | * The interval is between both neighbouring intervals,
1467 | * but cannot be merged with any of them.
1468 | */
2087 | jermar | 1469 | btree_insert(&a->used_space, page, (void *) count,
1470 | leaf);
1387 | jermar | 1471 | return 1;
1472 | }
1473 | } else if (page < leaf->key[0]) {
1780 | jermar | 1474 | uintptr_t right_pg = leaf->key[0];
1387 | jermar | 1475 | count_t right_cnt = (count_t) leaf->value[0];
1476 | |||
1477 | /*
2087 | jermar | 1478 | * Investigate the border case in which the left neighbour does
1479 | * not exist but the interval fits from the left.
1387 | jermar | 1480 | */
1481 | |||
2087 | jermar | 1482 | if (overlaps(page, count * PAGE_SIZE, right_pg,
1483 | right_cnt * PAGE_SIZE)) {
1387 | jermar | 1484 | /* The interval intersects with the right interval. */
1485 | return 0;
2087 | jermar | 1486 | } else if (page + count * PAGE_SIZE == right_pg) {
1387 | jermar | 1487 | /*
2087 | jermar | 1488 | * The interval can be added by moving the base of the
1489 | * right interval down and increasing its size
1490 | * accordingly.
1387 | jermar | 1491 | */
1492 | leaf->key[0] = page;
1403 | jermar | 1493 | leaf->value[0] += count;
1387 | jermar | 1494 | return 1;
1495 | } else {
1496 | /*
1497 | * The interval doesn't adjoin the right interval.
1498 | * It must be added individually.
1499 | */
2087 | jermar | 1500 | btree_insert(&a->used_space, page, (void *) count,
1501 | leaf);
1387 | jermar | 1502 | return 1;
1503 | }
1504 | }
1505 | |||
1506 | node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1507 | if (node) {
2087 | jermar | 1508 | uintptr_t left_pg = leaf->key[leaf->keys - 1];
1509 | uintptr_t right_pg = node->key[0];
1510 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1511 | count_t right_cnt = (count_t) node->value[0];
1387 | jermar | 1512 | |||
1513 | /*
1514 | * Examine the possibility that the interval fits
1515 | * somewhere between the leftmost interval of
1516 | * the right neighbour and the last interval of the leaf.
1517 | */
1518 | |||
1519 | if (page < left_pg) {
1520 | /* Do nothing. */
2087 | jermar | 1521 | } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1522 | left_cnt * PAGE_SIZE)) {
1387 | jermar | 1523 | /* The interval intersects with the left interval. */
1524 | return 0;
2087 | jermar | 1525 | } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1526 | right_cnt * PAGE_SIZE)) {
1387 | jermar | 1527 | /* The interval intersects with the right interval. */
1528 | return 0;
2087 | jermar | 1529 | } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1530 | (page + count * PAGE_SIZE == right_pg)) {
1531 | /*
1532 | * The interval can be added by merging the two already
1533 | * present intervals.
1534 | */
1403 | jermar | 1535 | leaf->value[leaf->keys - 1] += count + right_cnt;
1387 | jermar | 1536 | btree_remove(&a->used_space, right_pg, node);
1537 | return 1;
2087 | jermar | 1538 | } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1539 | /*
1540 | * The interval can be added by simply growing the left
1541 | * interval.
1542 | */
1403 | jermar | 1543 | leaf->value[leaf->keys - 1] += count;
1387 | jermar | 1544 | return 1;
2087 | jermar | 1545 | } else if (page + count * PAGE_SIZE == right_pg) {
1387 | jermar | 1546 | /*
2087 | jermar | 1547 | * The interval can be added by simply moving the base of
1548 | * the right interval down and increasing its size
1549 | * accordingly.
1387 | jermar | 1550 | */
1403 | jermar | 1551 | node->value[0] += count;
1387 | jermar | 1552 | node->key[0] = page;
1553 | return 1;
1554 | } else {
1555 | /*
1556 | * The interval is between both neighbouring intervals,
1557 | * but cannot be merged with any of them.
1558 | */
2087 | jermar | 1559 | btree_insert(&a->used_space, page, (void *) count,
1560 | leaf);
1387 | jermar | 1561 | return 1;
1562 | }
1563 | } else if (page >= leaf->key[leaf->keys - 1]) {
1780 | jermar | 1564 | uintptr_t left_pg = leaf->key[leaf->keys - 1];
1387 | jermar | 1565 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1566 | |||
1567 | /*
2087 | jermar | 1568 | * Investigate the border case in which the right neighbour
1569 | * does not exist but the interval fits from the right.
1387 | jermar | 1570 | */
1571 | |||
2087 | jermar | 1572 | if (overlaps(page, count * PAGE_SIZE, left_pg,
1573 | left_cnt * PAGE_SIZE)) {
1403 | jermar | 1574 | /* The interval intersects with the left interval. */
1387 | jermar | 1575 | return 0;
2087 | jermar | 1576 | } else if (left_pg + left_cnt * PAGE_SIZE == page) {
1577 | /*
1578 | * The interval can be added by growing the left
1579 | * interval.
1580 | */
1403 | jermar | 1581 | leaf->value[leaf->keys - 1] += count;
1387 | jermar | 1582 | return 1;
1583 | } else {
1584 | /*
1585 | * The interval doesn't adjoin the left interval.
1586 | * It must be added individually.
1587 | */
2087 | jermar | 1588 | btree_insert(&a->used_space, page, (void *) count,
1589 | leaf);
1387 | jermar | 1590 | return 1;
1591 | }
1592 | }
1593 | |||
1594 | /*
2087 | jermar | 1595 | * Note that if the algorithm made it thus far, the interval can fit
1596 | * only between two other intervals of the leaf. The two border cases
1597 | * were already resolved.
1387 | jermar | 1598 | */
1599 | for (i = 1; i < leaf->keys; i++) {
1600 | if (page < leaf->key[i]) {
2087 | jermar | 1601 | uintptr_t left_pg = leaf->key[i - 1];
1602 | uintptr_t right_pg = leaf->key[i];
1603 | count_t left_cnt = (count_t) leaf->value[i - 1];
1604 | count_t right_cnt = (count_t) leaf->value[i];
1387 | jermar | 1605 | |||
1606 | /*
1607 | * The interval fits between left_pg and right_pg.
1608 | */
1609 | |||
2087 | jermar | 1610 | if (overlaps(page, count * PAGE_SIZE, left_pg,
1611 | left_cnt * PAGE_SIZE)) {
1612 | /*
1613 | * The interval intersects with the left
1614 | * interval.
1615 | */
1387 | jermar | 1616 | return 0;
2087 | jermar | 1617 | } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1618 | right_cnt * PAGE_SIZE)) {
1619 | /*
1620 | * The interval intersects with the right
1621 | * interval.
1622 | */
1387 | jermar | 1623 | return 0;
2087 | jermar | 1624 | } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1625 | (page + count * PAGE_SIZE == right_pg)) {
1626 | /*
1627 | * The interval can be added by merging the two
1628 | * already present intervals.
1629 | */
1403 | jermar | 1630 | leaf->value[i - 1] += count + right_cnt;
1387 | jermar | 1631 | btree_remove(&a->used_space, right_pg, leaf);
1632 | return 1;
2087 | jermar | 1633 | } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1634 | /*
1635 | * The interval can be added by simply growing
1636 | * the left interval.
1637 | */
1403 | jermar | 1638 | leaf->value[i - 1] += count;
1387 | jermar | 1639 | return 1;
2087 | jermar | 1640 | } else if (page + count * PAGE_SIZE == right_pg) {
1387 | jermar | 1641 | /*
2087 | jermar | 1642 | * The interval can be added by simply moving
1643 | * the base of the right interval down and
1644 | * increasing its size accordingly.
1387 | jermar | 1645 | */
1403 | jermar | 1646 | leaf->value[i] += count;
1387 | jermar | 1647 | leaf->key[i] = page;
1648 | return 1;
1649 | } else {
1650 | /*
2087 | jermar | 1651 | * The interval is between both neighbouring
1652 | * intervals, but cannot be merged with any of
1653 | * them.
1387 | jermar | 1654 | */
2087 | jermar | 1655 | btree_insert(&a->used_space, page,
1656 | (void *) count, leaf);
1387 | jermar | 1657 | return 1;
1658 | }
1659 | }
1660 | }
1661 | |||
3384 | jermar | 1662 | panic("Inconsistency detected while adding %" PRIc " pages of used "
1663 | "space at %p.\n", count, page);
1387 | jermar | 1664 | }
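/*
 * An illustrative merge scenario, assuming PAGE_SIZE == 0x1000 and an
 * area a whose used_space already records the intervals
 * [0x10000, 4 pages) and [0x16000, 2 pages); the call and the numbers
 * are hypothetical:
 *
 *	used_space_insert(a, 0x14000, 2);
 *
 * The new interval starts exactly where the left interval ends and ends
 * exactly where the right interval begins, so the two recorded intervals
 * are merged into a single interval [0x10000, 8 pages).
 */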
1665 | |||
1666 | /** Mark portion of address space area as unused.
1667 | *
1668 | * The address space area must already be locked.
1669 | *
3384 | jermar | 1670 | * @param a Address space area.
1671 | * @param page First page to be marked as unused.
1672 | * @param count Number of pages to be marked as unused.
1387 | jermar | 1673 | *
3384 | jermar | 1674 | * @return Zero on failure and non-zero on success.
1387 | jermar | 1675 | */
1780 | jermar | 1676 | int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
1387 | jermar | 1677 | {
1678 | btree_node_t *leaf, *node;
1679 | count_t pages;
2745 | decky | 1680 | unsigned int i;
1387 | jermar | 1681 | |||
1682 | ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1683 | ASSERT(count);
1684 | |||
1685 | pages = (count_t) btree_search(&a->used_space, page, &leaf);
1686 | if (pages) {
1687 | /*
1688 | * We are lucky: page is the beginning of some interval.
1689 | */
1690 | if (count > pages) {
1691 | return 0;
1692 | } else if (count == pages) {
1693 | btree_remove(&a->used_space, page, leaf);
1403 | jermar | 1694 | return 1;
1387 | jermar | 1695 | } else {
1696 | /*
1697 | * Find the respective interval.
1698 | * Decrease its size and relocate its start address.
1699 | */
1700 | for (i = 0; i < leaf->keys; i++) {
1701 | if (leaf->key[i] == page) {
2087 | jermar | 1702 | leaf->key[i] += count * PAGE_SIZE;
1403 | jermar | 1703 | leaf->value[i] -= count;
1387 | jermar | 1704 | return 1;
1705 | }
1706 | }
1707 | goto error;
1708 | }
1709 | }
1710 | |||
1711 | node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1712 | if (node && page < leaf->key[0]) {
1780 | jermar | 1713 | uintptr_t left_pg = node->key[node->keys - 1];
1387 | jermar | 1714 | count_t left_cnt = (count_t) node->value[node->keys - 1];
1715 | |||
2087 | jermar | 1716 | if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1717 | count * PAGE_SIZE)) {
1718 | if (page + count * PAGE_SIZE ==
1719 | left_pg + left_cnt * PAGE_SIZE) {
1387 | jermar | 1720 | /*
2087 | jermar | 1721 | * The interval is contained in the rightmost
1722 | * interval of the left neighbour and can be
1723 | * removed by updating the size of the bigger
1724 | * interval.
1387 | jermar | 1725 | */
1403 | jermar | 1726 | node->value[node->keys - 1] -= count;
1387 | jermar | 1727 | return 1;
2087 | jermar | 1728 | } else if (page + count * PAGE_SIZE <
1729 | left_pg + left_cnt * PAGE_SIZE) {
1403 | jermar | 1730 | count_t new_cnt;
1387 | jermar | 1731 | |||
1732 | /*
2087 | jermar | 1733 | * The interval is contained in the rightmost
1734 | * interval of the left neighbour but its
1735 | * removal requires both updating the size of
1736 | * the original interval and also inserting a
1737 | * new interval.
1387 | jermar | 1738 | */
2087 | jermar | 1739 | new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1740 | (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
1403 | jermar | 1741 | node->value[node->keys - 1] -= count + new_cnt;
2087 | jermar | 1742 | btree_insert(&a->used_space, page +
1743 | count * PAGE_SIZE, (void *) new_cnt, leaf);
1387 | jermar | 1744 | return 1;
1745 | }
1746 | }
1747 | return 0;
1748 | } else if (page < leaf->key[0]) {
1749 | return 0;
1750 | }
1751 | |||
1752 | if (page > leaf->key[leaf->keys - 1]) {
1780 | jermar | 1753 | uintptr_t left_pg = leaf->key[leaf->keys - 1];
1387 | jermar | 1754 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1755 | |||
2087 | jermar | 1756 | if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1757 | count * PAGE_SIZE)) {
1758 | if (page + count * PAGE_SIZE ==
1759 | left_pg + left_cnt * PAGE_SIZE) {
1387 | jermar | 1760 | /*
2087 | jermar | 1761 | * The interval is contained in the rightmost
1762 | * interval of the leaf and can be removed by
1763 | * updating the size of the bigger interval.
1387 | jermar | 1764 | */
1403 | jermar | 1765 | leaf->value[leaf->keys - 1] -= count;
1387 | jermar | 1766 | return 1;
2087 | jermar | 1767 | } else if (page + count * PAGE_SIZE < left_pg +
1768 | left_cnt * PAGE_SIZE) {
1403 | jermar | 1769 | count_t new_cnt;
1387 | jermar | 1770 | |||
1771 | /*
2087 | jermar | 1772 | * The interval is contained in the rightmost
1773 | * interval of the leaf but its removal
1774 | * requires both updating the size of the
1775 | * original interval and also inserting a new
1776 | * interval.
1387 | jermar | 1777 | */
2087 | jermar | 1778 | new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1779 | (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
1403 | jermar | 1780 | leaf->value[leaf->keys - 1] -= count + new_cnt;
2087 | jermar | 1781 | btree_insert(&a->used_space, page +
1782 | count * PAGE_SIZE, (void *) new_cnt, leaf);
1387 | jermar | 1783 | return 1;
1784 | }
1785 | }
1786 | return 0;
1787 | }
1788 | |||
1789 | /*
1790 | * The border cases have already been resolved.
1791 | * Now the interval can be only between intervals of the leaf.
1792 | */
1793 | for (i = 1; i < leaf->keys - 1; i++) {
1794 | if (page < leaf->key[i]) {
1780 | jermar | 1795 | uintptr_t left_pg = leaf->key[i - 1];
1387 | jermar | 1796 | count_t left_cnt = (count_t) leaf->value[i - 1];
1797 | |||
1798 | /*
2087 | jermar | 1799 | * Now the interval is between intervals corresponding
1800 | * to (i - 1) and i.
1387 | jermar | 1801 | */
2087 | jermar | 1802 | if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1803 | count * PAGE_SIZE)) {
1804 | if (page + count * PAGE_SIZE ==
1805 | left_pg + left_cnt * PAGE_SIZE) {
1387 | jermar | 1806 | /*
2087 | jermar | 1807 | * The interval is contained in the
1808 | * interval (i - 1) of the leaf and can
1809 | * be removed by updating the size of
1810 | * the bigger interval.
1387 | jermar | 1811 | */
1403 | jermar | 1812 | leaf->value[i - 1] -= count;
1387 | jermar | 1813 | return 1;
2087 | jermar | 1814 | } else if (page + count * PAGE_SIZE <
1815 | left_pg + left_cnt * PAGE_SIZE) {
1403 | jermar | 1816 | count_t new_cnt;
1387 | jermar | 1817 | |||
1818 | /*
2087 | jermar | 1819 | * The interval is contained in the
1820 | * interval (i - 1) of the leaf but its
1821 | * removal requires both updating the
1822 | * size of the original interval and
1387 | jermar | 1823 | * also inserting a new interval.
1824 | */
2087 | jermar | 1825 | new_cnt = ((left_pg +
1826 | left_cnt * PAGE_SIZE) -
1827 | (page + count * PAGE_SIZE)) >>
1828 | PAGE_WIDTH;
1403 | jermar | 1829 | leaf->value[i - 1] -= count + new_cnt;
2087 | jermar | 1830 | btree_insert(&a->used_space, page +
1831 | count * PAGE_SIZE, (void *) new_cnt,
1832 | leaf);
1387 | jermar | 1833 | return 1;
1834 | }
1835 | }
1836 | return 0;
1837 | }
1838 | }
1839 | |||
1840 | error:
3384 | jermar | 1841 | panic("Inconsistency detected while removing %" PRIc " pages of used "
1842 | "space from %p.\n", count, page);
1387 | jermar | 1843 | }
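/*
 * An illustrative split scenario, assuming PAGE_SIZE == 0x1000 and an
 * area a whose used_space records the single interval [0x10000, 8 pages);
 * the call and the numbers are hypothetical:
 *
 *	used_space_remove(a, 0x12000, 2);
 *
 * The removed range lies strictly inside the recorded interval, so that
 * interval is shrunk to [0x10000, 2 pages) and a new interval
 * [0x14000, 4 pages) is inserted for the pages beyond the removed range.
 */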
1844 | |||
1409 | jermar | 1845 | /** Remove reference to address space area share info.
1846 | *
1847 | * If the reference count drops to 0, the sh_info is deallocated.
1848 | *
3384 | jermar | 1849 | * @param sh_info Pointer to address space area share info.
1409 | jermar | 1850 | */
1851 | void sh_info_remove_reference(share_info_t *sh_info)
1852 | {
1853 | bool dealloc = false;
1854 | |||
1855 | mutex_lock(&sh_info->lock);
1856 | ASSERT(sh_info->refcount);
1857 | if (--sh_info->refcount == 0) {
1858 | dealloc = true;
1495 | jermar | 1859 | link_t *cur;
1409 | jermar | 1860 | |||
1861 | /*
1862 | * Now carefully walk the pagemap B+tree and free/remove
1863 | * the reference to all frames found there.
1864 | */
2087 | jermar | 1865 | for (cur = sh_info->pagemap.leaf_head.next;
1866 | cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1409 | jermar | 1867 | btree_node_t *node;
2745 | decky | 1868 | unsigned int i;
1409 | jermar | 1869 | |||
1495 | jermar | 1870 | node = list_get_instance(cur, btree_node_t, leaf_link);
1871 | for (i = 0; i < node->keys; i++)
1780 | jermar | 1872 | frame_free((uintptr_t) node->value[i]);
1409 | jermar | 1873 | }
1874 | |||
1875 | }
1876 | mutex_unlock(&sh_info->lock);
1877 | |||
1878 | if (dealloc) {
1879 | btree_destroy(&sh_info->pagemap);
1880 | free(sh_info);
1881 | }
1882 | }
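/*
 * For symmetry, a minimal sketch of how a reference would be taken by the
 * sharing code elsewhere; illustrative only:
 *
 *	mutex_lock(&sh_info->lock);
 *	sh_info->refcount++;
 *	mutex_unlock(&sh_info->lock);
 */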
1883 | |||
1235 | jermar | 1884 | /*
1885 | * Address space related syscalls.
1886 | */
1887 | |||
1888 | /** Wrapper for as_area_create(). */
1780 | jermar | 1889 | unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1235 | jermar | 1890 | {
2087 | jermar | 1891 | if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
1892 | AS_AREA_ATTR_NONE, &anon_backend, NULL))
1780 | jermar | 1893 | return (unative_t) address;
1235 | jermar | 1894 | else
1780 | jermar | 1895 | return (unative_t) -1;
1235 | jermar | 1896 | }
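/*
 * A hypothetical invocation from the syscall path; the base address, size
 * and flag combination below are illustrative only:
 *
 *	unative_t rc = sys_as_area_create((uintptr_t) 0x40000000,
 *	    4 * PAGE_SIZE, AS_AREA_READ | AS_AREA_WRITE);
 *
 * On success, rc equals the requested base address; on failure, it is
 * (unative_t) -1.
 */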
1897 | |||
1793 | jermar | 1898 | /** Wrapper for as_area_resize(). */ |
1780 | jermar | 1899 | unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags) |
1235 | jermar | 1900 | { |
1780 | jermar | 1901 | return (unative_t) as_area_resize(AS, address, size, 0); |
1235 | jermar | 1902 | } |
1903 | |||
3222 | svoboda | 1904 | /** Wrapper for as_area_change_flags(). */
1905 | unative_t sys_as_area_change_flags(uintptr_t address, int flags)
1906 | {
1907 | return (unative_t) as_area_change_flags(AS, flags, address);
1908 | }
1909 | |||
1793 | jermar | 1910 | /** Wrapper for as_area_destroy(). */ |
1780 | jermar | 1911 | unative_t sys_as_area_destroy(uintptr_t address) |
1306 | jermar | 1912 | { |
1780 | jermar | 1913 | return (unative_t) as_area_destroy(AS, address); |
1306 | jermar | 1914 | } |
1702 | cejka | 1915 | |
1914 | jermar | 1916 | /** Print out information about address space.
1917 | *
3384 | jermar | 1918 | * @param as Address space.
1914 | jermar | 1919 | */
1920 | void as_print(as_t *as)
1921 | {
1922 | ipl_t ipl;
1923 | |||
1924 | ipl = interrupts_disable();
1925 | mutex_lock(&as->lock);
1926 | |||
1927 | /* print out info about address space areas */
1928 | link_t *cur;
2087 | jermar | 1929 | for (cur = as->as_area_btree.leaf_head.next;
1930 | cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1931 | btree_node_t *node;
1914 | jermar | 1932 | |||
2087 | jermar | 1933 | node = list_get_instance(cur, btree_node_t, leaf_link);
1934 | |||
2745 | decky | 1935 | unsigned int i;
1914 | jermar | 1936 | for (i = 0; i < node->keys; i++) {
1915 | jermar | 1937 | as_area_t *area = node->value[i];
1914 | jermar | 1938 | |||
1939 | mutex_lock(&area->lock);
3384 | jermar | 1940 | printf("as_area: %p, base=%p, pages=%" PRIc
1941 | " (%p - %p)\n", area, area->base, area->pages,
1942 | area->base, area->base + FRAMES2SIZE(area->pages));
1914 | jermar | 1943 | mutex_unlock(&area->lock);
1944 | }
1945 | }
1946 | |||
1947 | mutex_unlock(&as->lock);
1948 | interrupts_restore(ipl);
1949 | }
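/*
 * A hypothetical sample of the output produced above (all numbers are
 * illustrative only):
 *
 *	as_area: 0xf1234500, base=0x40000000, pages=16 (0x40000000 - 0x40010000)
 */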
1950 | |||
1757 | jermar | 1951 | /** @} |
1702 | cejka | 1952 | */ |