Rev | Author | Line No. | Line |
---|---|---|---|
703 | jermar | 1 | /* |
2 | * Copyright (C) 2001-2006 Jakub Jermar |
||
3 | * All rights reserved. |
||
4 | * |
||
5 | * Redistribution and use in source and binary forms, with or without |
||
6 | * modification, are permitted provided that the following conditions |
||
7 | * are met: |
||
8 | * |
||
9 | * - Redistributions of source code must retain the above copyright |
||
10 | * notice, this list of conditions and the following disclaimer. |
||
11 | * - Redistributions in binary form must reproduce the above copyright |
||
12 | * notice, this list of conditions and the following disclaimer in the |
||
13 | * documentation and/or other materials provided with the distribution. |
||
14 | * - The name of the author may not be used to endorse or promote products |
||
15 | * derived from this software without specific prior written permission. |
||
16 | * |
||
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
||
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
||
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
||
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
||
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
||
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
||
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
||
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
||
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
||
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||
27 | */ |
||
28 | |||
1248 | jermar | 29 | /** |
30 | * @file as.c |
||
31 | * @brief Address space related functions. |
||
32 | * |
||
703 | jermar | 33 | * This file contains address space manipulation functions. |
34 | * Roughly speaking, this is a higher-level client of the |
35 | * Virtual Address Translation (VAT) subsystem. |
1248 | jermar | 36 | * |
37 | * Functionality provided by this file allows one to |
38 | * create an address space and to create, resize and share |
39 | * address space areas. |
||
40 | * |
||
41 | * @see page.c |
||
42 | * |
||
703 | jermar | 43 | */ |
44 | |||
45 | #include <mm/as.h> |
||
756 | jermar | 46 | #include <arch/mm/as.h> |
703 | jermar | 47 | #include <mm/page.h> |
48 | #include <mm/frame.h> |
||
814 | palkovsky | 49 | #include <mm/slab.h> |
703 | jermar | 50 | #include <mm/tlb.h> |
51 | #include <arch/mm/page.h> |
||
52 | #include <genarch/mm/page_pt.h> |
||
1108 | jermar | 53 | #include <genarch/mm/page_ht.h> |
727 | jermar | 54 | #include <mm/asid.h> |
703 | jermar | 55 | #include <arch/mm/asid.h> |
56 | #include <synch/spinlock.h> |
||
1380 | jermar | 57 | #include <synch/mutex.h> |
788 | jermar | 58 | #include <adt/list.h> |
1147 | jermar | 59 | #include <adt/btree.h> |
1235 | jermar | 60 | #include <proc/task.h> |
1288 | jermar | 61 | #include <proc/thread.h> |
1235 | jermar | 62 | #include <arch/asm.h> |
703 | jermar | 63 | #include <panic.h> |
64 | #include <debug.h> |
||
1235 | jermar | 65 | #include <print.h> |
703 | jermar | 66 | #include <memstr.h> |
1070 | jermar | 67 | #include <macros.h> |
703 | jermar | 68 | #include <arch.h> |
1235 | jermar | 69 | #include <errno.h> |
70 | #include <config.h> |
||
1387 | jermar | 71 | #include <align.h> |
1235 | jermar | 72 | #include <arch/types.h> |
73 | #include <typedefs.h> |
||
1288 | jermar | 74 | #include <syscall/copy.h> |
75 | #include <arch/interrupt.h> |
||
703 | jermar | 76 | |
756 | jermar | 77 | as_operations_t *as_operations = NULL; |
703 | jermar | 78 | |
1380 | jermar | 79 | /** Address space lock. It protects inactive_as_with_asid_head. Must be acquired before as_t mutex. */ |
823 | jermar | 80 | SPINLOCK_INITIALIZE(as_lock); |
81 | |||
82 | /** |
||
83 | * This list contains address spaces that are not active on any |
84 | * processor and that have a valid ASID. |
||
85 | */ |
||
86 | LIST_INITIALIZE(inactive_as_with_asid_head); |
||
87 | |||
757 | jermar | 88 | /** Kernel address space. */ |
89 | as_t *AS_KERNEL = NULL; |
||
90 | |||
1235 | jermar | 91 | static int area_flags_to_page_flags(int aflags); |
754 | jermar | 92 | static int get_area_flags(as_area_t *a); |
977 | jermar | 93 | static as_area_t *find_area_and_lock(as_t *as, __address va); |
1048 | jermar | 94 | static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area); |
1403 | jermar | 95 | static int used_space_insert(as_area_t *a, __address page, count_t count); |
96 | static int used_space_remove(as_area_t *a, __address page, count_t count); |
||
703 | jermar | 97 | |
756 | jermar | 98 | /** Initialize address space subsystem. */ |
99 | void as_init(void) |
||
100 | { |
||
101 | as_arch_init(); |
||
789 | palkovsky | 102 | AS_KERNEL = as_create(FLAG_AS_KERNEL); |
1383 | decky | 103 | if (!AS_KERNEL) |
104 | panic("can't create kernel address space\n"); |
||
105 | |||
756 | jermar | 106 | } |
107 | |||
757 | jermar | 108 | /** Create address space. |
109 | * |
||
110 | * @param flags Flags that influence the way in which the address space is created. |
||
111 | */ |
||
756 | jermar | 112 | as_t *as_create(int flags) |
703 | jermar | 113 | { |
114 | as_t *as; |
||
115 | |||
822 | palkovsky | 116 | as = (as_t *) malloc(sizeof(as_t), 0); |
823 | jermar | 117 | link_initialize(&as->inactive_as_with_asid_link); |
1380 | jermar | 118 | mutex_initialize(&as->lock); |
1147 | jermar | 119 | btree_create(&as->as_area_btree); |
822 | palkovsky | 120 | |
121 | if (flags & FLAG_AS_KERNEL) |
||
122 | as->asid = ASID_KERNEL; |
||
123 | else |
||
124 | as->asid = ASID_INVALID; |
||
125 | |||
823 | jermar | 126 | as->refcount = 0; |
822 | palkovsky | 127 | as->page_table = page_table_create(flags); |
703 | jermar | 128 | |
129 | return as; |
||
130 | } |
||
131 | |||
973 | palkovsky | 132 | /** Free address space. */ |
133 | void as_free(as_t *as) |
||
134 | { |
||
135 | ASSERT(as->refcount == 0); |
||
136 | |||
137 | /* TODO: free as_areas and other resources held by as */ |
||
138 | /* TODO: free page table */ |
||
139 | free(as); |
||
140 | } |
||
141 | |||
703 | jermar | 142 | /** Create address space area of common attributes. |
143 | * |
||
144 | * The created address space area is added to the target address space. |
||
145 | * |
||
146 | * @param as Target address space. |
||
1239 | jermar | 147 | * @param flags Flags of the area memory. |
1048 | jermar | 148 | * @param size Size of area. |
703 | jermar | 149 | * @param base Base address of area. |
1239 | jermar | 150 | * @param attrs Attributes of the area. |
703 | jermar | 151 | * |
152 | * @return Address space area on success or NULL on failure. |
||
153 | */ |
||
1239 | jermar | 154 | as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs) |
703 | jermar | 155 | { |
156 | ipl_t ipl; |
||
157 | as_area_t *a; |
||
158 | |||
159 | if (base % PAGE_SIZE) |
||
1048 | jermar | 160 | return NULL; |
161 | |||
1233 | jermar | 162 | if (!size) |
163 | return NULL; |
||
164 | |||
1048 | jermar | 165 | /* Writeable executable areas are not supported. */ |
166 | if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) |
||
167 | return NULL; |
||
703 | jermar | 168 | |
169 | ipl = interrupts_disable(); |
||
1380 | jermar | 170 | mutex_lock(&as->lock); |
703 | jermar | 171 | |
1048 | jermar | 172 | if (!check_area_conflicts(as, base, size, NULL)) { |
1380 | jermar | 173 | mutex_unlock(&as->lock); |
1048 | jermar | 174 | interrupts_restore(ipl); |
175 | return NULL; |
||
176 | } |
||
703 | jermar | 177 | |
822 | palkovsky | 178 | a = (as_area_t *) malloc(sizeof(as_area_t), 0); |
703 | jermar | 179 | |
1380 | jermar | 180 | mutex_initialize(&a->lock); |
822 | palkovsky | 181 | |
1026 | jermar | 182 | a->flags = flags; |
1239 | jermar | 183 | a->attributes = attrs; |
1048 | jermar | 184 | a->pages = SIZE2FRAMES(size); |
822 | palkovsky | 185 | a->base = base; |
1387 | jermar | 186 | btree_create(&a->used_space); |
822 | palkovsky | 187 | |
1147 | jermar | 188 | btree_insert(&as->as_area_btree, base, (void *) a, NULL); |
822 | palkovsky | 189 | |
1380 | jermar | 190 | mutex_unlock(&as->lock); |
703 | jermar | 191 | interrupts_restore(ipl); |
704 | jermar | 192 | |
703 | jermar | 193 | return a; |
194 | } |
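
A minimal usage sketch of the creation path shown above, assuming a kernel-side caller; the flags value 0 (an ordinary, non-kernel address space), the base address and the area size are illustrative assumptions, not constants taken from this file:

	as_t *as;
	as_area_t *area;

	as = as_create(0);	/* assumed: 0 means an ordinary user address space */
	area = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, 16 * PAGE_SIZE,
	    0x40000000 /* illustrative, page-aligned base */, AS_AREA_ATTR_NONE);
	if (!area)
		panic("as_area_create() failed\n");
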
||
195 | |||
1235 | jermar | 196 | /** Find address space area and change it. |
197 | * |
||
198 | * @param as Address space. |
||
199 | * @param address Virtual address belonging to the area to be changed. Must be page-aligned. |
||
200 | * @param size New size of the virtual memory block starting at address. |
||
201 | * @param flags Flags influencing the remap operation. Currently unused. |
||
202 | * |
||
1306 | jermar | 203 | * @return Zero on success or a value from @ref errno.h otherwise. |
1235 | jermar | 204 | */ |
1306 | jermar | 205 | int as_area_resize(as_t *as, __address address, size_t size, int flags) |
1235 | jermar | 206 | { |
1306 | jermar | 207 | as_area_t *area; |
1235 | jermar | 208 | ipl_t ipl; |
209 | size_t pages; |
||
210 | |||
211 | ipl = interrupts_disable(); |
||
1380 | jermar | 212 | mutex_lock(&as->lock); |
1235 | jermar | 213 | |
214 | /* |
||
215 | * Locate the area. |
||
216 | */ |
||
217 | area = find_area_and_lock(as, address); |
||
218 | if (!area) { |
||
1380 | jermar | 219 | mutex_unlock(&as->lock); |
1235 | jermar | 220 | interrupts_restore(ipl); |
1306 | jermar | 221 | return ENOENT; |
1235 | jermar | 222 | } |
223 | |||
224 | if (area->flags & AS_AREA_DEVICE) { |
||
225 | /* |
||
226 | * Remapping of address space areas associated |
||
227 | * with memory mapped devices is not supported. |
||
228 | */ |
||
1380 | jermar | 229 | mutex_unlock(&area->lock); |
230 | mutex_unlock(&as->lock); |
||
1235 | jermar | 231 | interrupts_restore(ipl); |
1306 | jermar | 232 | return ENOTSUP; |
1235 | jermar | 233 | } |
234 | |||
235 | pages = SIZE2FRAMES((address - area->base) + size); |
||
236 | if (!pages) { |
||
237 | /* |
||
238 | * Zero size address space areas are not allowed. |
||
239 | */ |
||
1380 | jermar | 240 | mutex_unlock(&area->lock); |
241 | mutex_unlock(&as->lock); |
||
1235 | jermar | 242 | interrupts_restore(ipl); |
1306 | jermar | 243 | return EPERM; |
1235 | jermar | 244 | } |
245 | |||
246 | if (pages < area->pages) { |
||
1403 | jermar | 247 | bool cond; |
248 | __address start_free = area->base + pages*PAGE_SIZE; |
||
1235 | jermar | 249 | |
250 | /* |
||
251 | * Shrinking the area. |
||
252 | * No need to check for overlaps. |
||
253 | */ |
||
1403 | jermar | 254 | |
255 | /* |
||
256 | * Remove frames belonging to used space starting from |
||
257 | * the highest addresses downwards until an overlap with |
||
258 | * the resized address space area is found. Note that this |
||
259 | * is also the right way to remove part of the used_space |
||
260 | * B+tree leaf list. |
||
261 | */ |
||
262 | for (cond = true; cond;) { |
||
263 | btree_node_t *node; |
||
264 | |||
265 | ASSERT(!list_empty(&area->used_space.leaf_head)); |
||
266 | node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link); |
||
267 | if ((cond = (bool) node->keys)) { |
||
268 | __address b = node->key[node->keys - 1]; |
||
269 | count_t c = (count_t) node->value[node->keys - 1]; |
||
270 | int i = 0; |
||
1235 | jermar | 271 | |
1403 | jermar | 272 | if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) { |
273 | |||
274 | if (b + c*PAGE_SIZE <= start_free) { |
||
275 | /* |
||
276 | * The whole interval fits completely |
||
277 | * in the resized address space area. |
||
278 | */ |
||
279 | break; |
||
280 | } |
||
281 | |||
282 | /* |
||
283 | * Part of the interval corresponding to b and c |
||
284 | * overlaps with the resized address space area. |
||
285 | */ |
||
286 | |||
287 | cond = false; /* we are almost done */ |
||
288 | i = (start_free - b) >> PAGE_WIDTH; |
||
289 | if (!used_space_remove(area, start_free, c - i)) |
||
290 | panic("Could not remove used space."); |
||
291 | } else { |
||
292 | /* |
||
293 | * The interval of used space can be completely removed. |
||
294 | */ |
||
295 | if (!used_space_remove(area, b, c)) |
||
296 | panic("Could not remove used space.\n"); |
||
297 | } |
||
298 | |||
299 | for (; i < c; i++) { |
||
300 | pte_t *pte; |
||
301 | |||
302 | page_table_lock(as, false); |
||
303 | pte = page_mapping_find(as, b + i*PAGE_SIZE); |
||
304 | ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); |
||
305 | frame_free(ADDR2PFN(PTE_GET_FRAME(pte))); |
||
306 | page_mapping_remove(as, b + i*PAGE_SIZE); |
||
307 | page_table_unlock(as, false); |
||
308 | } |
||
1235 | jermar | 309 | } |
310 | } |
||
311 | /* |
||
312 | * Invalidate TLB's. |
||
313 | */ |
||
314 | tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages); |
||
315 | tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages); |
||
316 | tlb_shootdown_finalize(); |
||
317 | } else { |
||
318 | /* |
||
319 | * Growing the area. |
||
320 | * Check for overlaps with other address space areas. |
||
321 | */ |
||
322 | if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) { |
||
1380 | jermar | 323 | mutex_unlock(&area->lock); |
324 | mutex_unlock(&as->lock); |
||
1235 | jermar | 325 | interrupts_restore(ipl); |
1306 | jermar | 326 | return EADDRNOTAVAIL; |
1235 | jermar | 327 | } |
328 | } |
||
329 | |||
330 | area->pages = pages; |
||
331 | |||
1380 | jermar | 332 | mutex_unlock(&area->lock); |
333 | mutex_unlock(&as->lock); |
||
1235 | jermar | 334 | interrupts_restore(ipl); |
335 | |||
1306 | jermar | 336 | return 0; |
1235 | jermar | 337 | } |
338 | |||
1306 | jermar | 339 | /** Destroy address space area. |
340 | * |
||
341 | * @param as Address space. |
||
342 | * @param address Address within the area to be deleted. |
||
343 | * |
||
344 | * @return Zero on success or a value from @ref errno.h on failure. |
||
345 | */ |
||
346 | int as_area_destroy(as_t *as, __address address) |
||
347 | { |
||
348 | as_area_t *area; |
||
349 | __address base; |
||
350 | ipl_t ipl; |
||
351 | |||
352 | ipl = interrupts_disable(); |
||
1380 | jermar | 353 | mutex_lock(&as->lock); |
1306 | jermar | 354 | |
355 | area = find_area_and_lock(as, address); |
||
356 | if (!area) { |
||
1380 | jermar | 357 | mutex_unlock(&as->lock); |
1306 | jermar | 358 | interrupts_restore(ipl); |
359 | return ENOENT; |
||
360 | } |
||
361 | |||
1403 | jermar | 362 | base = area->base; |
363 | if (!(area->flags & AS_AREA_DEVICE)) { |
||
364 | bool cond; |
||
365 | |||
1306 | jermar | 366 | /* |
367 | * Releasing physical memory. |
||
368 | * Areas mapping memory-mapped devices are treated differently than |
||
369 | * areas backing frame_alloc()'ed memory. |
||
370 | */ |
||
1403 | jermar | 371 | |
372 | /* |
||
373 | * Visit only the pages mapped by used_space B+tree. |
||
374 | * Note that we must be very careful when walking the tree |
||
375 | * leaf list and removing used space as the leaf list changes |
376 | * unpredictably after each remove. The solution is to actually |
377 | * not walk the tree at all, but to remove items from the head |
378 | * of the leaf list as long as there are keys left. |
||
379 | */ |
||
380 | for (cond = true; cond;) { |
||
381 | btree_node_t *node; |
||
382 | |||
383 | ASSERT(!list_empty(&area->used_space.leaf_head)); |
||
384 | node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link); |
||
385 | if ((cond = (bool) node->keys)) { |
||
386 | __address b = node->key[0]; |
||
387 | count_t i; |
||
388 | pte_t *pte; |
||
389 | |||
390 | for (i = 0; i < (count_t) node->value[0]; i++) { |
||
391 | page_table_lock(as, false); |
||
392 | pte = page_mapping_find(as, b + i*PAGE_SIZE); |
||
393 | ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); |
||
394 | frame_free(ADDR2PFN(PTE_GET_FRAME(pte))); |
||
395 | page_mapping_remove(as, b + i*PAGE_SIZE); |
||
396 | page_table_unlock(as, false); |
||
397 | } |
||
398 | if (!used_space_remove(area, b, i)) |
||
399 | panic("Could not remove used space.\n"); |
||
1306 | jermar | 400 | } |
401 | } |
||
402 | } |
||
1403 | jermar | 403 | btree_destroy(&area->used_space); |
404 | |||
1306 | jermar | 405 | /* |
406 | * Invalidate TLB's. |
||
407 | */ |
||
408 | tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages); |
||
409 | tlb_invalidate_pages(AS->asid, area->base, area->pages); |
||
410 | tlb_shootdown_finalize(); |
||
411 | |||
1309 | jermar | 412 | area->attributes |= AS_AREA_ATTR_PARTIAL; |
1380 | jermar | 413 | mutex_unlock(&area->lock); |
1306 | jermar | 414 | |
415 | /* |
||
416 | * Remove the empty area from address space. |
||
417 | */ |
||
418 | btree_remove(&as->as_area_btree, base, NULL); |
||
419 | |||
1309 | jermar | 420 | free(area); |
421 | |||
1380 | jermar | 422 | mutex_unlock(&as->lock); |
1306 | jermar | 423 | interrupts_restore(ipl); |
424 | return 0; |
||
425 | } |
||
426 | |||
1329 | palkovsky | 427 | /** Steal address space area from another task. |
1235 | jermar | 428 | * |
1329 | palkovsky | 429 | * The address space area is stolen from another task. |
430 | * Moreover, any existing mapping |
1235 | jermar | 431 | * is copied as well, thus providing a mechanism |
432 | * for sharing a group of pages. The source address |
433 | * space area and any associated mapping are preserved. |
||
434 | * |
||
1329 | palkovsky | 435 | * @param src_task Pointer to the source task. |
1239 | jermar | 436 | * @param src_base Base address of the source address space area. |
1329 | palkovsky | 437 | * @param acc_size Expected size of the source area. |
438 | * @param dst_base Target base address. |
||
1235 | jermar | 439 | * |
1306 | jermar | 440 | * @return Zero on success or ENOENT if there is no such task or |
1235 | jermar | 441 | * if there is no such address space area, |
442 | * EPERM if there was a problem in accepting the area or |
||
443 | * ENOMEM if there was a problem in allocating destination |
||
444 | * address space area. |
||
445 | */ |
||
1329 | palkovsky | 446 | int as_area_steal(task_t *src_task, __address src_base, size_t acc_size, |
447 | __address dst_base) |
||
1235 | jermar | 448 | { |
449 | ipl_t ipl; |
||
450 | count_t i; |
||
1329 | palkovsky | 451 | as_t *src_as; |
1239 | jermar | 452 | int src_flags; |
453 | size_t src_size; |
||
454 | as_area_t *src_area, *dst_area; |
||
1329 | palkovsky | 455 | |
1235 | jermar | 456 | ipl = interrupts_disable(); |
1329 | palkovsky | 457 | spinlock_lock(&src_task->lock); |
458 | src_as = src_task->as; |
||
1235 | jermar | 459 | |
1380 | jermar | 460 | mutex_lock(&src_as->lock); |
1329 | palkovsky | 461 | src_area = find_area_and_lock(src_as, src_base); |
1239 | jermar | 462 | if (!src_area) { |
1238 | jermar | 463 | /* |
464 | * Could not find the source address space area. |
||
465 | */ |
||
1329 | palkovsky | 466 | spinlock_unlock(&src_task->lock); |
1380 | jermar | 467 | mutex_unlock(&src_as->lock); |
1238 | jermar | 468 | interrupts_restore(ipl); |
469 | return ENOENT; |
||
470 | } |
||
1239 | jermar | 471 | src_size = src_area->pages * PAGE_SIZE; |
472 | src_flags = src_area->flags; |
||
1380 | jermar | 473 | mutex_unlock(&src_area->lock); |
474 | mutex_unlock(&src_as->lock); |
||
1235 | jermar | 475 | |
1329 | palkovsky | 476 | if (src_size != acc_size) { |
477 | spinlock_unlock(&src_task->lock); |
||
1235 | jermar | 478 | interrupts_restore(ipl); |
479 | return EPERM; |
||
480 | } |
||
481 | /* |
||
1239 | jermar | 482 | * Create copy of the source address space area. |
483 | * The destination area is created with AS_AREA_ATTR_PARTIAL |
||
484 | * attribute set which prevents race condition with |
||
485 | * preliminary as_page_fault() calls. |
||
1235 | jermar | 486 | */ |
1329 | palkovsky | 487 | dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL); |
1239 | jermar | 488 | if (!dst_area) { |
1235 | jermar | 489 | /* |
490 | * Destination address space area could not be created. |
||
491 | */ |
||
1329 | palkovsky | 492 | spinlock_unlock(&src_task->lock); |
1235 | jermar | 493 | interrupts_restore(ipl); |
494 | return ENOMEM; |
||
495 | } |
||
496 | |||
1329 | palkovsky | 497 | spinlock_unlock(&src_task->lock); |
1235 | jermar | 498 | |
499 | /* |
||
500 | * Avoid deadlock by first locking the address space with lower address. |
||
501 | */ |
||
1329 | palkovsky | 502 | if (AS < src_as) { |
1380 | jermar | 503 | mutex_lock(&AS->lock); |
504 | mutex_lock(&src_as->lock); |
||
1235 | jermar | 505 | } else { |
1380 | jermar | 506 | mutex_lock(&src_as->lock); |
507 | mutex_lock(&AS->lock); |
||
1235 | jermar | 508 | } |
509 | |||
1239 | jermar | 510 | for (i = 0; i < SIZE2FRAMES(src_size); i++) { |
1235 | jermar | 511 | pte_t *pte; |
512 | __address frame; |
||
513 | |||
1329 | palkovsky | 514 | page_table_lock(src_as, false); |
515 | pte = page_mapping_find(src_as, src_base + i*PAGE_SIZE); |
||
1235 | jermar | 516 | if (pte && PTE_VALID(pte)) { |
517 | ASSERT(PTE_PRESENT(pte)); |
||
518 | frame = PTE_GET_FRAME(pte); |
||
1239 | jermar | 519 | if (!(src_flags & AS_AREA_DEVICE)) |
1236 | jermar | 520 | frame_reference_add(ADDR2PFN(frame)); |
1329 | palkovsky | 521 | page_table_unlock(src_as, false); |
1235 | jermar | 522 | } else { |
1329 | palkovsky | 523 | page_table_unlock(src_as, false); |
1235 | jermar | 524 | continue; |
525 | } |
||
526 | |||
1329 | palkovsky | 527 | page_table_lock(AS, false); |
528 | page_mapping_insert(AS, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags)); |
||
529 | page_table_unlock(AS, false); |
||
1235 | jermar | 530 | } |
1239 | jermar | 531 | |
532 | /* |
||
533 | * Now the destination address space area has been |
||
534 | * fully initialized. Clear the AS_AREA_ATTR_PARTIAL |
||
535 | * attribute. |
||
536 | */ |
||
1380 | jermar | 537 | mutex_lock(&dst_area->lock); |
1239 | jermar | 538 | dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; |
1380 | jermar | 539 | mutex_unlock(&dst_area->lock); |
1235 | jermar | 540 | |
1380 | jermar | 541 | mutex_unlock(&AS->lock); |
542 | mutex_unlock(&src_as->lock); |
||
1235 | jermar | 543 | interrupts_restore(ipl); |
544 | |||
545 | return 0; |
||
546 | } |
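
A hedged sketch of a caller on the receiving side, assuming higher-level code (e.g. the IPC layer) has already looked up the donor task and agreed on the addresses; donor_task, donor_base, accepted_size and local_base are hypothetical variables supplied by that code:

	int rc;

	rc = as_area_steal(donor_task, donor_base, accepted_size, local_base);
	if (rc != 0)
		return rc;	/* ENOENT, EPERM or ENOMEM, as documented above */
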
||
547 | |||
754 | jermar | 548 | /** Initialize mapping for one page of address space. |
703 | jermar | 549 | * |
754 | jermar | 550 | * This function maps 'page' to 'frame' according |
551 | * to the attributes of the address space area to |
552 | * which 'page' belongs. |
||
703 | jermar | 553 | * |
840 | jermar | 554 | * @param as Target address space. |
754 | jermar | 555 | * @param page Virtual page within the area. |
556 | * @param frame Physical frame to which page will be mapped. |
||
703 | jermar | 557 | */ |
754 | jermar | 558 | void as_set_mapping(as_t *as, __address page, __address frame) |
703 | jermar | 559 | { |
977 | jermar | 560 | as_area_t *area; |
703 | jermar | 561 | ipl_t ipl; |
562 | |||
563 | ipl = interrupts_disable(); |
||
1044 | jermar | 564 | page_table_lock(as, true); |
703 | jermar | 565 | |
977 | jermar | 566 | area = find_area_and_lock(as, page); |
754 | jermar | 567 | if (!area) { |
1403 | jermar | 568 | panic("Page not part of any as_area.\n"); |
754 | jermar | 569 | } |
570 | |||
756 | jermar | 571 | page_mapping_insert(as, page, frame, get_area_flags(area)); |
1403 | jermar | 572 | if (!used_space_insert(area, page, 1)) |
573 | panic("Could not insert used space.\n"); |
||
754 | jermar | 574 | |
1380 | jermar | 575 | mutex_unlock(&area->lock); |
1044 | jermar | 576 | page_table_unlock(as, true); |
703 | jermar | 577 | interrupts_restore(ipl); |
578 | } |
||
579 | |||
580 | /** Handle page fault within the current address space. |
||
581 | * |
||
582 | * This is the high-level page fault handler. |
||
583 | * Interrupts are assumed disabled. |
||
584 | * |
||
585 | * @param page Faulting page. |
||
1288 | jermar | 586 | * @param istate Pointer to interrupted state. |
703 | jermar | 587 | * |
1288 | jermar | 588 | * @return 0 on page fault, 1 on success or 2 if the fault was caused by copy_to_uspace() or copy_from_uspace(). |
703 | jermar | 589 | */ |
1288 | jermar | 590 | int as_page_fault(__address page, istate_t *istate) |
703 | jermar | 591 | { |
1044 | jermar | 592 | pte_t *pte; |
977 | jermar | 593 | as_area_t *area; |
703 | jermar | 594 | __address frame; |
595 | |||
1380 | jermar | 596 | if (!THREAD) |
597 | return 0; |
||
598 | |||
703 | jermar | 599 | ASSERT(AS); |
1044 | jermar | 600 | |
1380 | jermar | 601 | mutex_lock(&AS->lock); |
977 | jermar | 602 | area = find_area_and_lock(AS, page); |
703 | jermar | 603 | if (!area) { |
604 | /* |
||
605 | * No area contained mapping for 'page'. |
||
606 | * Signal page fault to low-level handler. |
||
607 | */ |
||
1380 | jermar | 608 | mutex_unlock(&AS->lock); |
1288 | jermar | 609 | goto page_fault; |
703 | jermar | 610 | } |
611 | |||
1239 | jermar | 612 | if (area->attributes & AS_AREA_ATTR_PARTIAL) { |
613 | /* |
||
614 | * The address space area is not fully initialized. |
||
615 | * Avoid possible race by returning error. |
||
616 | */ |
||
1380 | jermar | 617 | mutex_unlock(&area->lock); |
618 | mutex_unlock(&AS->lock); |
||
1288 | jermar | 619 | goto page_fault; |
1239 | jermar | 620 | } |
621 | |||
1179 | jermar | 622 | ASSERT(!(area->flags & AS_AREA_DEVICE)); |
623 | |||
1044 | jermar | 624 | page_table_lock(AS, false); |
625 | |||
703 | jermar | 626 | /* |
1044 | jermar | 627 | * To avoid race condition between two page faults |
628 | * on the same address, we need to make sure |
||
629 | * the mapping has not been already inserted. |
||
630 | */ |
||
631 | if ((pte = page_mapping_find(AS, page))) { |
||
632 | if (PTE_PRESENT(pte)) { |
||
633 | page_table_unlock(AS, false); |
||
1380 | jermar | 634 | mutex_unlock(&area->lock); |
635 | mutex_unlock(&AS->lock); |
||
1044 | jermar | 636 | return 1; |
637 | } |
||
638 | } |
||
639 | |||
640 | /* |
||
754 | jermar | 641 | * In general, there can be several reasons that |
642 | * could have caused this fault. |
||
643 | * |
||
644 | * - non-existent mapping: the area is a scratch |
||
645 | * area (e.g. stack) and so far has not been |
||
646 | * allocated a frame for the faulting page |
||
647 | * |
||
648 | * - non-present mapping: another possibility, |
||
649 | * currently not implemented, would be frame |
||
650 | * reuse; when this becomes a possibility, |
||
651 | * do not forget to distinguish between |
||
652 | * the different causes |
||
703 | jermar | 653 | */ |
814 | palkovsky | 654 | frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0)); |
754 | jermar | 655 | memsetb(PA2KA(frame), FRAME_SIZE, 0); |
703 | jermar | 656 | |
657 | /* |
||
658 | * Map 'page' to 'frame'. |
||
659 | * Note that TLB shootdown is not attempted as only new information is being |
||
660 | * inserted into page tables. |
||
661 | */ |
||
756 | jermar | 662 | page_mapping_insert(AS, page, frame, get_area_flags(area)); |
1403 | jermar | 663 | if (!used_space_insert(area, ALIGN_DOWN(page, PAGE_SIZE), 1)) |
664 | panic("Could not insert used space.\n"); |
||
1044 | jermar | 665 | page_table_unlock(AS, false); |
703 | jermar | 666 | |
1380 | jermar | 667 | mutex_unlock(&area->lock); |
668 | mutex_unlock(&AS->lock); |
||
1288 | jermar | 669 | return AS_PF_OK; |
670 | |||
671 | page_fault: |
||
672 | if (!THREAD) |
||
673 | return AS_PF_FAULT; |
||
674 | |||
675 | if (THREAD->in_copy_from_uspace) { |
||
676 | THREAD->in_copy_from_uspace = false; |
||
677 | istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address); |
||
678 | } else if (THREAD->in_copy_to_uspace) { |
||
679 | THREAD->in_copy_to_uspace = false; |
||
680 | istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address); |
||
681 | } else { |
||
682 | return AS_PF_FAULT; |
||
683 | } |
||
684 | |||
685 | return AS_PF_DEFER; |
||
703 | jermar | 686 | } |
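
A sketch of the architecture-level dispatch that would sit on top of as_page_fault(); the surrounding trap handler, which supplies the faulting address and the istate pointer, lives in architecture code and is only assumed here:

	switch (as_page_fault(faulting_address, istate)) {
	case AS_PF_OK:
		break;		/* mapping installed; return and restart the instruction */
	case AS_PF_DEFER:
		break;		/* return address was redirected to the copy_*_uspace failover code */
	case AS_PF_FAULT:
	default:
		panic("unresolved page fault\n");
	}
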
687 | |||
823 | jermar | 688 | /** Switch address spaces. |
703 | jermar | 689 | * |
1380 | jermar | 690 | * Note that this function cannot sleep as it is essentially a part of |
691 | * the scheduling. Sleeping here would lead to deadlock on wakeup. |
||
692 | * |
||
823 | jermar | 693 | * @param old Old address space or NULL. |
694 | * @param new New address space. |
||
703 | jermar | 695 | */ |
823 | jermar | 696 | void as_switch(as_t *old, as_t *new) |
703 | jermar | 697 | { |
698 | ipl_t ipl; |
||
823 | jermar | 699 | bool needs_asid = false; |
703 | jermar | 700 | |
701 | ipl = interrupts_disable(); |
||
823 | jermar | 702 | spinlock_lock(&as_lock); |
703 | jermar | 703 | |
704 | /* |
||
823 | jermar | 705 | * First, take care of the old address space. |
706 | */ |
||
707 | if (old) { |
||
1380 | jermar | 708 | mutex_lock_active(&old->lock); |
823 | jermar | 709 | ASSERT(old->refcount); |
710 | if((--old->refcount == 0) && (old != AS_KERNEL)) { |
||
711 | /* |
||
712 | * The old address space is no longer active on |
||
713 | * any processor. It can be appended to the |
||
714 | * list of inactive address spaces with assigned |
||
715 | * ASID. |
||
716 | */ |
||
717 | ASSERT(old->asid != ASID_INVALID); |
||
718 | list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head); |
||
719 | } |
||
1380 | jermar | 720 | mutex_unlock(&old->lock); |
823 | jermar | 721 | } |
722 | |||
723 | /* |
||
724 | * Second, prepare the new address space. |
||
725 | */ |
||
1380 | jermar | 726 | mutex_lock_active(&new->lock); |
823 | jermar | 727 | if ((new->refcount++ == 0) && (new != AS_KERNEL)) { |
728 | if (new->asid != ASID_INVALID) |
||
729 | list_remove(&new->inactive_as_with_asid_link); |
||
730 | else |
||
731 | needs_asid = true; /* defer call to asid_get() until new->lock is released */ |
||
732 | } |
||
733 | SET_PTL0_ADDRESS(new->page_table); |
||
1380 | jermar | 734 | mutex_unlock(&new->lock); |
823 | jermar | 735 | |
736 | if (needs_asid) { |
||
737 | /* |
||
738 | * Allocation of new ASID was deferred |
||
739 | * until now in order to avoid deadlock. |
||
740 | */ |
||
741 | asid_t asid; |
||
742 | |||
743 | asid = asid_get(); |
||
1380 | jermar | 744 | mutex_lock_active(&new->lock); |
823 | jermar | 745 | new->asid = asid; |
1380 | jermar | 746 | mutex_unlock(&new->lock); |
823 | jermar | 747 | } |
748 | spinlock_unlock(&as_lock); |
||
749 | interrupts_restore(ipl); |
||
750 | |||
751 | /* |
||
703 | jermar | 752 | * Perform architecture-specific steps. |
727 | jermar | 753 | * (e.g. write ASID to hardware register etc.) |
703 | jermar | 754 | */ |
823 | jermar | 755 | as_install_arch(new); |
703 | jermar | 756 | |
823 | jermar | 757 | AS = new; |
703 | jermar | 758 | } |
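
The intended caller is the scheduler; a minimal sketch, assuming the usual TASK and AS globals and that interrupts are already disabled at this point in the scheduling path:

	if (TASK->as != AS)
		as_switch(AS, TASK->as);
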
754 | jermar | 759 | |
1235 | jermar | 760 | /** Convert address space area flags to page flags. |
754 | jermar | 761 | * |
1235 | jermar | 762 | * @param aflags Flags of some address space area. |
754 | jermar | 763 | * |
1235 | jermar | 764 | * @return Flags to be passed to page_mapping_insert(). |
754 | jermar | 765 | */ |
1235 | jermar | 766 | int area_flags_to_page_flags(int aflags) |
754 | jermar | 767 | { |
768 | int flags; |
||
769 | |||
1178 | jermar | 770 | flags = PAGE_USER | PAGE_PRESENT; |
754 | jermar | 771 | |
1235 | jermar | 772 | if (aflags & AS_AREA_READ) |
1026 | jermar | 773 | flags |= PAGE_READ; |
774 | |||
1235 | jermar | 775 | if (aflags & AS_AREA_WRITE) |
1026 | jermar | 776 | flags |= PAGE_WRITE; |
777 | |||
1235 | jermar | 778 | if (aflags & AS_AREA_EXEC) |
1026 | jermar | 779 | flags |= PAGE_EXEC; |
780 | |||
1235 | jermar | 781 | if (!(aflags & AS_AREA_DEVICE)) |
1178 | jermar | 782 | flags |= PAGE_CACHEABLE; |
783 | |||
754 | jermar | 784 | return flags; |
785 | } |
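
Worked directly from the branches above, a plain read/write anonymous area translates as follows:

	int example = area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE);
	/* example == PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE */
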
||
756 | jermar | 786 | |
1235 | jermar | 787 | /** Compute flags for the virtual address translation subsystem. |
788 | * |
||
789 | * The address space area must be locked. |
||
790 | * Interrupts must be disabled. |
||
791 | * |
||
792 | * @param a Address space area. |
||
793 | * |
||
794 | * @return Flags to be used in page_mapping_insert(). |
||
795 | */ |
||
796 | int get_area_flags(as_area_t *a) |
||
797 | { |
||
798 | return area_flags_to_page_flags(a->flags); |
||
799 | } |
||
800 | |||
756 | jermar | 801 | /** Create page table. |
802 | * |
||
803 | * Depending on architecture, create either address space |
||
804 | * private or global page table. |
||
805 | * |
||
806 | * @param flags Flags saying whether the page table is for kernel address space. |
||
807 | * |
||
808 | * @return First entry of the page table. |
||
809 | */ |
||
810 | pte_t *page_table_create(int flags) |
||
811 | { |
||
812 | ASSERT(as_operations); |
||
813 | ASSERT(as_operations->page_table_create); |
||
814 | |||
815 | return as_operations->page_table_create(flags); |
||
816 | } |
||
977 | jermar | 817 | |
1044 | jermar | 818 | /** Lock page table. |
819 | * |
||
820 | * This function should be called before any page_mapping_insert(), |
||
821 | * page_mapping_remove() and page_mapping_find(). |
||
822 | * |
||
823 | * Locking order is such that address space areas must be locked |
||
824 | * prior to this call. Address space can be locked prior to this |
||
825 | * call in which case the lock argument is false. |
||
826 | * |
||
827 | * @param as Address space. |
||
1248 | jermar | 828 | * @param lock If false, do not attempt to lock as->lock. |
1044 | jermar | 829 | */ |
830 | void page_table_lock(as_t *as, bool lock) |
||
831 | { |
||
832 | ASSERT(as_operations); |
||
833 | ASSERT(as_operations->page_table_lock); |
||
834 | |||
835 | as_operations->page_table_lock(as, lock); |
||
836 | } |
||
837 | |||
838 | /** Unlock page table. |
||
839 | * |
||
840 | * @param as Address space. |
||
1248 | jermar | 841 | * @param unlock If false, do not attempt to unlock as->lock. |
1044 | jermar | 842 | */ |
843 | void page_table_unlock(as_t *as, bool unlock) |
||
844 | { |
||
845 | ASSERT(as_operations); |
||
846 | ASSERT(as_operations->page_table_unlock); |
||
847 | |||
848 | as_operations->page_table_unlock(as, unlock); |
||
849 | } |
||
850 | |||
977 | jermar | 851 | |
852 | /** Find address space area and lock it. |
||
853 | * |
||
854 | * The address space must be locked and interrupts must be disabled. |
||
855 | * |
||
856 | * @param as Address space. |
||
857 | * @param va Virtual address. |
||
858 | * |
||
859 | * @return Locked address space area containing va on success or NULL on failure. |
||
860 | */ |
||
861 | as_area_t *find_area_and_lock(as_t *as, __address va) |
||
862 | { |
||
863 | as_area_t *a; |
||
1147 | jermar | 864 | btree_node_t *leaf, *lnode; |
865 | int i; |
||
977 | jermar | 866 | |
1147 | jermar | 867 | a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); |
868 | if (a) { |
||
869 | /* va is the base address of an address space area */ |
||
1380 | jermar | 870 | mutex_lock(&a->lock); |
1147 | jermar | 871 | return a; |
872 | } |
||
873 | |||
874 | /* |
||
1150 | jermar | 875 | * Search the leaf node and the rightmost record of its left neighbour |
1147 | jermar | 876 | * to find out whether this is a miss or va belongs to an address |
877 | * space area found there. |
||
878 | */ |
||
879 | |||
880 | /* First, search the leaf node itself. */ |
||
881 | for (i = 0; i < leaf->keys; i++) { |
||
882 | a = (as_area_t *) leaf->value[i]; |
||
1380 | jermar | 883 | mutex_lock(&a->lock); |
1147 | jermar | 884 | if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) { |
885 | return a; |
||
886 | } |
||
1380 | jermar | 887 | mutex_unlock(&a->lock); |
1147 | jermar | 888 | } |
977 | jermar | 889 | |
1147 | jermar | 890 | /* |
1150 | jermar | 891 | * Second, locate the left neighbour and test its last record. |
1148 | jermar | 892 | * Because of its position in the B+tree, it must have base < va. |
1147 | jermar | 893 | */ |
1150 | jermar | 894 | if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { |
1147 | jermar | 895 | a = (as_area_t *) lnode->value[lnode->keys - 1]; |
1380 | jermar | 896 | mutex_lock(&a->lock); |
1147 | jermar | 897 | if (va < a->base + a->pages * PAGE_SIZE) { |
1048 | jermar | 898 | return a; |
1147 | jermar | 899 | } |
1380 | jermar | 900 | mutex_unlock(&a->lock); |
977 | jermar | 901 | } |
902 | |||
903 | return NULL; |
||
904 | } |
||
1048 | jermar | 905 | |
906 | /** Check area conflicts with other areas. |
||
907 | * |
||
908 | * The address space must be locked and interrupts must be disabled. |
||
909 | * |
||
910 | * @param as Address space. |
||
911 | * @param va Starting virtual address of the area being tested. |
||
912 | * @param size Size of the area being tested. |
||
913 | * @param avoid_area Do not touch this area. |
||
914 | * |
||
915 | * @return True if there is no conflict, false otherwise. |
||
916 | */ |
||
917 | bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area) |
||
918 | { |
||
919 | as_area_t *a; |
||
1147 | jermar | 920 | btree_node_t *leaf, *node; |
921 | int i; |
||
1048 | jermar | 922 | |
1070 | jermar | 923 | /* |
924 | * We don't want any area to have conflicts with NULL page. |
||
925 | */ |
||
926 | if (overlaps(va, size, NULL, PAGE_SIZE)) |
||
927 | return false; |
||
928 | |||
1147 | jermar | 929 | /* |
930 | * The leaf node is found in O(log n), where n is proportional to |
||
931 | * the number of address space areas belonging to as. |
||
932 | * The check for conflicts is then attempted on the rightmost |
||
1150 | jermar | 933 | * record in the left neighbour, the leftmost record in the right |
934 | * neighbour and all records in the leaf node itself. |
||
1147 | jermar | 935 | */ |
1048 | jermar | 936 | |
1147 | jermar | 937 | if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) { |
938 | if (a != avoid_area) |
||
939 | return false; |
||
940 | } |
||
941 | |||
942 | /* First, check the two border cases. */ |
||
1150 | jermar | 943 | if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { |
1147 | jermar | 944 | a = (as_area_t *) node->value[node->keys - 1]; |
1380 | jermar | 945 | mutex_lock(&a->lock); |
1147 | jermar | 946 | if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
1380 | jermar | 947 | mutex_unlock(&a->lock); |
1147 | jermar | 948 | return false; |
949 | } |
||
1380 | jermar | 950 | mutex_unlock(&a->lock); |
1147 | jermar | 951 | } |
1150 | jermar | 952 | if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) { |
1147 | jermar | 953 | a = (as_area_t *) node->value[0]; |
1380 | jermar | 954 | mutex_lock(&a->lock); |
1147 | jermar | 955 | if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
1380 | jermar | 956 | mutex_unlock(&a->lock); |
1147 | jermar | 957 | return false; |
958 | } |
||
1380 | jermar | 959 | mutex_unlock(&a->lock); |
1147 | jermar | 960 | } |
961 | |||
962 | /* Second, check the leaf node. */ |
||
963 | for (i = 0; i < leaf->keys; i++) { |
||
964 | a = (as_area_t *) leaf->value[i]; |
||
965 | |||
1048 | jermar | 966 | if (a == avoid_area) |
967 | continue; |
||
1147 | jermar | 968 | |
1380 | jermar | 969 | mutex_lock(&a->lock); |
1147 | jermar | 970 | if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
1380 | jermar | 971 | mutex_unlock(&a->lock); |
1147 | jermar | 972 | return false; |
973 | } |
||
1380 | jermar | 974 | mutex_unlock(&a->lock); |
1048 | jermar | 975 | } |
976 | |||
1070 | jermar | 977 | /* |
978 | * So far, the area does not conflict with other areas. |
||
979 | * Check if it doesn't conflict with kernel address space. |
||
980 | */ |
||
981 | if (!KERNEL_ADDRESS_SPACE_SHADOWED) { |
||
982 | return !overlaps(va, size, |
||
983 | KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START); |
||
984 | } |
||
985 | |||
1048 | jermar | 986 | return true; |
987 | } |
||
1235 | jermar | 988 | |
1380 | jermar | 989 | /** Return size of the address space area with given base. */ |
1329 | palkovsky | 990 | size_t as_get_size(__address base) |
991 | { |
||
992 | ipl_t ipl; |
||
993 | as_area_t *src_area; |
||
994 | size_t size; |
||
995 | |||
996 | ipl = interrupts_disable(); |
||
997 | src_area = find_area_and_lock(AS, base); |
||
998 | if (src_area){ |
||
999 | size = src_area->pages * PAGE_SIZE; |
||
1380 | jermar | 1000 | mutex_unlock(&src_area->lock); |
1329 | palkovsky | 1001 | } else { |
1002 | size = 0; |
||
1003 | } |
||
1004 | interrupts_restore(ipl); |
||
1005 | return size; |
||
1006 | } |
||
1007 | |||
1387 | jermar | 1008 | /** Mark portion of address space area as used. |
1009 | * |
||
1010 | * The address space area must be already locked. |
||
1011 | * |
||
1012 | * @param a Address space area. |
||
1013 | * @param page First page to be marked. |
||
1014 | * @param count Number of pages to be marked. |
||
1015 | * |
||
1016 | * @return 0 on failure and 1 on success. |
||
1017 | */ |
||
1018 | int used_space_insert(as_area_t *a, __address page, count_t count) |
||
1019 | { |
||
1020 | btree_node_t *leaf, *node; |
||
1021 | count_t pages; |
||
1022 | int i; |
||
1023 | |||
1024 | ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); |
||
1025 | ASSERT(count); |
||
1026 | |||
1027 | pages = (count_t) btree_search(&a->used_space, page, &leaf); |
||
1028 | if (pages) { |
||
1029 | /* |
||
1030 | * We hit the beginning of some used space. |
||
1031 | */ |
||
1032 | return 0; |
||
1033 | } |
||
1034 | |||
1035 | node = btree_leaf_node_left_neighbour(&a->used_space, leaf); |
||
1036 | if (node) { |
||
1037 | __address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0]; |
||
1038 | count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0]; |
||
1039 | |||
1040 | /* |
||
1041 | * Examine the possibility that the interval fits |
||
1042 | * somewhere between the rightmost interval of |
||
1043 | * the left neighbour and the first interval of the leaf. |
||
1044 | */ |
||
1045 | |||
1046 | if (page >= right_pg) { |
||
1047 | /* Do nothing. */ |
||
1048 | } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1049 | /* The interval intersects with the left interval. */ |
||
1050 | return 0; |
||
1051 | } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1052 | /* The interval intersects with the right interval. */ |
||
1053 | return 0; |
||
1054 | } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) { |
||
1055 | /* The interval can be added by merging the two already present intervals. */ |
||
1403 | jermar | 1056 | node->value[node->keys - 1] += count + right_cnt; |
1387 | jermar | 1057 | btree_remove(&a->used_space, right_pg, leaf); |
1058 | return 1; |
||
1059 | } else if (page == left_pg + left_cnt*PAGE_SIZE) { |
||
1060 | /* The interval can be added by simply growing the left interval. */ |
||
1403 | jermar | 1061 | node->value[node->keys - 1] += count; |
1387 | jermar | 1062 | return 1; |
1063 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1064 | /* |
||
1065 | * The interval can be added by simply moving the base of the right |
||
1066 | * interval down and increasing its size accordingly. |
||
1067 | */ |
||
1403 | jermar | 1068 | leaf->value[0] += count; |
1387 | jermar | 1069 | leaf->key[0] = page; |
1070 | return 1; |
||
1071 | } else { |
||
1072 | /* |
||
1073 | * The interval is between both neighbouring intervals, |
1074 | * but cannot be merged with either of them. |
||
1075 | */ |
||
1076 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1077 | return 1; |
||
1078 | } |
||
1079 | } else if (page < leaf->key[0]) { |
||
1080 | __address right_pg = leaf->key[0]; |
||
1081 | count_t right_cnt = (count_t) leaf->value[0]; |
||
1082 | |||
1083 | /* |
||
1084 | * Investigate the border case in which the left neighbour does not |
||
1085 | * exist but the interval fits from the left. |
||
1086 | */ |
||
1087 | |||
1088 | if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1089 | /* The interval intersects with the right interval. */ |
||
1090 | return 0; |
||
1091 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1092 | /* |
||
1093 | * The interval can be added by moving the base of the right interval down |
||
1094 | * and increasing its size accordingly. |
||
1095 | */ |
||
1096 | leaf->key[0] = page; |
||
1403 | jermar | 1097 | leaf->value[0] += count; |
1387 | jermar | 1098 | return 1; |
1099 | } else { |
||
1100 | /* |
||
1101 | * The interval doesn't adjoin with the right interval. |
||
1102 | * It must be added individually. |
||
1103 | */ |
||
1104 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1105 | return 1; |
||
1106 | } |
||
1107 | } |
||
1108 | |||
1109 | node = btree_leaf_node_right_neighbour(&a->used_space, leaf); |
||
1110 | if (node) { |
||
1111 | __address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0]; |
||
1112 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0]; |
||
1113 | |||
1114 | /* |
||
1115 | * Examine the possibility that the interval fits |
||
1116 | * somewhere between the leftmost interval of |
||
1117 | * the right neighbour and the last interval of the leaf. |
||
1118 | */ |
||
1119 | |||
1120 | if (page < left_pg) { |
||
1121 | /* Do nothing. */ |
||
1122 | } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1123 | /* The interval intersects with the left interval. */ |
||
1124 | return 0; |
||
1125 | } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1126 | /* The interval intersects with the right interval. */ |
||
1127 | return 0; |
||
1128 | } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) { |
||
1129 | /* The interval can be added by merging the two already present intervals. */ |
||
1403 | jermar | 1130 | leaf->value[leaf->keys - 1] += count + right_cnt; |
1387 | jermar | 1131 | btree_remove(&a->used_space, right_pg, node); |
1132 | return 1; |
||
1133 | } else if (page == left_pg + left_cnt*PAGE_SIZE) { |
||
1134 | /* The interval can be added by simply growing the left interval. */ |
||
1403 | jermar | 1135 | leaf->value[leaf->keys - 1] += count; |
1387 | jermar | 1136 | return 1; |
1137 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1138 | /* |
||
1139 | * The interval can be added by simply moving the base of the right |
||
1140 | * interval down and increasing its size accordingly. |
||
1141 | */ |
||
1403 | jermar | 1142 | node->value[0] += count; |
1387 | jermar | 1143 | node->key[0] = page; |
1144 | return 1; |
||
1145 | } else { |
||
1146 | /* |
||
1147 | * The interval is between both neighbouring intervals, |
1148 | * but cannot be merged with either of them. |
||
1149 | */ |
||
1150 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1151 | return 1; |
||
1152 | } |
||
1153 | } else if (page >= leaf->key[leaf->keys - 1]) { |
||
1154 | __address left_pg = leaf->key[leaf->keys - 1]; |
||
1155 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1]; |
||
1156 | |||
1157 | /* |
||
1158 | * Investigate the border case in which the right neighbour does not |
||
1159 | * exist but the interval fits from the right. |
||
1160 | */ |
||
1161 | |||
1162 | if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1403 | jermar | 1163 | /* The interval intersects with the left interval. */ |
1387 | jermar | 1164 | return 0; |
1165 | } else if (left_pg + left_cnt*PAGE_SIZE == page) { |
||
1166 | /* The interval can be added by growing the left interval. */ |
||
1403 | jermar | 1167 | leaf->value[leaf->keys - 1] += count; |
1387 | jermar | 1168 | return 1; |
1169 | } else { |
||
1170 | /* |
||
1171 | * The interval doesn't adjoin with the left interval. |
||
1172 | * It must be added individually. |
||
1173 | */ |
||
1174 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1175 | return 1; |
||
1176 | } |
||
1177 | } |
||
1178 | |||
1179 | /* |
||
1180 | * Note that if the algorithm made it thus far, the interval can fit only |
||
1181 | * between two other intervals of the leaf. The two border cases were already |
||
1182 | * resolved. |
||
1183 | */ |
||
1184 | for (i = 1; i < leaf->keys; i++) { |
||
1185 | if (page < leaf->key[i]) { |
||
1186 | __address left_pg = leaf->key[i - 1], right_pg = leaf->key[i]; |
||
1187 | count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i]; |
||
1188 | |||
1189 | /* |
||
1190 | * The interval fits between left_pg and right_pg. |
||
1191 | */ |
||
1192 | |||
1193 | if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) { |
||
1194 | /* The interval intersects with the left interval. */ |
||
1195 | return 0; |
||
1196 | } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) { |
||
1197 | /* The interval intersects with the right interval. */ |
||
1198 | return 0; |
||
1199 | } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) { |
||
1200 | /* The interval can be added by merging the two already present intervals. */ |
||
1403 | jermar | 1201 | leaf->value[i - 1] += count + right_cnt; |
1387 | jermar | 1202 | btree_remove(&a->used_space, right_pg, leaf); |
1203 | return 1; |
||
1204 | } else if (page == left_pg + left_cnt*PAGE_SIZE) { |
||
1205 | /* The interval can be added by simply growing the left interval. */ |
||
1403 | jermar | 1206 | leaf->value[i - 1] += count; |
1387 | jermar | 1207 | return 1; |
1208 | } else if (page + count*PAGE_SIZE == right_pg) { |
||
1209 | /* |
||
1210 | * The interval can be added by simply moving the base of the right |
||
1211 | * interval down and increasing its size accordingly. |
||
1212 | */ |
||
1403 | jermar | 1213 | leaf->value[i] += count; |
1387 | jermar | 1214 | leaf->key[i] = page; |
1215 | return 1; |
||
1216 | } else { |
||
1217 | /* |
||
1218 | * The interval is between both neighbouring intervals, |
1219 | * but cannot be merged with either of them. |
||
1220 | */ |
||
1221 | btree_insert(&a->used_space, page, (void *) count, leaf); |
||
1222 | return 1; |
||
1223 | } |
||
1224 | } |
||
1225 | } |
||
1226 | |||
1227 | panic("Inconsistency detected while adding %d pages of used space at %P.\n", count, page); |
||
1228 | } |
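
A sketch of the coalescing behaviour, assuming the caller holds the area lock and the used_space tree already records a two-page interval starting at the page-aligned address base (a hypothetical value):

	used_space_insert(area, base + 2*PAGE_SIZE, 1);	/* adjoins on the right: interval grows to 3 pages */
	used_space_insert(area, base - PAGE_SIZE, 1);	/* adjoins on the left: base moves down, 4 pages total */
	used_space_insert(area, base, 1);		/* start of an already used interval: returns 0 */
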
||
1229 | |||
1230 | /** Mark portion of address space area as unused. |
||
1231 | * |
||
1232 | * The address space area must be already locked. |
||
1233 | * |
||
1234 | * @param a Address space area. |
||
1235 | * @param page First page to be marked. |
||
1236 | * @param count Number of pages to be marked. |
||
1237 | * |
||
1238 | * @return 0 on failure and 1 on success. |
||
1239 | */ |
||
1240 | int used_space_remove(as_area_t *a, __address page, count_t count) |
||
1241 | { |
||
1242 | btree_node_t *leaf, *node; |
||
1243 | count_t pages; |
||
1244 | int i; |
||
1245 | |||
1246 | ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); |
||
1247 | ASSERT(count); |
||
1248 | |||
1249 | pages = (count_t) btree_search(&a->used_space, page, &leaf); |
||
1250 | if (pages) { |
||
1251 | /* |
||
1252 | * We are lucky, page is the beginning of some interval. |
||
1253 | */ |
||
1254 | if (count > pages) { |
||
1255 | return 0; |
||
1256 | } else if (count == pages) { |
||
1257 | btree_remove(&a->used_space, page, leaf); |
||
1403 | jermar | 1258 | return 1; |
1387 | jermar | 1259 | } else { |
1260 | /* |
||
1261 | * Find the respective interval. |
||
1262 | * Decrease its size and relocate its start address. |
||
1263 | */ |
||
1264 | for (i = 0; i < leaf->keys; i++) { |
||
1265 | if (leaf->key[i] == page) { |
||
1266 | leaf->key[i] += count*PAGE_SIZE; |
||
1403 | jermar | 1267 | leaf->value[i] -= count; |
1387 | jermar | 1268 | return 1; |
1269 | } |
||
1270 | } |
||
1271 | goto error; |
||
1272 | } |
||
1273 | } |
||
1274 | |||
1275 | node = btree_leaf_node_left_neighbour(&a->used_space, leaf); |
||
1276 | if (node && page < leaf->key[0]) { |
||
1277 | __address left_pg = node->key[node->keys - 1]; |
||
1278 | count_t left_cnt = (count_t) node->value[node->keys - 1]; |
||
1279 | |||
1280 | if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) { |
||
1281 | if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) { |
||
1282 | /* |
||
1283 | * The interval is contained in the rightmost interval |
||
1284 | * of the left neighbour and can be removed by |
||
1285 | * updating the size of the bigger interval. |
||
1286 | */ |
||
1403 | jermar | 1287 | node->value[node->keys - 1] -= count; |
1387 | jermar | 1288 | return 1; |
1289 | } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) { |
||
1403 | jermar | 1290 | count_t new_cnt; |
1387 | jermar | 1291 | |
1292 | /* |
||
1293 | * The interval is contained in the rightmost interval |
||
1294 | * of the left neighbour but its removal requires |
||
1295 | * both updating the size of the original interval and |
||
1296 | * also inserting a new interval. |
||
1297 | */ |
||
1403 | jermar | 1298 | new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH; |
1299 | node->value[node->keys - 1] -= count + new_cnt; |
||
1387 | jermar | 1300 | btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf); |
1301 | return 1; |
||
1302 | } |
||
1303 | } |
||
1304 | return 0; |
||
1305 | } else if (page < leaf->key[0]) { |
||
1306 | return 0; |
||
1307 | } |
||
1308 | |||
1309 | if (page > leaf->key[leaf->keys - 1]) { |
||
1310 | __address left_pg = leaf->key[leaf->keys - 1]; |
||
1311 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1]; |
||
1312 | |||
1313 | if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) { |
||
1314 | if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) { |
||
1315 | /* |
||
1316 | * The interval is contained in the rightmost interval |
||
1317 | * of the leaf and can be removed by updating the size |
||
1318 | * of the bigger interval. |
||
1319 | */ |
||
1403 | jermar | 1320 | leaf->value[leaf->keys - 1] -= count; |
1387 | jermar | 1321 | return 1; |
1322 | } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) { |
||
1403 | jermar | 1323 | count_t new_cnt; |
1387 | jermar | 1324 | |
1325 | /* |
||
1326 | * The interval is contained in the rightmost interval |
||
1327 | * of the leaf but its removal requires both updating |
||
1328 | * the size of the original interval and |
||
1329 | * also inserting a new interval. |
||
1330 | */ |
||
1403 | jermar | 1331 | new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH; |
1332 | leaf->value[leaf->keys - 1] -= count + new_cnt; |
||
1387 | jermar | 1333 | btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf); |
1334 | return 1; |
||
1335 | } |
||
1336 | } |
||
1337 | return 0; |
||
1338 | } |
||
1339 | |||
1340 | /* |
||
1341 | * The border cases have been already resolved. |
||
1342 | * Now the interval can be only between intervals of the leaf. |
||
1343 | */ |
||
1344 | for (i = 1; i < leaf->keys - 1; i++) { |
||
1345 | if (page < leaf->key[i]) { |
||
1346 | __address left_pg = leaf->key[i - 1]; |
||
1347 | count_t left_cnt = (count_t) leaf->value[i - 1]; |
||
1348 | |||
1349 | /* |
||
1350 | * Now the interval is between intervals corresponding to (i - 1) and i. |
||
1351 | */ |
||
1352 | if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) { |
||
1353 | if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) { |
||
1354 | /* |
||
1355 | * The interval is contained in the interval (i - 1) |
||
1356 | * of the leaf and can be removed by updating the size |
||
1357 | * of the bigger interval. |
||
1358 | */ |
||
1403 | jermar | 1359 | leaf->value[i - 1] -= count; |
1387 | jermar | 1360 | return 1; |
1361 | } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) { |
||
1403 | jermar | 1362 | count_t new_cnt; |
1387 | jermar | 1363 | |
1364 | /* |
||
1365 | * The interval is contained in the interval (i - 1) |
||
1366 | * of the leaf but its removal requires both updating |
||
1367 | * the size of the original interval and |
||
1368 | * also inserting a new interval. |
||
1369 | */ |
||
1403 | jermar | 1370 | new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH; |
1371 | leaf->value[i - 1] -= count + new_cnt; |
||
1387 | jermar | 1372 | btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf); |
1373 | return 1; |
||
1374 | } |
||
1375 | } |
||
1376 | return 0; |
||
1377 | } |
||
1378 | } |
||
1379 | |||
1380 | error: |
||
1381 | panic("Inconsistency detected while removing %d pages of used space from %P.\n", count, page); |
||
1382 | } |
||
1383 | |||
1235 | jermar | 1384 | /* |
1385 | * Address space related syscalls. |
||
1386 | */ |
||
1387 | |||
1388 | /** Wrapper for as_area_create(). */ |
||
1389 | __native sys_as_area_create(__address address, size_t size, int flags) |
||
1390 | { |
||
1239 | jermar | 1391 | if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE)) |
1235 | jermar | 1392 | return (__native) address; |
1393 | else |
||
1394 | return (__native) -1; |
||
1395 | } |
||
1396 | |||
1397 | /** Wrapper for as_area_resize. */ |
||
1398 | __native sys_as_area_resize(__address address, size_t size, int flags) |
||
1399 | { |
||
1306 | jermar | 1400 | return (__native) as_area_resize(AS, address, size, 0); |
1235 | jermar | 1401 | } |
1402 | |||
1306 | jermar | 1403 | /** Wrapper for as_area_destroy. */ |
1404 | __native sys_as_area_destroy(__address address) |
||
1405 | { |
||
1406 | return (__native) as_area_destroy(AS, address); |
||
1407 | } |