| Rev 3424 | Rev 3425 | ||
|---|---|---|---|
| Line 80... | Line 80... | ||
| 80 | 80 | ||
| 81 | #ifdef CONFIG_VIRT_IDX_DCACHE |
81 | #ifdef CONFIG_VIRT_IDX_DCACHE |
| 82 | #include <arch/mm/cache.h> |
82 | #include <arch/mm/cache.h> |
| 83 | #endif /* CONFIG_VIRT_IDX_DCACHE */ |
83 | #endif /* CONFIG_VIRT_IDX_DCACHE */ |
| 84 | 84 | ||
| 85 | #ifndef __OBJC__ |
- | |
| 86 | /** |
85 | /** |
| 87 | * Each architecture decides what functions will be used to carry out |
86 | * Each architecture decides what functions will be used to carry out |
| 88 | * address space operations such as creating or locking page tables. |
87 | * address space operations such as creating or locking page tables. |
| 89 | */ |
88 | */ |
| 90 | as_operations_t *as_operations = NULL; |
89 | as_operations_t *as_operations = NULL; |
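As a hedged illustration of the mechanism described above, an architecture could publish its page-table operations roughly as follows. The member names mirror the `as_operations->page_table_*` calls used by the wrappers further down in this file; the `pt_*` functions and the structure name are hypothetical placeholders.

```c
/* Hypothetical architecture-side sketch; pt_* are placeholder
 * implementations, the member names come from the wrappers below. */
static pte_t *pt_page_table_create(int flags);
static void pt_page_table_destroy(pte_t *page_table);
static void pt_page_table_lock(as_t *as, bool lock);
static void pt_page_table_unlock(as_t *as, bool unlock);

as_operations_t pt_operations = {
	.page_table_create = pt_page_table_create,
	.page_table_destroy = pt_page_table_destroy,
	.page_table_lock = pt_page_table_lock,
	.page_table_unlock = pt_page_table_unlock
};

/* Architecture initialization code would then point as_operations
 * at this structure, e.g.: as_operations = &pt_operations; */
```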
| 91 | 90 | ||
| 92 | /** |
91 | /** |
| 93 | * Slab for as_t objects. |
92 | * Slab for as_t objects. |
| 94 | */ |
93 | */ |
| 95 | static slab_cache_t *as_slab; |
94 | static slab_cache_t *as_slab; |
| 96 | #endif |
- | |
| 97 | 95 | ||
| 98 | /** |
96 | /** |
| 99 | * This lock serializes access to the ASID subsystem. |
97 | * This lock serializes access to the ASID subsystem. |
| 100 | * It protects: |
98 | * It protects: |
| 101 | * - inactive_as_with_asid_head list |
99 | * - inactive_as_with_asid_head list |
| Line 111... | Line 109... | ||
| 111 | LIST_INITIALIZE(inactive_as_with_asid_head); |
109 | LIST_INITIALIZE(inactive_as_with_asid_head); |
| 112 | 110 | ||
| 113 | /** Kernel address space. */ |
111 | /** Kernel address space. */ |
| 114 | as_t *AS_KERNEL = NULL; |
112 | as_t *AS_KERNEL = NULL; |
| 115 | 113 | ||
| 116 | static int area_flags_to_page_flags(int aflags); |
114 | static int area_flags_to_page_flags(int); |
| 117 | static as_area_t *find_area_and_lock(as_t *as, uintptr_t va); |
115 | static as_area_t *find_area_and_lock(as_t *, uintptr_t); |
| 118 | static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, |
116 | static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *); |
| 119 | as_area_t *avoid_area); |
- | |
| 120 | static void sh_info_remove_reference(share_info_t *sh_info); |
117 | static void sh_info_remove_reference(share_info_t *); |
| 121 | 118 | ||
| 122 | #ifndef __OBJC__ |
- | |
| 123 | static int as_constructor(void *obj, int flags) |
119 | static int as_constructor(void *obj, int flags) |
| 124 | { |
120 | { |
| 125 | as_t *as = (as_t *) obj; |
121 | as_t *as = (as_t *) obj; |
| 126 | int rc; |
122 | int rc; |
| 127 | 123 | ||
| 128 | link_initialize(&as->inactive_as_with_asid_link); |
124 | link_initialize(&as->inactive_as_with_asid_link); |
| 129 | mutex_initialize(&as->lock); |
125 | mutex_initialize(&as->lock, MUTEX_PASSIVE); |
| 130 | 126 | ||
| 131 | rc = as_constructor_arch(as, flags); |
127 | rc = as_constructor_arch(as, flags); |
| 132 | 128 | ||
| 133 | return rc; |
129 | return rc; |
| 134 | } |
130 | } |
| Line 137... | Line 133... | ||
| 137 | { |
133 | { |
| 138 | as_t *as = (as_t *) obj; |
134 | as_t *as = (as_t *) obj; |
| 139 | 135 | ||
| 140 | return as_destructor_arch(as); |
136 | return as_destructor_arch(as); |
| 141 | } |
137 | } |
| 142 | #endif |
- | |
| 143 | 138 | ||
| 144 | /** Initialize address space subsystem. */ |
139 | /** Initialize address space subsystem. */ |
| 145 | void as_init(void) |
140 | void as_init(void) |
| 146 | { |
141 | { |
| 147 | as_arch_init(); |
142 | as_arch_init(); |
| 148 | 143 | ||
| 149 | #ifndef __OBJC__ |
- | |
| 150 | as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, |
144 | as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, |
| 151 | as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); |
145 | as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); |
| 152 | #endif |
- | |
| 153 | 146 | ||
| 154 | AS_KERNEL = as_create(FLAG_AS_KERNEL); |
147 | AS_KERNEL = as_create(FLAG_AS_KERNEL); |
| 155 | if (!AS_KERNEL) |
148 | if (!AS_KERNEL) |
| 156 | panic("can't create kernel address space\n"); |
149 | panic("can't create kernel address space\n"); |
| 157 | 150 | ||
| 158 | } |
151 | } |
| 159 | 152 | ||
| 160 | /** Create address space. |
153 | /** Create address space. |
| 161 | * |
154 | * |
| 162 | * @param flags Flags that influence the way in which the address space is created. |
155 | * @param flags Flags that influence the way in which the address space |
| - | 156 | * is created. |
|
| 163 | */ |
157 | */ |
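A minimal, hedged usage sketch for the function defined next (kernel context assumed; the only caller visible in this diff is as_init(), which passes FLAG_AS_KERNEL).

```c
/* Create an ordinary (non-kernel) address space, e.g. for a new task.
 * Passing 0 requests no special flags; FLAG_AS_KERNEL is reserved for
 * the kernel address space set up in as_init(). */
as_t *as = as_create(0);
if (!as) {
	/* Creation failed; the caller decides how to recover. */
}
```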
| 164 | as_t *as_create(int flags) |
158 | as_t *as_create(int flags) |
| 165 | { |
159 | { |
| 166 | as_t *as; |
160 | as_t *as; |
| 167 | 161 | ||
| 168 | #ifdef __OBJC__ |
- | |
| 169 | as = [as_t new]; |
- | |
| 170 | link_initialize(&as->inactive_as_with_asid_link); |
- | |
| 171 | mutex_initialize(&as->lock); |
- | |
| 172 | (void) as_constructor_arch(as, flags); |
- | |
| 173 | #else |
- | |
| 174 | as = (as_t *) slab_alloc(as_slab, 0); |
162 | as = (as_t *) slab_alloc(as_slab, 0); |
| 175 | #endif |
- | |
| 176 | (void) as_create_arch(as, 0); |
163 | (void) as_create_arch(as, 0); |
| 177 | 164 | ||
| 178 | btree_create(&as->as_area_btree); |
165 | btree_create(&as->as_area_btree); |
| 179 | 166 | ||
| 180 | if (flags & FLAG_AS_KERNEL) |
167 | if (flags & FLAG_AS_KERNEL) |
| Line 197... | Line 184... | ||
| 197 | * |
184 | * |
| 198 | * When there are no tasks referencing this address space (i.e. its refcount is |
185 | * When there are no tasks referencing this address space (i.e. its refcount is |
| 199 | * zero), the address space can be destroyed. |
186 | * zero), the address space can be destroyed. |
| 200 | * |
187 | * |
| 201 | * We know that we don't hold any spinlock. |
188 | * We know that we don't hold any spinlock. |
| - | 189 | * |
|
| - | 190 | * @param as Address space to be destroyed. |
|
| 202 | */ |
191 | */ |
| 203 | void as_destroy(as_t *as) |
192 | void as_destroy(as_t *as) |
| 204 | { |
193 | { |
| 205 | ipl_t ipl; |
194 | ipl_t ipl; |
| 206 | bool cond; |
195 | bool cond; |
| Line 261... | Line 250... | ||
| 261 | page_table_destroy(NULL); |
250 | page_table_destroy(NULL); |
| 262 | #endif |
251 | #endif |
| 263 | 252 | ||
| 264 | interrupts_restore(ipl); |
253 | interrupts_restore(ipl); |
| 265 | 254 | ||
| 266 | #ifdef __OBJC__ |
- | |
| 267 | [as free]; |
- | |
| 268 | #else |
- | |
| 269 | slab_free(as_slab, as); |
255 | slab_free(as_slab, as); |
| 270 | #endif |
- | |
| 271 | } |
256 | } |
| 272 | 257 | ||
| 273 | /** Create address space area of common attributes. |
258 | /** Create address space area of common attributes. |
| 274 | * |
259 | * |
| 275 | * The created address space area is added to the target address space. |
260 | * The created address space area is added to the target address space. |
| 276 | * |
261 | * |
| 277 | * @param as Target address space. |
262 | * @param as Target address space. |
| 278 | * @param flags Flags of the area memory. |
263 | * @param flags Flags of the area memory. |
| 279 | * @param size Size of area. |
264 | * @param size Size of area. |
| 280 | * @param base Base address of area. |
265 | * @param base Base address of area. |
| 281 | * @param attrs Attributes of the area. |
266 | * @param attrs Attributes of the area. |
| 282 | * @param backend Address space area backend. NULL if no backend is used. |
267 | * @param backend Address space area backend. NULL if no backend is used. |
| 283 | * @param backend_data NULL or a pointer to an array holding two void *. |
268 | * @param backend_data NULL or a pointer to an array holding two void *. |
| 284 | * |
269 | * |
| 285 | * @return Address space area on success or NULL on failure. |
270 | * @return Address space area on success or NULL on failure. |
| 286 | */ |
271 | */ |
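A hedged usage sketch follows. The parameter order comes from the signature below; AS_AREA_WRITE, the attribute value 0 and the example base address are illustrative assumptions.

```c
/* Map one anonymous, read/write page at an example (page-aligned) base
 * address. The anonymous backend allocates frames on demand; attrs = 0
 * means no special attributes in this sketch. */
uintptr_t base = 0x10000000;	/* illustrative; must be page-aligned */
as_area_t *area = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE,
    PAGE_SIZE, base, 0, &anon_backend, NULL);
if (!area) {
	/* Misaligned base, zero size or conflict with an existing area. */
}
```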
| 287 | as_area_t * |
272 | as_area_t * |
| 288 | as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs, |
273 | as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs, |
| 289 | mem_backend_t *backend, mem_backend_data_t *backend_data) |
274 | mem_backend_t *backend, mem_backend_data_t *backend_data) |
| 290 | { |
275 | { |
| 291 | ipl_t ipl; |
276 | ipl_t ipl; |
| 292 | as_area_t *a; |
277 | as_area_t *a; |
| 293 | 278 | ||
| 294 | if (base % PAGE_SIZE) |
279 | if (base % PAGE_SIZE) |
| Line 310... | Line 295... | ||
| 310 | return NULL; |
295 | return NULL; |
| 311 | } |
296 | } |
| 312 | 297 | ||
| 313 | a = (as_area_t *) malloc(sizeof(as_area_t), 0); |
298 | a = (as_area_t *) malloc(sizeof(as_area_t), 0); |
| 314 | 299 | ||
| 315 | mutex_initialize(&a->lock); |
300 | mutex_initialize(&a->lock, MUTEX_PASSIVE); |
| 316 | 301 | ||
| 317 | a->as = as; |
302 | a->as = as; |
| 318 | a->flags = flags; |
303 | a->flags = flags; |
| 319 | a->attributes = attrs; |
304 | a->attributes = attrs; |
| 320 | a->pages = SIZE2FRAMES(size); |
305 | a->pages = SIZE2FRAMES(size); |
| Line 336... | Line 321... | ||
| 336 | return a; |
321 | return a; |
| 337 | } |
322 | } |
| 338 | 323 | ||
| 339 | /** Find address space area and change it. |
324 | /** Find address space area and change it. |
| 340 | * |
325 | * |
| 341 | * @param as Address space. |
326 | * @param as Address space. |
| 342 | * @param address Virtual address belonging to the area to be changed. Must be |
327 | * @param address Virtual address belonging to the area to be changed. |
| 343 | * page-aligned. |
328 | * Must be page-aligned. |
| 344 | * @param size New size of the virtual memory block starting at address. |
329 | * @param size New size of the virtual memory block starting at |
| - | 330 | * address. |
|
| 345 | * @param flags Flags influencing the remap operation. Currently unused. |
331 | * @param flags Flags influencing the remap operation. Currently unused. |
| 346 | * |
332 | * |
| 347 | * @return Zero on success or a value from @ref errno.h otherwise. |
333 | * @return Zero on success or a value from @ref errno.h otherwise. |
| 348 | */ |
334 | */ |
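A short, hedged example of the call; the error codes mentioned in the comment are those listed in the @return note above.

```c
/* Grow the virtual memory block starting at 'base' to two pages.
 * Zero means success; otherwise an errno.h value is returned, e.g.
 * ENOENT if no area covers the address or EPERM if the area may not
 * be resized. */
int rc = as_area_resize(as, base, 2 * PAGE_SIZE, 0);
if (rc != 0) {
	/* Resize was refused. */
}
```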
| 349 | int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags) |
335 | int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags) |
| 350 | { |
336 | { |
| 351 | as_area_t *area; |
337 | as_area_t *area; |
| 352 | ipl_t ipl; |
338 | ipl_t ipl; |
| Line 397... | Line 383... | ||
| 397 | return EPERM; |
383 | return EPERM; |
| 398 | } |
384 | } |
| 399 | 385 | ||
| 400 | if (pages < area->pages) { |
386 | if (pages < area->pages) { |
| 401 | bool cond; |
387 | bool cond; |
| 402 | uintptr_t start_free = area->base + pages*PAGE_SIZE; |
388 | uintptr_t start_free = area->base + pages * PAGE_SIZE; |
| 403 | 389 | ||
| 404 | /* |
390 | /* |
| 405 | * Shrinking the area. |
391 | * Shrinking the area. |
| 406 | * No need to check for overlaps. |
392 | * No need to check for overlaps. |
| 407 | */ |
393 | */ |
| 408 | 394 | ||
| 409 | /* |
395 | /* |
| 410 | * Start TLB shootdown sequence. |
396 | * Start TLB shootdown sequence. |
| 411 | */ |
397 | */ |
| 412 | tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + |
398 | tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + |
| 413 | pages * PAGE_SIZE, area->pages - pages); |
399 | pages * PAGE_SIZE, area->pages - pages); |
| 414 | 400 | ||
| 415 | /* |
401 | /* |
| 416 | * Remove frames belonging to used space starting from |
402 | * Remove frames belonging to used space starting from |
| 417 | * the highest addresses downwards until an overlap with |
403 | * the highest addresses downwards until an overlap with |
| Line 450... | Line 436... | ||
| 450 | * address space area. |
436 | * address space area. |
| 451 | */ |
437 | */ |
| 452 | 438 | ||
| 453 | cond = false; /* we are almost done */ |
439 | cond = false; /* we are almost done */ |
| 454 | i = (start_free - b) >> PAGE_WIDTH; |
440 | i = (start_free - b) >> PAGE_WIDTH; |
| 455 | if (!used_space_remove(area, start_free, c - i)) |
441 | if (!used_space_remove(area, start_free, |
| - | 442 | c - i)) |
|
| 456 | panic("Could not remove used space.\n"); |
443 | panic("Could not remove used " |
| - | 444 | "space.\n"); |
|
| 457 | } else { |
445 | } else { |
| 458 | /* |
446 | /* |
| 459 | * The interval of used space can be |
447 | * The interval of used space can be |
| 460 | * completely removed. |
448 | * completely removed. |
| 461 | */ |
449 | */ |
| 462 | if (!used_space_remove(area, b, c)) |
450 | if (!used_space_remove(area, b, c)) |
| 463 | panic("Could not remove used space.\n"); |
451 | panic("Could not remove used " |
| - | 452 | "space.\n"); |
|
| 464 | } |
453 | } |
| 465 | 454 | ||
| 466 | for (; i < c; i++) { |
455 | for (; i < c; i++) { |
| 467 | pte_t *pte; |
456 | pte_t *pte; |
| 468 | 457 | ||
| Line 520... | Line 509... | ||
| 520 | return 0; |
509 | return 0; |
| 521 | } |
510 | } |
| 522 | 511 | ||
| 523 | /** Destroy address space area. |
512 | /** Destroy address space area. |
| 524 | * |
513 | * |
| 525 | * @param as Address space. |
514 | * @param as Address space. |
| 526 | * @param address Address withing the area to be deleted. |
515 | * @param address Address within the area to be deleted. |
| 527 | * |
516 | * |
| 528 | * @return Zero on success or a value from @ref errno.h on failure. |
517 | * @return Zero on success or a value from @ref errno.h on failure. |
| 529 | */ |
518 | */ |
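A hedged one-line counterpart for tearing an area down; the ENOENT case is an assumption consistent with the other area calls in this file.

```c
/* Remove the whole area that contains 'base'. Zero on success;
 * an errno.h value (assumed ENOENT) if no area covers the address. */
int rc = as_area_destroy(as, base);
```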
| 530 | int as_area_destroy(as_t *as, uintptr_t address) |
519 | int as_area_destroy(as_t *as, uintptr_t address) |
| 531 | { |
520 | { |
| 532 | as_area_t *area; |
521 | as_area_t *area; |
| 533 | uintptr_t base; |
522 | uintptr_t base; |
| Line 620... | Line 609... | ||
| 620 | * If the source address space area has not been shared so far, |
609 | * If the source address space area has not been shared so far, |
| 621 | * a new sh_info is created. The new address space area simply gets the |
610 | * a new sh_info is created. The new address space area simply gets the |
| 622 | * sh_info of the source area. The process of duplicating the |
611 | * sh_info of the source area. The process of duplicating the |
| 623 | * mapping is done through the backend share function. |
612 | * mapping is done through the backend share function. |
| 624 | * |
613 | * |
| 625 | * @param src_as Pointer to source address space. |
614 | * @param src_as Pointer to source address space. |
| 626 | * @param src_base Base address of the source address space area. |
615 | * @param src_base Base address of the source address space area. |
| 627 | * @param acc_size Expected size of the source area. |
616 | * @param acc_size Expected size of the source area. |
| 628 | * @param dst_as Pointer to destination address space. |
617 | * @param dst_as Pointer to destination address space. |
| 629 | * @param dst_base Target base address. |
618 | * @param dst_base Target base address. |
| 630 | * @param dst_flags_mask Destination address space area flags mask. |
619 | * @param dst_flags_mask Destination address space area flags mask. |
| 631 | * |
620 | * |
| 632 | * @return Zero on success or ENOENT if there is no such task or if there is no |
621 | * @return Zero on success or ENOENT if there is no such task or if |
| 633 | * such address space area, EPERM if there was a problem in accepting the area |
622 | * there is no such address space area, EPERM if there was |
| - | 623 | * a problem in accepting the area or ENOMEM if there was a |
|
| 634 | * or ENOMEM if there was a problem in allocating destination address space |
624 | * problem in allocating destination address space area. |
| 635 | * area. ENOTSUP is returned if the address space area backend does not support |
625 | * ENOTSUP is returned if the address space area backend |
| 636 | * sharing. |
626 | * does not support sharing. |
| 637 | */ |
627 | */ |
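A hedged sketch of a typical call; the read-only flags mask is an illustrative choice, everything else follows the parameter list above.

```c
/* Duplicate the area at src_base from src_as into dst_as at dst_base,
 * allowing only read access in the destination. acc_size is the size
 * the caller expects the source area to have. */
int rc = as_area_share(src_as, src_base, acc_size,
    dst_as, dst_base, AS_AREA_READ);
if (rc != 0) {
	/* ENOENT, EPERM, ENOMEM or ENOTSUP, as described above. */
}
```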
| 638 | int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, |
628 | int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, |
| 639 | as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) |
629 | as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) |
| 640 | { |
630 | { |
| 641 | ipl_t ipl; |
631 | ipl_t ipl; |
| Line 692... | Line 682... | ||
| 692 | * Then it will be safe to unlock it. |
682 | * Then it will be safe to unlock it. |
| 693 | */ |
683 | */ |
| 694 | sh_info = src_area->sh_info; |
684 | sh_info = src_area->sh_info; |
| 695 | if (!sh_info) { |
685 | if (!sh_info) { |
| 696 | sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0); |
686 | sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0); |
| 697 | mutex_initialize(&sh_info->lock); |
687 | mutex_initialize(&sh_info->lock, MUTEX_PASSIVE); |
| 698 | sh_info->refcount = 2; |
688 | sh_info->refcount = 2; |
| 699 | btree_create(&sh_info->pagemap); |
689 | btree_create(&sh_info->pagemap); |
| 700 | src_area->sh_info = sh_info; |
690 | src_area->sh_info = sh_info; |
| 701 | /* |
691 | /* |
| 702 | * Call the backend to setup sharing. |
692 | * Call the backend to setup sharing. |
| Line 750... | Line 740... | ||
| 750 | 740 | ||
| 751 | /** Check access mode for address space area. |
741 | /** Check access mode for address space area. |
| 752 | * |
742 | * |
| 753 | * The address space area must be locked prior to this call. |
743 | * The address space area must be locked prior to this call. |
| 754 | * |
744 | * |
| 755 | * @param area Address space area. |
745 | * @param area Address space area. |
| 756 | * @param access Access mode. |
746 | * @param access Access mode. |
| 757 | * |
747 | * |
| 758 | * @return False if access violates area's permissions, true otherwise. |
748 | * @return False if access violates area's permissions, true |
| - | 749 | * otherwise. |
|
| 759 | */ |
750 | */ |
| 760 | bool as_area_check_access(as_area_t *area, pf_access_t access) |
751 | bool as_area_check_access(as_area_t *area, pf_access_t access) |
| 761 | { |
752 | { |
| 762 | int flagmap[] = { |
753 | int flagmap[] = { |
| 763 | [PF_ACCESS_READ] = AS_AREA_READ, |
754 | [PF_ACCESS_READ] = AS_AREA_READ, |
| Line 769... | Line 760... | ||
| 769 | return false; |
760 | return false; |
| 770 | 761 | ||
| 771 | return true; |
762 | return true; |
| 772 | } |
763 | } |
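For instance, a backend fault handler that already holds the area lock could reject a disallowed write like this (a hedged sketch using only identifiers visible in this file).

```c
/* The caller must hold area->lock (see the comment above). */
if (!as_area_check_access(area, PF_ACCESS_WRITE))
	return AS_PF_FAULT;	/* write access violates the area's flags */
```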
| 773 | 764 | ||
| - | 765 | /** Change address space area flags. |
|
| - | 766 | * |
|
| - | 767 | * The idea is to have the same data, but with a different access mode. |
|
| - | 768 | * This is needed e.g. for writing code into memory and then executing it. |
|
| - | 769 | * In order for this to work properly, this may copy the data |
|
| - | 770 | * into private anonymous memory (unless it's already there). |
|
| - | 771 | * |
|
| - | 772 | * @param as Address space. |
|
| - | 773 | * @param flags Flags of the area memory. |
|
| - | 774 | * @param address Address withing the area to be changed. |
|
| - | 775 | * |
|
| - | 776 | * @return Zero on success or a value from @ref errno.h on failure. |
|
| - | 777 | */ |
|
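The write-then-execute scenario mentioned above could be driven roughly as follows; AS_AREA_EXEC is assumed to be among the area flag bits (only AS_AREA_READ appears explicitly in this diff).

```c
/* After code has been written into a private anonymous area, flip the
 * area to read+execute. 'address' is any address inside the area. */
int rc = as_area_change_flags(as, AS_AREA_READ | AS_AREA_EXEC, address);
if (rc == ENOTSUP) {
	/* Shared or non-anonymous areas are not supported yet; see the
	 * anon_backend check in the function body below. */
}
```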
| - | 778 | int as_area_change_flags(as_t *as, int flags, uintptr_t address) |
|
| - | 779 | { |
|
| - | 780 | as_area_t *area; |
|
| - | 781 | uintptr_t base; |
|
| - | 782 | link_t *cur; |
|
| - | 783 | ipl_t ipl; |
|
| - | 784 | int page_flags; |
|
| - | 785 | uintptr_t *old_frame; |
|
| - | 786 | index_t frame_idx; |
|
| - | 787 | count_t used_pages; |
|
| - | 788 | ||
| - | 789 | /* Flags for the new memory mapping */ |
|
| - | 790 | page_flags = area_flags_to_page_flags(flags); |
|
| - | 791 | ||
| - | 792 | ipl = interrupts_disable(); |
|
| - | 793 | mutex_lock(&as->lock); |
|
| - | 794 | ||
| - | 795 | area = find_area_and_lock(as, address); |
|
| - | 796 | if (!area) { |
|
| - | 797 | mutex_unlock(&as->lock); |
|
| - | 798 | interrupts_restore(ipl); |
|
| - | 799 | return ENOENT; |
|
| - | 800 | } |
|
| - | 801 | ||
| - | 802 | if (area->sh_info || area->backend != &anon_backend) { |
|
| - | 803 | /* Copying shared areas not supported yet */ |
|
| - | 804 | /* Copying non-anonymous memory not supported yet */ |
|
| - | 805 | mutex_unlock(&area->lock); |
|
| - | 806 | mutex_unlock(&as->lock); |
|
| - | 807 | interrupts_restore(ipl); |
|
| - | 808 | return ENOTSUP; |
|
| - | 809 | } |
|
| - | 810 | ||
| - | 811 | base = area->base; |
|
| - | 812 | ||
| - | 813 | /* |
|
| - | 814 | * Compute total number of used pages in the used_space B+tree |
|
| - | 815 | */ |
|
| - | 816 | used_pages = 0; |
|
| - | 817 | ||
| - | 818 | for (cur = area->used_space.leaf_head.next; |
|
| - | 819 | cur != &area->used_space.leaf_head; cur = cur->next) { |
|
| - | 820 | btree_node_t *node; |
|
| - | 821 | unsigned int i; |
|
| - | 822 | ||
| - | 823 | node = list_get_instance(cur, btree_node_t, leaf_link); |
|
| - | 824 | for (i = 0; i < node->keys; i++) { |
|
| - | 825 | used_pages += (count_t) node->value[i]; |
|
| - | 826 | } |
|
| - | 827 | } |
|
| - | 828 | ||
| - | 829 | /* An array for storing frame numbers */ |
|
| - | 830 | old_frame = malloc(used_pages * sizeof(uintptr_t), 0); |
|
| - | 831 | ||
| - | 832 | /* |
|
| - | 833 | * Start TLB shootdown sequence. |
|
| - | 834 | */ |
|
| - | 835 | tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); |
|
| - | 836 | ||
| - | 837 | /* |
|
| - | 838 | * Remove used pages from page tables and remember their frame |
|
| - | 839 | * numbers. |
|
| - | 840 | */ |
|
| - | 841 | frame_idx = 0; |
|
| - | 842 | ||
| - | 843 | for (cur = area->used_space.leaf_head.next; |
|
| - | 844 | cur != &area->used_space.leaf_head; cur = cur->next) { |
|
| - | 845 | btree_node_t *node; |
|
| - | 846 | unsigned int i; |
|
| - | 847 | ||
| - | 848 | node = list_get_instance(cur, btree_node_t, leaf_link); |
|
| - | 849 | for (i = 0; i < node->keys; i++) { |
|
| - | 850 | uintptr_t b = node->key[i]; |
|
| - | 851 | count_t j; |
|
| - | 852 | pte_t *pte; |
|
| - | 853 | ||
| - | 854 | for (j = 0; j < (count_t) node->value[i]; j++) { |
|
| - | 855 | page_table_lock(as, false); |
|
| - | 856 | pte = page_mapping_find(as, b + j * PAGE_SIZE); |
|
| - | 857 | ASSERT(pte && PTE_VALID(pte) && |
|
| - | 858 | PTE_PRESENT(pte)); |
|
| - | 859 | old_frame[frame_idx++] = PTE_GET_FRAME(pte); |
|
| - | 860 | ||
| - | 861 | /* Remove old mapping */ |
|
| - | 862 | page_mapping_remove(as, b + j * PAGE_SIZE); |
|
| - | 863 | page_table_unlock(as, false); |
|
| - | 864 | } |
|
| - | 865 | } |
|
| - | 866 | } |
|
| - | 867 | ||
| - | 868 | /* |
|
| - | 869 | * Finish TLB shootdown sequence. |
|
| - | 870 | */ |
|
| - | 871 | ||
| - | 872 | tlb_invalidate_pages(as->asid, area->base, area->pages); |
|
| - | 873 | /* |
|
| - | 874 | * Invalidate potential software translation caches (e.g. TSB on |
|
| - | 875 | * sparc64). |
|
| - | 876 | */ |
|
| - | 877 | as_invalidate_translation_cache(as, area->base, area->pages); |
|
| - | 878 | tlb_shootdown_finalize(); |
|
| - | 879 | ||
| - | 880 | /* |
|
| - | 881 | * Set the new flags. |
|
| - | 882 | */ |
|
| - | 883 | area->flags = flags; |
|
| - | 884 | ||
| - | 885 | /* |
|
| - | 886 | * Map pages back in with new flags. This step is kept separate |
|
| - | 887 | * so that the memory area cannot be accessed with both the old and |
|
| - | 888 | * the new flags at once. |
|
| - | 889 | */ |
|
| - | 890 | frame_idx = 0; |
|
| - | 891 | ||
| - | 892 | for (cur = area->used_space.leaf_head.next; |
|
| - | 893 | cur != &area->used_space.leaf_head; cur = cur->next) { |
|
| - | 894 | btree_node_t *node; |
|
| - | 895 | unsigned int i; |
|
| - | 896 | ||
| - | 897 | node = list_get_instance(cur, btree_node_t, leaf_link); |
|
| - | 898 | for (i = 0; i < node->keys; i++) { |
|
| - | 899 | uintptr_t b = node->key[i]; |
|
| - | 900 | count_t j; |
|
| - | 901 | ||
| - | 902 | for (j = 0; j < (count_t) node->value[i]; j++) { |
|
| - | 903 | page_table_lock(as, false); |
|
| - | 904 | ||
| - | 905 | /* Insert the new mapping */ |
|
| - | 906 | page_mapping_insert(as, b + j * PAGE_SIZE, |
|
| - | 907 | old_frame[frame_idx++], page_flags); |
|
| - | 908 | ||
| - | 909 | page_table_unlock(as, false); |
|
| - | 910 | } |
|
| - | 911 | } |
|
| - | 912 | } |
|
| - | 913 | ||
| - | 914 | free(old_frame); |
|
| - | 915 | ||
| - | 916 | mutex_unlock(&area->lock); |
|
| - | 917 | mutex_unlock(&as->lock); |
|
| - | 918 | interrupts_restore(ipl); |
|
| - | 919 | ||
| - | 920 | return 0; |
|
| - | 921 | } |
|
| - | 922 | ||
| - | 923 | ||
| 774 | /** Handle page fault within the current address space. |
924 | /** Handle page fault within the current address space. |
| 775 | * |
925 | * |
| 776 | * This is the high-level page fault handler. It decides |
926 | * This is the high-level page fault handler. It decides whether the page fault |
| 777 | * whether the page fault can be resolved by any backend |
- | |
| 778 | * and if so, it invokes the backend to resolve the page |
927 | * can be resolved by any backend and if so, it invokes the backend to resolve |
| 779 | * fault. |
928 | * the page fault. |
| 780 | * |
929 | * |
| 781 | * Interrupts are assumed disabled. |
930 | * Interrupts are assumed disabled. |
| 782 | * |
931 | * |
| 783 | * @param page Faulting page. |
932 | * @param page Faulting page. |
| 784 | * @param access Access mode that caused the fault (i.e. read/write/exec). |
933 | * @param access Access mode that caused the page fault (i.e. |
| - | 934 | * read/write/exec). |
|
| 785 | * @param istate Pointer to interrupted state. |
935 | * @param istate Pointer to the interrupted state. |
| 786 | * |
936 | * |
| 787 | * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the |
937 | * @return AS_PF_FAULT on page fault, AS_PF_OK on success or |
| 788 | * fault was caused by copy_to_uspace() or copy_from_uspace(). |
938 | * AS_PF_DEFER if the fault was caused by copy_to_uspace() |
| - | 939 | * or copy_from_uspace(). |
|
| 789 | */ |
940 | */ |
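A hedged sketch of the expected caller, i.e. an architecture-level fault handler. The entry point name and ALIGN_DOWN are illustrative assumptions; the return values and the interrupts-disabled requirement come from the comment above.

```c
/* Invoked from the architecture's fault path with interrupts disabled;
 * 'fault_address' and 'istate' come from the trap frame. */
void arch_data_fault(uintptr_t fault_address, istate_t *istate)
{
	int rc = as_page_fault(ALIGN_DOWN(fault_address, PAGE_SIZE),
	    PF_ACCESS_READ, istate);

	if (rc == AS_PF_FAULT) {
		/* Unresolvable fault: terminate the task or panic. */
	}
	/* AS_PF_OK: a mapping was installed, restart the instruction.
	 * AS_PF_DEFER: the fault happened inside copy_to_uspace() or
	 * copy_from_uspace() and is handled by those routines. */
}
```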
| 790 | int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) |
941 | int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) |
| 791 | { |
942 | { |
| 792 | pte_t *pte; |
943 | pte_t *pte; |
| 793 | as_area_t *area; |
944 | as_area_t *area; |
| Line 829... | Line 980... | ||
| 829 | } |
980 | } |
| 830 | 981 | ||
| 831 | page_table_lock(AS, false); |
982 | page_table_lock(AS, false); |
| 832 | 983 | ||
| 833 | /* |
984 | /* |
| 834 | * To avoid a race condition between two page faults |
985 | * To avoid a race condition between two page faults on the same address, |
| 835 | * on the same address, we need to make sure |
- | |
| 836 | * the mapping has not already been inserted. |
986 | * we need to make sure the mapping has not already been inserted. |
| 837 | */ |
987 | */ |
| 838 | if ((pte = page_mapping_find(AS, page))) { |
988 | if ((pte = page_mapping_find(AS, page))) { |
| 839 | if (PTE_PRESENT(pte)) { |
989 | if (PTE_PRESENT(pte)) { |
| 840 | if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) || |
990 | if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) || |
| 841 | (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) || |
991 | (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) || |
| Line 885... | Line 1035... | ||
| 885 | * scheduling. Sleeping here would lead to deadlock on wakeup. Another |
1035 | * scheduling. Sleeping here would lead to deadlock on wakeup. Another |
| 886 | * thing which is forbidden in this context is locking the address space. |
1036 | * thing which is forbidden in this context is locking the address space. |
| 887 | * |
1037 | * |
| 888 | * When this function is entered, no spinlocks may be held. |
1038 | * When this function is entered, no spinlocks may be held. |
| 889 | * |
1039 | * |
| 890 | * @param old Old address space or NULL. |
1040 | * @param old Old address space or NULL. |
| 891 | * @param new New address space. |
1041 | * @param new New address space. |
| 892 | */ |
1042 | */ |
| 893 | void as_switch(as_t *old_as, as_t *new_as) |
1043 | void as_switch(as_t *old_as, as_t *new_as) |
| 894 | { |
1044 | { |
| 895 | DEADLOCK_PROBE_INIT(p_asidlock); |
1045 | DEADLOCK_PROBE_INIT(p_asidlock); |
| 896 | preemption_disable(); |
1046 | preemption_disable(); |
| Line 1157... | Line 1307... | ||
| 1157 | return EOK; |
1307 | return EOK; |
| 1158 | } |
1308 | } |
| 1159 | 1309 | ||
| 1160 | /** Convert address space area flags to page flags. |
1310 | /** Convert address space area flags to page flags. |
| 1161 | * |
1311 | * |
| 1162 | * @param aflags Flags of some address space area. |
1312 | * @param aflags Flags of some address space area. |
| 1163 | * |
1313 | * |
| 1164 | * @return Flags to be passed to page_mapping_insert(). |
1314 | * @return Flags to be passed to page_mapping_insert(). |
| 1165 | */ |
1315 | */ |
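A hedged illustration of the conversion; the PAGE_* side is an assumption about the paging layer, only the AS_AREA_* names appear in this file.

```c
/* Each AS_AREA_* permission is assumed to select the corresponding
 * PAGE_* bit expected by page_mapping_insert(); the exact PAGE_*
 * names are not shown in this diff. */
int pflags = area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE);
/* 'pflags' would then carry read/write permission bits plus whatever
 * the implementation always adds for present user mappings. */
```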
| 1166 | int area_flags_to_page_flags(int aflags) |
1316 | int area_flags_to_page_flags(int aflags) |
| 1167 | { |
1317 | { |
| 1168 | int flags; |
1318 | int flags; |
| 1169 | 1319 | ||
| Line 1187... | Line 1337... | ||
| 1187 | /** Compute flags for virtual address translation subsystem. |
1337 | /** Compute flags for virtual address translation subsystem. |
| 1188 | * |
1338 | * |
| 1189 | * The address space area must be locked. |
1339 | * The address space area must be locked. |
| 1190 | * Interrupts must be disabled. |
1340 | * Interrupts must be disabled. |
| 1191 | * |
1341 | * |
| 1192 | * @param a Address space area. |
1342 | * @param a Address space area. |
| 1193 | * |
1343 | * |
| 1194 | * @return Flags to be used in page_mapping_insert(). |
1344 | * @return Flags to be used in page_mapping_insert(). |
| 1195 | */ |
1345 | */ |
| 1196 | int as_area_get_flags(as_area_t *a) |
1346 | int as_area_get_flags(as_area_t *a) |
| 1197 | { |
1347 | { |
| 1198 | return area_flags_to_page_flags(a->flags); |
1348 | return area_flags_to_page_flags(a->flags); |
| 1199 | } |
1349 | } |
| 1200 | 1350 | ||
| 1201 | /** Create page table. |
1351 | /** Create page table. |
| 1202 | * |
1352 | * |
| 1203 | * Depending on architecture, create either address space |
1353 | * Depending on architecture, create either address space private or global page |
| 1204 | * private or global page table. |
1354 | * table. |
| 1205 | * |
1355 | * |
| 1206 | * @param flags Flags saying whether the page table is for kernel address space. |
1356 | * @param flags Flags saying whether the page table is for the kernel |
| - | 1357 | * address space. |
|
| 1207 | * |
1358 | * |
| 1208 | * @return First entry of the page table. |
1359 | * @return First entry of the page table. |
| 1209 | */ |
1360 | */ |
| 1210 | pte_t *page_table_create(int flags) |
1361 | pte_t *page_table_create(int flags) |
| 1211 | { |
1362 | { |
| 1212 | #ifdef __OBJC__ |
- | |
| 1213 | return [as_t page_table_create: flags]; |
- | |
| 1214 | #else |
- | |
| 1215 | ASSERT(as_operations); |
1363 | ASSERT(as_operations); |
| 1216 | ASSERT(as_operations->page_table_create); |
1364 | ASSERT(as_operations->page_table_create); |
| 1217 | 1365 | ||
| 1218 | return as_operations->page_table_create(flags); |
1366 | return as_operations->page_table_create(flags); |
| 1219 | #endif |
- | |
| 1220 | } |
1367 | } |
| 1221 | 1368 | ||
| 1222 | /** Destroy page table. |
1369 | /** Destroy page table. |
| 1223 | * |
1370 | * |
| 1224 | * Destroy page table in architecture specific way. |
1371 | * Destroy page table in architecture specific way. |
| 1225 | * |
1372 | * |
| 1226 | * @param page_table Physical address of PTL0. |
1373 | * @param page_table Physical address of PTL0. |
| 1227 | */ |
1374 | */ |
| 1228 | void page_table_destroy(pte_t *page_table) |
1375 | void page_table_destroy(pte_t *page_table) |
| 1229 | { |
1376 | { |
| 1230 | #ifdef __OBJC__ |
- | |
| 1231 | return [as_t page_table_destroy: page_table]; |
- | |
| 1232 | #else |
- | |
| 1233 | ASSERT(as_operations); |
1377 | ASSERT(as_operations); |
| 1234 | ASSERT(as_operations->page_table_destroy); |
1378 | ASSERT(as_operations->page_table_destroy); |
| 1235 | 1379 | ||
| 1236 | as_operations->page_table_destroy(page_table); |
1380 | as_operations->page_table_destroy(page_table); |
| 1237 | #endif |
- | |
| 1238 | } |
1381 | } |
| 1239 | 1382 | ||
| 1240 | /** Lock page table. |
1383 | /** Lock page table. |
| 1241 | * |
1384 | * |
| 1242 | * This function should be called before any page_mapping_insert(), |
1385 | * This function should be called before any page_mapping_insert(), |
| Line 1244... | Line 1387... | ||
| 1244 | * |
1387 | * |
| 1245 | * Locking order is such that address space areas must be locked |
1388 | * Locking order is such that address space areas must be locked |
| 1246 | * prior to this call. Address space can be locked prior to this |
1389 | * prior to this call. Address space can be locked prior to this |
| 1247 | * call in which case the lock argument is false. |
1390 | * call in which case the lock argument is false. |
| 1248 | * |
1391 | * |
| 1249 | * @param as Address space. |
1392 | * @param as Address space. |
| 1250 | * @param lock If false, do not attempt to lock as->lock. |
1393 | * @param lock If false, do not attempt to lock as->lock. |
| 1251 | */ |
1394 | */ |
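The pattern this comment prescribes is used throughout the file (for example in as_area_change_flags() above); a hedged distilled form:

```c
/* Both as->lock and the relevant area->lock are already held, so the
 * 'false' argument tells page_table_lock() not to take as->lock again. */
page_table_lock(as, false);
pte_t *pte = page_mapping_find(as, page);
if (pte && PTE_PRESENT(pte)) {
	/* Inspect or modify the mapping while the page table is locked. */
}
page_table_unlock(as, false);
```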
| 1252 | void page_table_lock(as_t *as, bool lock) |
1395 | void page_table_lock(as_t *as, bool lock) |
| 1253 | { |
1396 | { |
| 1254 | #ifdef __OBJC__ |
- | |
| 1255 | [as page_table_lock: lock]; |
- | |
| 1256 | #else |
- | |
| 1257 | ASSERT(as_operations); |
1397 | ASSERT(as_operations); |
| 1258 | ASSERT(as_operations->page_table_lock); |
1398 | ASSERT(as_operations->page_table_lock); |
| 1259 | 1399 | ||
| 1260 | as_operations->page_table_lock(as, lock); |
1400 | as_operations->page_table_lock(as, lock); |
| 1261 | #endif |
- | |
| 1262 | } |
1401 | } |
| 1263 | 1402 | ||
| 1264 | /** Unlock page table. |
1403 | /** Unlock page table. |
| 1265 | * |
1404 | * |
| 1266 | * @param as Address space. |
1405 | * @param as Address space. |
| 1267 | * @param unlock If false, do not attempt to unlock as->lock. |
1406 | * @param unlock If false, do not attempt to unlock as->lock. |
| 1268 | */ |
1407 | */ |
| 1269 | void page_table_unlock(as_t *as, bool unlock) |
1408 | void page_table_unlock(as_t *as, bool unlock) |
| 1270 | { |
1409 | { |
| 1271 | #ifdef __OBJC__ |
- | |
| 1272 | [as page_table_unlock: unlock]; |
- | |
| 1273 | #else |
- | |
| 1274 | ASSERT(as_operations); |
1410 | ASSERT(as_operations); |
| 1275 | ASSERT(as_operations->page_table_unlock); |
1411 | ASSERT(as_operations->page_table_unlock); |
| 1276 | 1412 | ||
| 1277 | as_operations->page_table_unlock(as, unlock); |
1413 | as_operations->page_table_unlock(as, unlock); |
| 1278 | #endif |
- | |
| 1279 | } |
1414 | } |
| 1280 | 1415 | ||
| 1281 | 1416 | ||
| 1282 | /** Find address space area and lock it. |
1417 | /** Find address space area and lock it. |
| 1283 | * |
1418 | * |
| 1284 | * The address space must be locked and interrupts must be disabled. |
1419 | * The address space must be locked and interrupts must be disabled. |
| 1285 | * |
1420 | * |
| 1286 | * @param as Address space. |
1421 | * @param as Address space. |
| 1287 | * @param va Virtual address. |
1422 | * @param va Virtual address. |
| 1288 | * |
1423 | * |
| 1289 | * @return Locked address space area containing va on success or NULL on |
1424 | * @return Locked address space area containing va on success or |
| 1290 | * failure. |
1425 | * NULL on failure. |
| 1291 | */ |
1426 | */ |
| 1292 | as_area_t *find_area_and_lock(as_t *as, uintptr_t va) |
1427 | as_area_t *find_area_and_lock(as_t *as, uintptr_t va) |
| 1293 | { |
1428 | { |
| 1294 | as_area_t *a; |
1429 | as_area_t *a; |
| 1295 | btree_node_t *leaf, *lnode; |
1430 | btree_node_t *leaf, *lnode; |
| Line 1337... | Line 1472... | ||
| 1337 | 1472 | ||
| 1338 | /** Check area conflicts with other areas. |
1473 | /** Check area conflicts with other areas. |
| 1339 | * |
1474 | * |
| 1340 | * The address space must be locked and interrupts must be disabled. |
1475 | * The address space must be locked and interrupts must be disabled. |
| 1341 | * |
1476 | * |
| 1342 | * @param as Address space. |
1477 | * @param as Address space. |
| 1343 | * @param va Starting virtual address of the area being tested. |
1478 | * @param va Starting virtual address of the area being tested. |
| 1344 | * @param size Size of the area being tested. |
1479 | * @param size Size of the area being tested. |
| 1345 | * @param avoid_area Do not touch this area. |
1480 | * @param avoid_area Do not touch this area. |
| 1346 | * |
1481 | * |
| 1347 | * @return True if there is no conflict, false otherwise. |
1482 | * @return True if there is no conflict, false otherwise. |
| 1348 | */ |
1483 | */ |
| - | 1484 | bool |
|
| 1349 | bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, |
1485 | check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area) |
| 1350 | as_area_t *avoid_area) |
- | |
| 1351 | { |
1486 | { |
| 1352 | as_area_t *a; |
1487 | as_area_t *a; |
| 1353 | btree_node_t *leaf, *node; |
1488 | btree_node_t *leaf, *node; |
| 1354 | unsigned int i; |
1489 | unsigned int i; |
| 1355 | 1490 | ||
| Line 1434... | Line 1569... | ||
| 1434 | as_area_t *src_area; |
1569 | as_area_t *src_area; |
| 1435 | size_t size; |
1570 | size_t size; |
| 1436 | 1571 | ||
| 1437 | ipl = interrupts_disable(); |
1572 | ipl = interrupts_disable(); |
| 1438 | src_area = find_area_and_lock(AS, base); |
1573 | src_area = find_area_and_lock(AS, base); |
| 1439 | if (src_area){ |
1574 | if (src_area) { |
| 1440 | size = src_area->pages * PAGE_SIZE; |
1575 | size = src_area->pages * PAGE_SIZE; |
| 1441 | mutex_unlock(&src_area->lock); |
1576 | mutex_unlock(&src_area->lock); |
| 1442 | } else { |
1577 | } else { |
| 1443 | size = 0; |
1578 | size = 0; |
| 1444 | } |
1579 | } |
| Line 1448... | Line 1583... | ||
| 1448 | 1583 | ||
| 1449 | /** Mark portion of address space area as used. |
1584 | /** Mark portion of address space area as used. |
| 1450 | * |
1585 | * |
| 1451 | * The address space area must be already locked. |
1586 | * The address space area must be already locked. |
| 1452 | * |
1587 | * |
| 1453 | * @param a Address space area. |
1588 | * @param a Address space area. |
| 1454 | * @param page First page to be marked. |
1589 | * @param page First page to be marked. |
| 1455 | * @param count Number of page to be marked. |
1590 | * @param count Number of page to be marked. |
| 1456 | * |
1591 | * |
| 1457 | * @return 0 on failure and 1 on success. |
1592 | * @return Zero on failure and non-zero on success. |
| 1458 | */ |
1593 | */ |
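A hedged sketch of the typical pairing with page_mapping_insert(); the pairing itself is an assumption based on how backends use this interface and is not shown in this diff.

```c
/* 'area' is locked and 'frame' holds the physical frame just allocated
 * for the virtual page 'page'. Install the mapping, then record the
 * page in the area's used_space B+tree. */
page_mapping_insert(AS, page, frame, as_area_get_flags(area));
if (!used_space_insert(area, page, 1))
	panic("Could not insert used space.\n");
```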
| 1459 | int used_space_insert(as_area_t *a, uintptr_t page, count_t count) |
1594 | int used_space_insert(as_area_t *a, uintptr_t page, count_t count) |
| 1460 | { |
1595 | { |
| 1461 | btree_node_t *leaf, *node; |
1596 | btree_node_t *leaf, *node; |
| 1462 | count_t pages; |
1597 | count_t pages; |
| Line 1722... | Line 1857... | ||
| 1722 | return 1; |
1857 | return 1; |
| 1723 | } |
1858 | } |
| 1724 | } |
1859 | } |
| 1725 | } |
1860 | } |
| 1726 | 1861 | ||
| 1727 | panic("Inconsistency detected while adding %" PRIc " pages of used space at " |
1862 | panic("Inconsistency detected while adding %" PRIc " pages of used " |
| 1728 | "%p.\n", count, page); |
1863 | "space at %p.\n", count, page); |
| 1729 | } |
1864 | } |
| 1730 | 1865 | ||
| 1731 | /** Mark portion of address space area as unused. |
1866 | /** Mark portion of address space area as unused. |
| 1732 | * |
1867 | * |
| 1733 | * The address space area must be already locked. |
1868 | * The address space area must be already locked. |
| 1734 | * |
1869 | * |
| 1735 | * @param a Address space area. |
1870 | * @param a Address space area. |
| 1736 | * @param page First page to be marked. |
1871 | * @param page First page to be marked. |
| 1737 | * @param count Number of page to be marked. |
1872 | * @param count Number of page to be marked. |
| 1738 | * |
1873 | * |
| 1739 | * @return 0 on failure and 1 on success. |
1874 | * @return Zero on failure and non-zero on success. |
| 1740 | */ |
1875 | */ |
| 1741 | int used_space_remove(as_area_t *a, uintptr_t page, count_t count) |
1876 | int used_space_remove(as_area_t *a, uintptr_t page, count_t count) |
| 1742 | { |
1877 | { |
| 1743 | btree_node_t *leaf, *node; |
1878 | btree_node_t *leaf, *node; |
| 1744 | count_t pages; |
1879 | count_t pages; |
| Line 1901... | Line 2036... | ||
| 1901 | return 0; |
2036 | return 0; |
| 1902 | } |
2037 | } |
| 1903 | } |
2038 | } |
| 1904 | 2039 | ||
| 1905 | error: |
2040 | error: |
| 1906 | panic("Inconsistency detected while removing %" PRIc " pages of used space " |
2041 | panic("Inconsistency detected while removing %" PRIc " pages of used " |
| 1907 | "from %p.\n", count, page); |
2042 | "space from %p.\n", count, page); |
| 1908 | } |
2043 | } |
| 1909 | 2044 | ||
| 1910 | /** Remove reference to address space area share info. |
2045 | /** Remove reference to address space area share info. |
| 1911 | * |
2046 | * |
| 1912 | * If the reference count drops to 0, the sh_info is deallocated. |
2047 | * If the reference count drops to 0, the sh_info is deallocated. |
| 1913 | * |
2048 | * |
| 1914 | * @param sh_info Pointer to address space area share info. |
2049 | * @param sh_info Pointer to address space area share info. |
| 1915 | */ |
2050 | */ |
| 1916 | void sh_info_remove_reference(share_info_t *sh_info) |
2051 | void sh_info_remove_reference(share_info_t *sh_info) |
| 1917 | { |
2052 | { |
| 1918 | bool dealloc = false; |
2053 | bool dealloc = false; |
| 1919 | 2054 | ||
| Line 1964... | Line 2099... | ||
| 1964 | unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags) |
2099 | unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags) |
| 1965 | { |
2100 | { |
| 1966 | return (unative_t) as_area_resize(AS, address, size, 0); |
2101 | return (unative_t) as_area_resize(AS, address, size, 0); |
| 1967 | } |
2102 | } |
| 1968 | 2103 | ||
| - | 2104 | /** Wrapper for as_area_change_flags(). */ |
|
| - | 2105 | unative_t sys_as_area_change_flags(uintptr_t address, int flags) |
|
| - | 2106 | { |
|
| - | 2107 | return (unative_t) as_area_change_flags(AS, flags, address); |
|
| - | 2108 | } |
|
| - | 2109 | ||
| 1969 | /** Wrapper for as_area_destroy(). */ |
2110 | /** Wrapper for as_area_destroy(). */ |
| 1970 | unative_t sys_as_area_destroy(uintptr_t address) |
2111 | unative_t sys_as_area_destroy(uintptr_t address) |
| 1971 | { |
2112 | { |
| 1972 | return (unative_t) as_area_destroy(AS, address); |
2113 | return (unative_t) as_area_destroy(AS, address); |
| 1973 | } |
2114 | } |
| 1974 | 2115 | ||
| 1975 | /** Print out information about address space. |
2116 | /** Print out information about address space. |
| 1976 | * |
2117 | * |
| 1977 | * @param as Address space. |
2118 | * @param as Address space. |
| 1978 | */ |
2119 | */ |
| 1979 | void as_print(as_t *as) |
2120 | void as_print(as_t *as) |
| 1980 | { |
2121 | { |
| 1981 | ipl_t ipl; |
2122 | ipl_t ipl; |
| 1982 | 2123 | ||
| Line 1994... | Line 2135... | ||
| 1994 | unsigned int i; |
2135 | unsigned int i; |
| 1995 | for (i = 0; i < node->keys; i++) { |
2136 | for (i = 0; i < node->keys; i++) { |
| 1996 | as_area_t *area = node->value[i]; |
2137 | as_area_t *area = node->value[i]; |
| 1997 | 2138 | ||
| 1998 | mutex_lock(&area->lock); |
2139 | mutex_lock(&area->lock); |
| 1999 | printf("as_area: %p, base=%p, pages=%" PRIc " (%p - %p)\n", |
2140 | printf("as_area: %p, base=%p, pages=%" PRIc |
| 2000 | area, area->base, area->pages, area->base, |
2141 | " (%p - %p)\n", area, area->base, area->pages, |
| 2001 | area->base + FRAMES2SIZE(area->pages)); |
2142 | area->base, area->base + FRAMES2SIZE(area->pages)); |
| 2002 | mutex_unlock(&area->lock); |
2143 | mutex_unlock(&area->lock); |
| 2003 | } |
2144 | } |
| 2004 | } |
2145 | } |
| 2005 | 2146 | ||
| 2006 | mutex_unlock(&as->lock); |
2147 | mutex_unlock(&as->lock); |