
/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief   Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <preemption.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

#ifndef __OBJC__
/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;
#endif

/**
 * This lock serializes access to the ASID subsystem.
 * It protects:
 * - inactive_as_with_asid_head list
 * - as->asid of each as_t object
 * - asids_allocated counter
 */
SPINLOCK_INITIALIZE(asidlock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

#ifndef __OBJC__
static int as_constructor(void *obj, int flags)
{
    as_t *as = (as_t *) obj;
    int rc;

    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock, MUTEX_PASSIVE);

    rc = as_constructor_arch(as, flags);

    return rc;
}

static int as_destructor(void *obj)
{
    as_t *as = (as_t *) obj;

    return as_destructor_arch(as);
}
#endif

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();

#ifndef __OBJC__
    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
#endif

    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space
 *     is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

#ifdef __OBJC__
    as = [as_t new];
    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock, MUTEX_PASSIVE);
    (void) as_constructor_arch(as, flags);
#else
    as = (as_t *) slab_alloc(as_slab, 0);
#endif
    (void) as_create_arch(as, 0);

    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    atomic_set(&as->refcount, 0);
    as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
    as->genarch.page_table = page_table_create(flags);
#else
    page_table_create(flags);
#endif

    return as;
}
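
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * task-creation path creates a user address space and drops it again
 * once nothing references it. The reference counting shown here is
 * hypothetical glue; the real callers live in the task code.
 *
 *    as_t *as = as_create(0);                  // user space, no ASID yet
 *    atomic_inc(&as->refcount);                // the new task holds a reference
 *    ...
 *    if (atomic_predec(&as->refcount) == 0)
 *        as_destroy(as);                       // last reference dropped
 */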

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 *
 * We know that we don't hold any spinlock.
 */
void as_destroy(as_t *as)
{
    ipl_t ipl;
    bool cond;
    DEADLOCK_PROBE_INIT(p_asidlock);

    ASSERT(atomic_get(&as->refcount) == 0);

    /*
     * Since there is no reference to this area,
     * it is safe not to lock its mutex.
     */

    /*
     * We need to avoid deadlock between TLB shootdown and asidlock.
     * We therefore try to take asidlock conditionally and if we don't
     * succeed, we enable interrupts and try again. This is done while
     * preemption is disabled to prevent nested context switches. We also
     * depend on the fact that so far no spinlocks are held.
     */
    preemption_disable();
    ipl = interrupts_read();
retry:
    interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }
    preemption_enable();    /* Interrupts disabled, enable preemption */
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
        if (as != AS && as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);
        asid_put(as->asid);
    }
    spinlock_unlock(&asidlock);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    for (cond = true; cond; ) {
        btree_node_t *node;

        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
        node = list_get_instance(as->as_area_btree.leaf_head.next,
            btree_node_t, leaf_link);

        if ((cond = node->keys)) {
            as_area_destroy(as, node->key[0]);
        }
    }

    btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
    page_table_destroy(as->genarch.page_table);
#else
    page_table_destroy(NULL);
#endif

    interrupts_restore(ipl);

#ifdef __OBJC__
    [as free];
#else
    slab_free(as_slab, as);
#endif
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
           mem_backend_t *backend, mem_backend_data_t *backend_data)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&a->lock, MUTEX_PASSIVE);

    a->as = as;
    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;
    a->sh_info = NULL;
    a->backend = backend;
    if (backend_data)
        a->backend_data = *backend_data;
    else
        memsetb(&a->backend_data, sizeof(a->backend_data), 0);

    btree_create(&a->used_space);

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}
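
/*
 * Usage sketch (illustrative only): mapping one page of anonymous,
 * zero-filled, read-write memory. The base address and the enclosing
 * address space are assumptions of this example.
 *
 *    as_area_t *a = as_area_create(as,
 *        AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, PAGE_SIZE,
 *        base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *    if (!a) {
 *        // base was not page-aligned, the area would conflict with an
 *        // existing area, or write+execute was requested
 *    }
 */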

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be
 *     page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->backend == &phys_backend) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    if (pages < area->pages) {
        bool cond;
        uintptr_t start_free = area->base + pages * PAGE_SIZE;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        /*
         * Start TLB shootdown sequence.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
            pages * PAGE_SIZE, area->pages - pages);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        for (cond = true; cond;) {
            btree_node_t *node;

            ASSERT(!list_empty(&area->used_space.leaf_head));
            node =
                list_get_instance(area->used_space.leaf_head.prev,
                btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                uintptr_t b = node->key[node->keys - 1];
                count_t c =
                    (count_t) node->value[node->keys - 1];
                unsigned int i = 0;

                if (overlaps(b, c * PAGE_SIZE, area->base,
                    pages * PAGE_SIZE)) {

                    if (b + c * PAGE_SIZE <= start_free) {
                        /*
                         * The whole interval fits
                         * completely in the resized
                         * address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval corresponding
                     * to b and c overlaps with the resized
                     * address space area.
                     */

                    cond = false;   /* we are almost done */
                    i = (start_free - b) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free, c - i))
                        panic("Could not remove used space.\n");
                } else {
                    /*
                     * The interval of used space can be
                     * completely removed.
                     */
                    if (!used_space_remove(area, b, c))
                        panic("Could not remove used space.\n");
                }

                for (; i < c; i++) {
                    pte_t *pte;

                    page_table_lock(as, false);
                    pte = page_mapping_find(as, b +
                        i * PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) &&
                        PTE_PRESENT(pte));
                    if (area->backend &&
                        area->backend->frame_free) {
                        area->backend->frame_free(area,
                            b + i * PAGE_SIZE,
                            PTE_GET_FRAME(pte));
                    }
                    page_mapping_remove(as, b +
                        i * PAGE_SIZE);
                    page_table_unlock(as, false);
                }
            }
        }

        /*
         * Finish TLB shootdown sequence.
         */

        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
            area->pages - pages);
        /*
         * Invalidate software translation caches (e.g. TSB on sparc64).
         */
        as_invalidate_translation_cache(as, area->base +
            pages * PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();

    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
            area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}
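
/*
 * Usage sketch (illustrative only): growing an area created at `base`
 * from one page to four. Note that `size` is counted from `address`,
 * not appended to the current size.
 *
 *    int rc = as_area_resize(as, base, 4 * PAGE_SIZE, 0);
 *    // rc is ENOENT, ENOTSUP, EPERM, EADDRNOTAVAIL or 0, see above
 */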

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
    as_area_t *area;
    uintptr_t base;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    base = area->base;

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /*
     * Visit only the pages mapped by used_space B+tree.
     */
    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;
            pte_t *pte;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                if (area->backend &&
                    area->backend->frame_free) {
                    area->backend->frame_free(area, b +
                        j * PAGE_SIZE, PTE_GET_FRAME(pte));
                }
                page_mapping_remove(as, b + j * PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);
    /*
     * Invalidate potential software translation caches (e.g. TSB on
     * sparc64).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize();

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
    return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success or ENOENT if there is no such task or if there is no
 * such address space area, EPERM if there was a problem in accepting the area
 * or ENOMEM if there was a problem in allocating destination address space
 * area. ENOTSUP is returned if the address space area backend does not support
 * sharing.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
    ipl_t ipl;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;
    share_info_t *sh_info;
    mem_backend_t *src_backend;
    mem_backend_data_t src_backend_data;

    ipl = interrupts_disable();
    mutex_lock(&src_as->lock);
    src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (!src_area->backend || !src_area->backend->share) {
        /*
         * There is no backend or the backend does not
         * know how to share the area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    src_backend = src_area->backend;
    src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if (src_size != acc_size ||
        (src_flags & dst_flags_mask) != dst_flags_mask) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    /*
     * Now we are committed to sharing the area.
     * First, prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    sh_info = src_area->sh_info;
    if (!sh_info) {
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
        sh_info->refcount = 2;
        btree_create(&sh_info->pagemap);
        src_area->sh_info = sh_info;
        /*
         * Call the backend to setup sharing.
         */
        src_area->backend->share(src_area);
    } else {
        mutex_lock(&sh_info->lock);
        sh_info->refcount++;
        mutex_unlock(&sh_info->lock);
    }

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);

        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_as->lock);
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);

    interrupts_restore(ipl);

    return 0;
}
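
/*
 * Usage sketch (illustrative only): exporting a read-only view of a
 * server task's area into a client task's address space. Both areas
 * then reference the same sh_info pagemap; `server` and `client` are
 * hypothetical task_t pointers.
 *
 *    int rc = as_area_share(server->as, src_base, size,
 *        client->as, dst_base, AS_AREA_READ | AS_AREA_CACHEABLE);
 */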

/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}

/** Change address area flags.
 *
 * The idea is to have the same data, but with a different access mode.
 * This is needed e.g. for writing code into memory and then executing it.
 * In order for this to work properly, this may copy the data
 * into private anonymous memory (unless it's already there).
 *
 * @param as Address space.
 * @param flags Flags of the area memory.
 * @param address Address within the area to be changed.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_change_flags(as_t *as, int flags, uintptr_t address)
{
    as_area_t *area;
    uintptr_t base;
    link_t *cur;
    ipl_t ipl;
    int page_flags;
    uintptr_t *old_frame;
    index_t frame_idx;
    count_t used_pages;

    /* Flags for the new memory mapping */
    page_flags = area_flags_to_page_flags(flags);

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->sh_info || area->backend != &anon_backend) {
        /* Copying shared areas not supported yet */
        /* Copying non-anonymous memory not supported yet */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    base = area->base;

    /*
     * Compute total number of used pages in the used_space B+tree
     */
    used_pages = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            used_pages += (count_t) node->value[i];
        }
    }

    /* An array for storing frame numbers */
    old_frame = malloc(used_pages * sizeof(uintptr_t), 0);

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /*
     * Remove used pages from page tables and remember their frame
     * numbers.
     */
    frame_idx = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;
            pte_t *pte;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                old_frame[frame_idx++] = PTE_GET_FRAME(pte);

                /* Remove old mapping */
                page_mapping_remove(as, b + j * PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);
    /*
     * Invalidate potential software translation caches (e.g. TSB on
     * sparc64).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize();

    /*
     * Map pages back in with new flags. This step is kept separate
     * so that there's no instant when the memory area could be
     * accessed with both the old and the new flags at once.
     */
    frame_idx = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);

                /* Insert the new mapping */
                page_mapping_insert(as, b + j * PAGE_SIZE,
                    old_frame[frame_idx++], page_flags);

                page_table_unlock(as, false);
            }
        }
    }

    free(old_frame);

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}
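
/*
 * Usage sketch (illustrative only): a write-then-execute sequence.
 * Because as_area_create() refuses writable executable areas, code is
 * first written into a writable anonymous area, which is then
 * re-mapped executable.
 *
 *    // area at `base` was created AS_AREA_READ | AS_AREA_WRITE
 *    // ... copy instructions into the area ...
 *    int rc = as_area_change_flags(as, AS_AREA_READ | AS_AREA_EXEC, base);
 */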

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;

    if (!THREAD)
        return AS_PF_FAULT;

    ASSERT(AS);

    mutex_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (!area->backend || !area->backend->page_fault) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                page_table_unlock(AS, false);
                mutex_unlock(&area->lock);
                mutex_unlock(&AS->lock);
                return AS_PF_OK;
            }
        }
    }

    /*
     * Resort to the backend page fault handler.
     */
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}
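
/*
 * Caller sketch (hypothetical; the real entry points live under arch/):
 * a low-level fault handler translates the faulting address and access
 * type and lets as_page_fault() either resolve the fault, defer it to
 * the uspace copy failover path, or report a genuine fault.
 *
 *    pf_access_t access = write ? PF_ACCESS_WRITE : PF_ACCESS_READ;
 *    if (as_page_fault(ALIGN_DOWN(va, PAGE_SIZE), access, istate) ==
 *        AS_PF_FAULT) {
 *        // kill the task or panic, depending on fault origin
 *    }
 */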

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
 * thing which is forbidden in this context is locking the address space.
 *
 * When this function is entered, no spinlocks may be held.
 *
 * @param old_as Old address space or NULL.
 * @param new_as New address space.
 */
void as_switch(as_t *old_as, as_t *new_as)
{
    DEADLOCK_PROBE_INIT(p_asidlock);
    preemption_disable();
retry:
    (void) interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        /*
         * Avoid deadlock with TLB shootdown.
         * We can enable interrupts here because
         * preemption is disabled. We should not be
         * holding any other lock.
         */
        (void) interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }
    preemption_enable();

    /*
     * First, take care of the old address space.
     */
    if (old_as) {
        ASSERT(old_as->cpu_refcount);
        if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old_as->asid != ASID_INVALID);
            list_append(&old_as->inactive_as_with_asid_link,
                &inactive_as_with_asid_head);
        }

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old_as);
    }

    /*
     * Second, prepare the new address space.
     */
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
        if (new_as->asid != ASID_INVALID)
            list_remove(&new_as->inactive_as_with_asid_link);
        else
            new_as->asid = asid_get();
    }
#ifdef AS_PAGE_TABLE
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new_as);

    spinlock_unlock(&asidlock);

    AS = new_as;
}

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}
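
/*
 * For example, a plain read/write cacheable user area translates to:
 *
 *    area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE |
 *        AS_AREA_CACHEABLE) == (PAGE_USER | PAGE_PRESENT | PAGE_READ |
 *        PAGE_WRITE | PAGE_CACHEABLE)
 */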

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
#ifdef __OBJC__
    return [as_t page_table_create: flags];
#else
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
#endif
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
#ifdef __OBJC__
    return [as_t page_table_destroy: page_table];
#else
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
#endif
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
#ifdef __OBJC__
    [as page_table_lock: lock];
#else
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
#endif
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
#ifdef __OBJC__
    [as page_table_unlock: unlock];
#else
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
#endif
}
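
/*
 * Typical locking pattern (sketch), as used throughout this file: the
 * area and address space are locked by the caller, so the page table
 * is locked with `lock == false` just around the mapping operation.
 *
 *    page_table_lock(as, false);
 *    pte = page_mapping_find(as, page);
 *    if (pte && PTE_VALID(pte) && PTE_PRESENT(pte)) {
 *        // inspect or remove the mapping
 *    }
 *    page_table_unlock(as, false);
 */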

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on
 *     failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    unsigned int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
    if (lnode) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
              as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    unsigned int i;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
    if (node) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check if it doesn't conflict with kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START,
            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/** Return size of the address space area with given base.
 *
 * @param base      Arbitrary address inside the address space area.
 *
 * @return      Size of the address space area in bytes or zero if it
 *          does not exist.
 */
size_t as_area_get_size(uintptr_t base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}
  1402.  
  1403. /** Mark portion of address space area as used.
  1404.  *
  1405.  * The address space area must be already locked.
  1406.  *
  1407.  * @param a Address space area.
  1408.  * @param page First page to be marked.
  1409.  * @param count Number of page to be marked.
  1410.  *
  1411.  * @return 0 on failure and 1 on success.
  1412.  */
  1413. int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
  1414. {
  1415.     btree_node_t *leaf, *node;
  1416.     count_t pages;
  1417.     unsigned int i;
  1418.  
  1419.     ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
  1420.     ASSERT(count);
  1421.  
  1422.     pages = (count_t) btree_search(&a->used_space, page, &leaf);
  1423.     if (pages) {
  1424.         /*
  1425.          * We hit the beginning of some used space.
  1426.          */
  1427.         return 0;
  1428.     }
  1429.  
  1430.     if (!leaf->keys) {
  1431.         btree_insert(&a->used_space, page, (void *) count, leaf);
  1432.         return 1;
  1433.     }
  1434.  
  1435.     node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
  1436.     if (node) {
  1437.         uintptr_t left_pg = node->key[node->keys - 1];
  1438.         uintptr_t right_pg = leaf->key[0];
  1439.         count_t left_cnt = (count_t) node->value[node->keys - 1];
  1440.         count_t right_cnt = (count_t) leaf->value[0];
  1441.        
  1442.         /*
  1443.          * Examine the possibility that the interval fits
  1444.          * somewhere between the rightmost interval of
  1445.          * the left neigbour and the first interval of the leaf.
  1446.          */
  1447.          
  1448.         if (page >= right_pg) {
  1449.             /* Do nothing. */
  1450.         } else if (overlaps(page, count * PAGE_SIZE, left_pg,
  1451.             left_cnt * PAGE_SIZE)) {
  1452.             /* The interval intersects with the left interval. */
  1453.             return 0;
  1454.         } else if (overlaps(page, count * PAGE_SIZE, right_pg,
  1455.             right_cnt * PAGE_SIZE)) {
  1456.             /* The interval intersects with the right interval. */
  1457.             return 0;          
  1458.         } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
  1459.             (page + count * PAGE_SIZE == right_pg)) {
  1460.             /*
  1461.              * The interval can be added by merging the two already
  1462.              * present intervals.
  1463.              */
  1464.             node->value[node->keys - 1] += count + right_cnt;
  1465.             btree_remove(&a->used_space, right_pg, leaf);
  1466.             return 1;
  1467.         } else if (page == left_pg + left_cnt * PAGE_SIZE) {
  1468.             /*
  1469.              * The interval can be added by simply growing the left
  1470.              * interval.
  1471.              */
  1472.             node->value[node->keys - 1] += count;
  1473.             return 1;
  1474.         } else if (page + count * PAGE_SIZE == right_pg) {
  1475.             /*
  1476.              * The interval can be addded by simply moving base of
  1477.              * the right interval down and increasing its size
  1478.              * accordingly.
  1479.              */
  1480.             leaf->value[0] += count;
  1481.             leaf->key[0] = page;
  1482.             return 1;
  1483.         } else {
  1484.             /*
  1485.              * The interval is between both neigbouring intervals,
  1486.              * but cannot be merged with any of them.
  1487.              */
  1488.             btree_insert(&a->used_space, page, (void *) count,
  1489.                 leaf);
  1490.             return 1;
  1491.         }
  1492.     } else if (page < leaf->key[0]) {
  1493.         uintptr_t right_pg = leaf->key[0];
  1494.         count_t right_cnt = (count_t) leaf->value[0];
  1495.    
  1496.         /*
  1497.          * Investigate the border case in which the left neighbour does
  1498.          * not exist but the interval fits from the left.
  1499.          */
  1500.          
  1501.         if (overlaps(page, count * PAGE_SIZE, right_pg,
  1502.             right_cnt * PAGE_SIZE)) {
  1503.             /* The interval intersects with the right interval. */
  1504.             return 0;
  1505.         } else if (page + count * PAGE_SIZE == right_pg) {
  1506.             /*
  1507.              * The interval can be added by moving the base of the
  1508.              * right interval down and increasing its size
  1509.              * accordingly.
  1510.              */
  1511.             leaf->key[0] = page;
  1512.             leaf->value[0] += count;
  1513.             return 1;
  1514.         } else {
  1515.             /*
  1516.              * The interval doesn't adjoin with the right interval.
  1517.              * It must be added individually.
  1518.              */
  1519.             btree_insert(&a->used_space, page, (void *) count,
  1520.                 leaf);
  1521.             return 1;
  1522.         }
  1523.     }
  1524.  
  1525.     node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
  1526.     if (node) {
  1527.         uintptr_t left_pg = leaf->key[leaf->keys - 1];
  1528.         uintptr_t right_pg = node->key[0];
  1529.         count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
  1530.         count_t right_cnt = (count_t) node->value[0];
  1531.        
  1532.         /*
  1533.          * Examine the possibility that the interval fits
  1534.          * somewhere between the leftmost interval of
  1535.          * the right neigbour and the last interval of the leaf.
  1536.          */
  1537.  
  1538.         if (page < left_pg) {
  1539.             /* Do nothing. */
  1540.         } else if (overlaps(page, count * PAGE_SIZE, left_pg,
  1541.             left_cnt * PAGE_SIZE)) {
  1542.             /* The interval intersects with the left interval. */
  1543.             return 0;
  1544.         } else if (overlaps(page, count * PAGE_SIZE, right_pg,
  1545.             right_cnt * PAGE_SIZE)) {
  1546.             /* The interval intersects with the right interval. */
  1547.             return 0;          
  1548.         } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
  1549.             (page + count * PAGE_SIZE == right_pg)) {
  1550.             /*
  1551.              * The interval can be added by merging the two already
  1552.              * present intervals.
  1553.              * */
  1554.             leaf->value[leaf->keys - 1] += count + right_cnt;
  1555.             btree_remove(&a->used_space, right_pg, node);
  1556.             return 1;
  1557.         } else if (page == left_pg + left_cnt * PAGE_SIZE) {
  1558.             /*
  1559.              * The interval can be added by simply growing the left
  1560.              * interval.
  1561.              */
  1562.             leaf->value[leaf->keys - 1] += count;
  1563.             return 1;
  1564.         } else if (page + count * PAGE_SIZE == right_pg) {
  1565.             /*
  1566.              * The interval can be added by simply moving the base of
  1567.              * the right interval down and increasing its size
  1568.              * accordingly.
  1569.              */
  1570.             node->value[0] += count;
  1571.             node->key[0] = page;
  1572.             return 1;
  1573.         } else {
  1574.             /*
  1575.              * The interval is between both neighbouring intervals,
  1576.              * but cannot be merged with any of them.
  1577.              */
  1578.             btree_insert(&a->used_space, page, (void *) count,
  1579.                 leaf);
  1580.             return 1;
  1581.         }
  1582.     } else if (page >= leaf->key[leaf->keys - 1]) {
  1583.         uintptr_t left_pg = leaf->key[leaf->keys - 1];
  1584.         count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
  1585.    
  1586.         /*
  1587.          * Investigate the border case in which the right neighbour
  1588.          * does not exist but the interval fits from the right.
  1589.          */
  1590.          
  1591.         if (overlaps(page, count * PAGE_SIZE, left_pg,
  1592.             left_cnt * PAGE_SIZE)) {
  1593.             /* The interval intersects with the left interval. */
  1594.             return 0;
  1595.         } else if (left_pg + left_cnt * PAGE_SIZE == page) {
  1596.             /*
  1597.              * The interval can be added by growing the left
  1598.              * interval.
  1599.              */
  1600.             leaf->value[leaf->keys - 1] += count;
  1601.             return 1;
  1602.         } else {
  1603.             /*
  1604.              * The interval does not adjoin the left interval.
  1605.              * It must be added individually.
  1606.              */
  1607.             btree_insert(&a->used_space, page, (void *) count,
  1608.                 leaf);
  1609.             return 1;
  1610.         }
  1611.     }
  1612.    
  1613.     /*
  1614.      * Note that if the algorithm made it thus far, the interval can fit
  1615.      * only between two other intervals of the leaf. The two border cases
  1616.      * were already resolved.
  1617.      */
  1618.     for (i = 1; i < leaf->keys; i++) {
  1619.         if (page < leaf->key[i]) {
  1620.             uintptr_t left_pg = leaf->key[i - 1];
  1621.             uintptr_t right_pg = leaf->key[i];
  1622.             count_t left_cnt = (count_t) leaf->value[i - 1];
  1623.             count_t right_cnt = (count_t) leaf->value[i];
  1624.  
  1625.             /*
  1626.              * The interval fits between left_pg and right_pg.
  1627.              */
  1628.  
  1629.             if (overlaps(page, count * PAGE_SIZE, left_pg,
  1630.                 left_cnt * PAGE_SIZE)) {
  1631.                 /*
  1632.                  * The interval intersects with the left
  1633.                  * interval.
  1634.                  */
  1635.                 return 0;
  1636.             } else if (overlaps(page, count * PAGE_SIZE, right_pg,
  1637.                 right_cnt * PAGE_SIZE)) {
  1638.                 /*
  1639.                  * The interval intersects with the right
  1640.                  * interval.
  1641.                  */
  1642.                 return 0;          
  1643.             } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
  1644.                 (page + count * PAGE_SIZE == right_pg)) {
  1645.                 /*
  1646.                  * The interval can be added by merging the two
  1647.                  * already present intervals.
  1648.                  */
  1649.                 leaf->value[i - 1] += count + right_cnt;
  1650.                 btree_remove(&a->used_space, right_pg, leaf);
  1651.                 return 1;
  1652.             } else if (page == left_pg + left_cnt * PAGE_SIZE) {
  1653.                 /*
  1654.                  * The interval can be added by simply growing
  1655.                  * the left interval.
  1656.                  */
  1657.                 leaf->value[i - 1] += count;
  1658.                 return 1;
  1659.             } else if (page + count * PAGE_SIZE == right_pg) {
  1660.                 /*
  1661.                  * The interval can be added by simply moving
  1662.                  * the base of the right interval down and
  1663.                  * increasing its size accordingly.
  1664.                  */
  1665.                 leaf->value[i] += count;
  1666.                 leaf->key[i] = page;
  1667.                 return 1;
  1668.             } else {
  1669.                 /*
  1670.                  * The interval is between both neighbouring
  1671.                  * intervals, but cannot be merged with any of
  1672.                  * them.
  1673.                  */
  1674.                 btree_insert(&a->used_space, page,
  1675.                     (void *) count, leaf);
  1676.                 return 1;
  1677.             }
  1678.         }
  1679.     }
  1680.  
  1681.     panic("Inconsistency detected while adding %" PRIc " pages of used "
  1682.         "space at %p.\n", count, page);
  1683. }
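
/*
 * Illustrative sketch only, not part of the kernel: how used_space_insert()
 * coalesces intervals in the used_space B+tree. The addresses below are made
 * up for the example and assume a 4 KiB PAGE_SIZE; the area is assumed to be
 * locked, as the function requires.
 *
 * @code
 *  mutex_lock(&area->lock);
 *
 *  used_space_insert(area, 0x10000000, 2); // used: [0x10000000, 0x10002000)
 *  used_space_insert(area, 0x10002000, 1); // adjoins from the right, so the
 *                                          // existing interval merely grows
 *                                          // to [0x10000000, 0x10003000)
 *  used_space_insert(area, 0x10001000, 1); // overlaps an existing interval:
 *                                          // returns 0, nothing is changed
 *
 *  mutex_unlock(&area->lock);
 * @endcode
 */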
  1684.  
  1685. /** Mark portion of address space area as unused.
  1686.  *
  1687.  * The address space area must be already locked.
  1688.  *
  1689.  * @param a Address space area.
  1690.  * @param page First page to be marked.
  1691.  * @param count Number of pages to be marked.
  1692.  *
  1693.  * @return 0 on failure and 1 on success.
  1694.  */
  1695. int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
  1696. {
  1697.     btree_node_t *leaf, *node;
  1698.     count_t pages;
  1699.     unsigned int i;
  1700.  
  1701.     ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
  1702.     ASSERT(count);
  1703.  
  1704.     pages = (count_t) btree_search(&a->used_space, page, &leaf);
  1705.     if (pages) {
  1706.         /*
  1707.          * We are lucky, page is the beginning of some interval.
  1708.          */
  1709.         if (count > pages) {
  1710.             return 0;
  1711.         } else if (count == pages) {
  1712.             btree_remove(&a->used_space, page, leaf);
  1713.             return 1;
  1714.         } else {
  1715.             /*
  1716.              * Find the respective interval.
  1717.              * Decrease its size and relocate its start address.
  1718.              */
  1719.             for (i = 0; i < leaf->keys; i++) {
  1720.                 if (leaf->key[i] == page) {
  1721.                     leaf->key[i] += count * PAGE_SIZE;
  1722.                     leaf->value[i] -= count;
  1723.                     return 1;
  1724.                 }
  1725.             }
  1726.             goto error;
  1727.         }
  1728.     }
  1729.  
  1730.     node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
  1731.     if (node && page < leaf->key[0]) {
  1732.         uintptr_t left_pg = node->key[node->keys - 1];
  1733.         count_t left_cnt = (count_t) node->value[node->keys - 1];
  1734.  
  1735.         if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
  1736.             count * PAGE_SIZE)) {
  1737.             if (page + count * PAGE_SIZE ==
  1738.                 left_pg + left_cnt * PAGE_SIZE) {
  1739.                 /*
  1740.                  * The interval is contained in the rightmost
  1741.                  * interval of the left neighbour and can be
  1742.                  * removed by updating the size of the bigger
  1743.                  * interval.
  1744.                  */
  1745.                 node->value[node->keys - 1] -= count;
  1746.                 return 1;
  1747.             } else if (page + count * PAGE_SIZE <
  1748.                 left_pg + left_cnt * PAGE_SIZE) {
  1749.                 count_t new_cnt;
  1750.                
  1751.                 /*
  1752.                  * The interval is contained in the rightmost
  1753.                  * interval of the left neighbour but its
  1754.                  * removal requires both updating the size of
  1755.                  * the original interval and also inserting a
  1756.                  * new interval.
  1757.                  */
  1758.                 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
  1759.                     (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
  1760.                 node->value[node->keys - 1] -= count + new_cnt;
  1761.                 btree_insert(&a->used_space, page +
  1762.                     count * PAGE_SIZE, (void *) new_cnt, leaf);
  1763.                 return 1;
  1764.             }
  1765.         }
  1766.         return 0;
  1767.     } else if (page < leaf->key[0]) {
  1768.         return 0;
  1769.     }
  1770.    
  1771.     if (page > leaf->key[leaf->keys - 1]) {
  1772.         uintptr_t left_pg = leaf->key[leaf->keys - 1];
  1773.         count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
  1774.  
  1775.         if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
  1776.             count * PAGE_SIZE)) {
  1777.             if (page + count * PAGE_SIZE ==
  1778.                 left_pg + left_cnt * PAGE_SIZE) {
  1779.                 /*
  1780.                  * The interval is contained in the rightmost
  1781.                  * interval of the leaf and can be removed by
  1782.                  * updating the size of the bigger interval.
  1783.                  */
  1784.                 leaf->value[leaf->keys - 1] -= count;
  1785.                 return 1;
  1786.             } else if (page + count * PAGE_SIZE < left_pg +
  1787.                 left_cnt * PAGE_SIZE) {
  1788.                 count_t new_cnt;
  1789.                
  1790.                 /*
  1791.                  * The interval is contained in the rightmost
  1792.                  * interval of the leaf but its removal
  1793.                  * requires both updating the size of the
  1794.                  * original interval and also inserting a new
  1795.                  * interval.
  1796.                  */
  1797.                 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
  1798.                     (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
  1799.                 leaf->value[leaf->keys - 1] -= count + new_cnt;
  1800.                 btree_insert(&a->used_space, page +
  1801.                     count * PAGE_SIZE, (void *) new_cnt, leaf);
  1802.                 return 1;
  1803.             }
  1804.         }
  1805.         return 0;
  1806.     }  
  1807.    
  1808.     /*
  1809.      * The border cases have been already resolved.
  1810.      * Now the interval can be only between intervals of the leaf.
  1811.      */
  1812.     for (i = 1; i < leaf->keys; i++) {
  1813.         if (page < leaf->key[i]) {
  1814.             uintptr_t left_pg = leaf->key[i - 1];
  1815.             count_t left_cnt = (count_t) leaf->value[i - 1];
  1816.  
  1817.             /*
  1818.              * Now the interval is between intervals corresponding
  1819.              * to (i - 1) and i.
  1820.              */
  1821.             if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
  1822.                 count * PAGE_SIZE)) {
  1823.                 if (page + count * PAGE_SIZE ==
  1824.                     left_pg + left_cnt * PAGE_SIZE) {
  1825.                     /*
  1826.                      * The interval is contained in the
  1827.                      * interval (i - 1) of the leaf and can
  1828.                      * be removed by updating the size of
  1829.                      * the bigger interval.
  1830.                      */
  1831.                     leaf->value[i - 1] -= count;
  1832.                     return 1;
  1833.                 } else if (page + count * PAGE_SIZE <
  1834.                     left_pg + left_cnt * PAGE_SIZE) {
  1835.                     count_t new_cnt;
  1836.                
  1837.                     /*
  1838.                      * The interval is contained in the
  1839.                      * interval (i - 1) of the leaf but its
  1840.                      * removal requires both updating the
  1841.                      * size of the original interval and
  1842.                      * also inserting a new interval.
  1843.                      */
  1844.                     new_cnt = ((left_pg +
  1845.                         left_cnt * PAGE_SIZE) -
  1846.                         (page + count * PAGE_SIZE)) >>
  1847.                         PAGE_WIDTH;
  1848.                     leaf->value[i - 1] -= count + new_cnt;
  1849.                     btree_insert(&a->used_space, page +
  1850.                         count * PAGE_SIZE, (void *) new_cnt,
  1851.                         leaf);
  1852.                     return 1;
  1853.                 }
  1854.             }
  1855.             return 0;
  1856.         }
  1857.     }
  1858.  
  1859. error:
  1860.     panic("Inconsistency detected while removing %" PRIc " pages of "
  1861.         "used space from %p.\n", count, page);
  1862. }
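
/*
 * Illustrative sketch only, not part of the kernel: removing pages from the
 * middle of an interval splits it in two, using the new_cnt arithmetic from
 * the function above. The figures assume a 4 KiB PAGE_SIZE (PAGE_WIDTH 12)
 * and a locked area.
 *
 * @code
 *  // used: [0x20000000, 0x20008000), i.e. an interval of 8 pages
 *  used_space_remove(area, 0x20002000, 3);
 *  // The removed range [0x20002000, 0x20005000) ends strictly inside the
 *  // interval, so:
 *  //   new_cnt = (0x20008000 - 0x20005000) >> PAGE_WIDTH = 3
 *  // The original interval shrinks by count + new_cnt = 6 pages to
 *  // [0x20000000, 0x20002000) and a new 3-page interval is inserted:
 *  // used: [0x20000000, 0x20002000) and [0x20005000, 0x20008000)
 * @endcode
 */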
  1863.  
  1864. /** Remove reference to address space area share info.
  1865.  *
  1866.  * If the reference count drops to 0, the sh_info is deallocated.
  1867.  *
  1868.  * @param sh_info Pointer to address space area share info.
  1869.  */
  1870. void sh_info_remove_reference(share_info_t *sh_info)
  1871. {
  1872.     bool dealloc = false;
  1873.  
  1874.     mutex_lock(&sh_info->lock);
  1875.     ASSERT(sh_info->refcount);
  1876.     if (--sh_info->refcount == 0) {
  1877.         dealloc = true;
  1878.         link_t *cur;
  1879.        
  1880.         /*
  1881.          * Now carefully walk the pagemap B+tree and drop the
  1882.          * reference to every frame found there.
  1883.          */
  1884.         for (cur = sh_info->pagemap.leaf_head.next;
  1885.             cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
  1886.             btree_node_t *node;
  1887.             unsigned int i;
  1888.            
  1889.             node = list_get_instance(cur, btree_node_t, leaf_link);
  1890.             for (i = 0; i < node->keys; i++)
  1891.                 frame_free((uintptr_t) node->value[i]);
  1892.         }
  1893.        
  1894.     }
  1895.     mutex_unlock(&sh_info->lock);
  1896.    
  1897.     if (dealloc) {
  1898.         btree_destroy(&sh_info->pagemap);
  1899.         free(sh_info);
  1900.     }
  1901. }
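
/*
 * Illustrative sketch only, not part of the kernel: the reference-drop
 * pattern used above -- record the deallocation decision under the mutex,
 * perform it only after unlocking -- in its minimal form. The obj_t type
 * and obj_put() are hypothetical.
 *
 * @code
 *  typedef struct {
 *      mutex_t lock;
 *      count_t refcount;
 *  } obj_t;
 *
 *  void obj_put(obj_t *obj)
 *  {
 *      bool dealloc = false;
 *
 *      mutex_lock(&obj->lock);
 *      ASSERT(obj->refcount);
 *      if (--obj->refcount == 0)
 *          dealloc = true;
 *      mutex_unlock(&obj->lock);
 *
 *      if (dealloc)
 *          free(obj);  // no other reference can exist at this point
 *  }
 * @endcode
 */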
  1902.  
  1903. /*
  1904.  * Address space related syscalls.
  1905.  */
  1906.  
  1907. /** Wrapper for as_area_create(). */
  1908. unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
  1909. {
  1910.     if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
  1911.         AS_AREA_ATTR_NONE, &anon_backend, NULL))
  1912.         return (unative_t) address;
  1913.     else
  1914.         return (unative_t) -1;
  1915. }
  1916.  
  1917. /** Wrapper for as_area_resize(). */
  1918. unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
  1919. {
  1920.     return (unative_t) as_area_resize(AS, address, size, 0);
  1921. }
  1922.  
  1923. /** Wrapper for as_area_change_flags(). */
  1924. unative_t sys_as_area_change_flags(uintptr_t address, int flags)
  1925. {
  1926.     return (unative_t) as_area_change_flags(AS, flags, address);
  1927. }
  1928.  
  1929. /** Wrapper for as_area_destroy(). */
  1930. unative_t sys_as_area_destroy(uintptr_t address)
  1931. {
  1932.     return (unative_t) as_area_destroy(AS, address);
  1933. }
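
/*
 * Illustrative sketch only, not part of the kernel: a user task reaches the
 * wrappers above via its libc syscall stubs. The as_area_create() stub and
 * its exact signature are assumed here for the example; what the wrappers
 * do guarantee is that failure is signalled by the (unative_t) -1 return
 * value.
 *
 * @code
 *  void *addr = as_area_create((void *) 0x40000000, 4 * PAGE_SIZE,
 *      AS_AREA_READ | AS_AREA_WRITE);
 *  if (addr == (void *) -1) {
 *      // the kernel refused to create the area
 *  }
 * @endcode
 */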
  1934.  
  1935. /** Print out information about address space.
  1936.  *
  1937.  * @param as Address space.
  1938.  */
  1939. void as_print(as_t *as)
  1940. {
  1941.     ipl_t ipl;
  1942.    
  1943.     ipl = interrupts_disable();
  1944.     mutex_lock(&as->lock);
  1945.    
  1946.     /* Print out info about address space areas. */
  1947.     link_t *cur;
  1948.     for (cur = as->as_area_btree.leaf_head.next;
  1949.         cur != &as->as_area_btree.leaf_head; cur = cur->next) {
  1950.         btree_node_t *node;
  1951.        
  1952.         node = list_get_instance(cur, btree_node_t, leaf_link);
  1953.        
  1954.         unsigned int i;
  1955.         for (i = 0; i < node->keys; i++) {
  1956.             as_area_t *area = node->value[i];
  1957.        
  1958.             mutex_lock(&area->lock);
  1959.             printf("as_area: %p, base=%p, pages=%" PRIc " (%p - %p)\n",
  1960.                 area, area->base, area->pages, area->base,
  1961.                 area->base + FRAMES2SIZE(area->pages));
  1962.             mutex_unlock(&area->lock);
  1963.         }
  1964.     }
  1965.    
  1966.     mutex_unlock(&as->lock);
  1967.     interrupts_restore(ipl);
  1968. }
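
/*
 * Illustrative sketch only, not part of the kernel: for an address space
 * with two areas, the loop above might print something like the following
 * (all values are made up):
 *
 * @code
 *  as_area: 0x8011f280, base=0x1000, pages=17 (0x1000 - 0x12000)
 *  as_area: 0x8011f310, base=0x7ff00000, pages=4 (0x7ff00000 - 0x7ff04000)
 * @endcode
 */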
  1969.  
  1970. /** @}
  1971.  */
  1972.