/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief   Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <preemption.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;

/**
 * This lock serializes access to the ASID subsystem.
 * It protects:
 * - inactive_as_with_asid_head list
 * - as->asid for each as of the as_t type
 * - asids_allocated counter
 */
SPINLOCK_INITIALIZE(asidlock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int);
static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *);
static void sh_info_remove_reference(share_info_t *);

static int as_constructor(void *obj, int flags)
{
    as_t *as = (as_t *) obj;
    int rc;

    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock, MUTEX_PASSIVE);

    rc = as_constructor_arch(as, flags);

    return rc;
}

static int as_destructor(void *obj)
{
    as_t *as = (as_t *) obj;

    return as_destructor_arch(as);
}

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();

    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("Cannot create kernel address space.");

    /* Make sure the kernel address space
     * reference count never drops to zero.
     */
    atomic_set(&AS_KERNEL->refcount, 1);
}

/** Create address space.
 *
 * @param flags     Flags that influence the way in which the address space
 *          is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) slab_alloc(as_slab, 0);
    (void) as_create_arch(as, 0);

    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    atomic_set(&as->refcount, 0);
    as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
    as->genarch.page_table = page_table_create(flags);
#else
    page_table_create(flags);
#endif

    return as;
}
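
/*
 * A minimal usage sketch (illustrative only): creating a fresh userspace
 * address space, e.g. on the task creation path.
 *
 *     as_t *as = as_create(0);
 *     // as->asid stays ASID_INVALID until the address space is first
 *     // installed by as_switch(), which assigns an ASID via asid_get();
 *     // areas are populated later with as_area_create().
 */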

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 *
 * We know that we don't hold any spinlock.
 *
 * @param as        Address space to be destroyed.
 */
void as_destroy(as_t *as)
{
    ipl_t ipl;
    bool cond;
    DEADLOCK_PROBE_INIT(p_asidlock);

    ASSERT(atomic_get(&as->refcount) == 0);

    /*
     * Since there is no reference to this area,
     * it is safe not to lock its mutex.
     */

    /*
     * We need to avoid deadlock between TLB shootdown and asidlock.
     * We therefore try to take the asidlock conditionally and if we don't
     * succeed, we enable interrupts and try again. This is done while
     * preemption is disabled to prevent nested context switches. We also
     * depend on the fact that so far no spinlocks are held.
     */
    preemption_disable();
    ipl = interrupts_read();
retry:
    interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }
    preemption_enable();    /* Interrupts disabled, enable preemption */
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
        if (as != AS && as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);
        asid_put(as->asid);
    }
    spinlock_unlock(&asidlock);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    for (cond = true; cond; ) {
        btree_node_t *node;

        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
        node = list_get_instance(as->as_area_btree.leaf_head.next,
            btree_node_t, leaf_link);

        if ((cond = node->keys)) {
            as_area_destroy(as, node->key[0]);
        }
    }

    btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
    page_table_destroy(as->genarch.page_table);
#else
    page_table_destroy(NULL);
#endif

    interrupts_restore(ipl);

    slab_free(as_slab, as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as        Target address space.
 * @param flags     Flags of the area memory.
 * @param size      Size of area.
 * @param base      Base address of area.
 * @param attrs     Attributes of the area.
 * @param backend   Address space area backend. NULL if no backend is used.
 * @param backend_data  NULL or a pointer to an array holding two void *.
 *
 * @return      Address space area on success or NULL on failure.
 */
as_area_t *
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
    mem_backend_t *backend, mem_backend_data_t *backend_data)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&a->lock, MUTEX_PASSIVE);

    a->as = as;
    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;
    a->sh_info = NULL;
    a->backend = backend;
    if (backend_data)
        a->backend_data = *backend_data;
    else
        memsetb(&a->backend_data, sizeof(a->backend_data), 0);

    btree_create(&a->used_space);

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}
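
/*
 * A minimal usage sketch: backing one page with read-write anonymous
 * memory. The base address is hypothetical, AS_AREA_ATTR_NONE is assumed
 * to denote "no special attributes", and anon_backend is the anonymous
 * memory backend referenced elsewhere in this file.
 *
 *     as_area_t *a = as_area_create(as,
 *         AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, PAGE_SIZE,
 *         0x40000000, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *     if (!a) {
 *         // misaligned base, zero size, writable+executable flags,
 *         // or a conflict with an existing area
 *     }
 */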

/** Find address space area and change it.
 *
 * @param as        Address space.
 * @param address   Virtual address belonging to the area to be changed.
 *          Must be page-aligned.
 * @param size      New size of the virtual memory block starting at
 *          address.
 * @param flags     Flags influencing the remap operation. Currently unused.
 *
 * @return      Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->backend == &phys_backend) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    if (pages < area->pages) {
        bool cond;
        uintptr_t start_free = area->base + pages * PAGE_SIZE;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        /*
         * Start TLB shootdown sequence.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base +
            pages * PAGE_SIZE, area->pages - pages);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        for (cond = true; cond;) {
            btree_node_t *node;

            ASSERT(!list_empty(&area->used_space.leaf_head));
            node =
                list_get_instance(area->used_space.leaf_head.prev,
                btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                uintptr_t b = node->key[node->keys - 1];
                size_t c =
                    (size_t) node->value[node->keys - 1];
                unsigned int i = 0;

                if (overlaps(b, c * PAGE_SIZE, area->base,
                    pages * PAGE_SIZE)) {

                    if (b + c * PAGE_SIZE <= start_free) {
                        /*
                         * The whole interval fits
                         * completely in the resized
                         * address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval corresponding
                     * to b and c overlaps with the resized
                     * address space area.
                     */

                    cond = false;   /* we are almost done */
                    i = (start_free - b) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free,
                        c - i))
                        panic("Cannot remove used "
                            "space.");
                } else {
                    /*
                     * The interval of used space can be
                     * completely removed.
                     */
                    if (!used_space_remove(area, b, c))
                        panic("Cannot remove used "
                            "space.");
                }

                for (; i < c; i++) {
                    pte_t *pte;

                    page_table_lock(as, false);
                    pte = page_mapping_find(as, b +
                        i * PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) &&
                        PTE_PRESENT(pte));
                    if (area->backend &&
                        area->backend->frame_free) {
                        area->backend->frame_free(area,
                            b + i * PAGE_SIZE,
                            PTE_GET_FRAME(pte));
                    }
                    page_mapping_remove(as, b +
                        i * PAGE_SIZE);
                    page_table_unlock(as, false);
                }
            }
        }

        /*
         * Finish TLB shootdown sequence.
         */

        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
            area->pages - pages);
        /*
         * Invalidate software translation caches (e.g. TSB on sparc64).
         */
        as_invalidate_translation_cache(as, area->base +
            pages * PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();

    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
            area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}
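
/*
 * A usage sketch: growing the area that starts at the hypothetical,
 * page-aligned address area_base. The new size is measured from the
 * address passed in; flags are currently unused.
 *
 *     int rc = as_area_resize(as, area_base, 2 * old_size, 0);
 *     // rc is 0 on success, ENOENT if no area contains area_base,
 *     // ENOTSUP for physically backed or shared areas, EPERM for a
 *     // zero-page result, EADDRNOTAVAIL if growth would conflict.
 */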

/** Destroy address space area.
 *
 * @param as        Address space.
 * @param address   Address within the area to be deleted.
 *
 * @return      Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
    as_area_t *area;
    uintptr_t base;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    base = area->base;

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /*
     * Visit only the pages mapped by used_space B+tree.
     */
    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            size_t j;
            pte_t *pte;

            for (j = 0; j < (size_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                if (area->backend &&
                    area->backend->frame_free) {
                    area->backend->frame_free(area, b +
                        j * PAGE_SIZE, PTE_GET_FRAME(pte));
                }
                page_mapping_remove(as, b + j * PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);
    /*
     * Invalidate potential software translation caches (e.g. TSB on
     * sparc64).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize();

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
    return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as    Pointer to source address space.
 * @param src_base  Base address of the source address space area.
 * @param acc_size  Expected size of the source area.
 * @param dst_as    Pointer to destination address space.
 * @param dst_base  Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return      Zero on success. ENOENT if there is no such task or no
 *          such address space area, EPERM if there was a problem in
 *          accepting the area, ENOMEM if there was a problem in
 *          allocating the destination address space area, or ENOTSUP
 *          if the address space area backend does not support sharing.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
    ipl_t ipl;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;
    share_info_t *sh_info;
    mem_backend_t *src_backend;
    mem_backend_data_t src_backend_data;

    ipl = interrupts_disable();
    mutex_lock(&src_as->lock);
    src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (!src_area->backend || !src_area->backend->share) {
        /*
         * There is no backend or the backend does not
         * know how to share the area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    src_backend = src_area->backend;
    src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if (src_size != acc_size ||
        (src_flags & dst_flags_mask) != dst_flags_mask) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    /*
     * Now we are committed to sharing the area.
     * First, prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    sh_info = src_area->sh_info;
    if (!sh_info) {
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
        sh_info->refcount = 2;
        btree_create(&sh_info->pagemap);
        src_area->sh_info = sh_info;
        /*
         * Call the backend to setup sharing.
         */
        src_area->backend->share(src_area);
    } else {
        mutex_lock(&sh_info->lock);
        sh_info->refcount++;
        mutex_unlock(&sh_info->lock);
    }

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);

        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_as->lock);
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);

    interrupts_restore(ipl);

    return 0;
}
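
/*
 * A usage sketch of the sharing path, e.g. as driven by IPC. The task
 * pointers and addresses are hypothetical placeholders; task_t is assumed
 * to carry its address space in the as field.
 *
 *     int rc = as_area_share(sender->as, src_base, size,
 *         receiver->as, dst_base, AS_AREA_READ | AS_AREA_CACHEABLE);
 *     // On success both areas reference one share_info_t and subsequent
 *     // page faults in either area are satisfied from the shared pagemap.
 */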

/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area      Address space area.
 * @param access    Access mode.
 *
 * @return      False if access violates area's permissions, true
 *          otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}

/** Change address space area flags.
 *
 * The idea is to have the same data, but with a different access mode.
 * This is needed e.g. for writing code into memory and then executing it.
 * In order for this to work properly, this may copy the data
 * into private anonymous memory (unless it's already there).
 *
 * @param as      Address space.
 * @param flags   Flags of the area memory.
 * @param address Address within the area to be changed.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 *
 */
int as_area_change_flags(as_t *as, int flags, uintptr_t address)
{
    as_area_t *area;
    uintptr_t base;
    link_t *cur;
    ipl_t ipl;
    int page_flags;
    uintptr_t *old_frame;
    size_t frame_idx;
    size_t used_pages;

    /* Flags for the new memory mapping */
    page_flags = area_flags_to_page_flags(flags);

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if ((area->sh_info) || (area->backend != &anon_backend)) {
        /* Copying shared areas not supported yet */
        /* Copying non-anonymous memory not supported yet */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    base = area->base;

    /*
     * Compute total number of used pages in the used_space B+tree
     */
    used_pages = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            used_pages += (size_t) node->value[i];
        }
    }

    /* An array for storing frame numbers */
    old_frame = malloc(used_pages * sizeof(uintptr_t), 0);

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /*
     * Remove used pages from page tables and remember their frame
     * numbers.
     */
    frame_idx = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            size_t j;
            pte_t *pte;

            for (j = 0; j < (size_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                old_frame[frame_idx++] = PTE_GET_FRAME(pte);

                /* Remove old mapping */
                page_mapping_remove(as, b + j * PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);

    /*
     * Invalidate potential software translation caches (e.g. TSB on
     * sparc64).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize();

    /*
     * Set the new flags.
     */
    area->flags = flags;

    /*
     * Map pages back in with new flags. This step is kept separate
     * so that the memory area cannot be accessed with both the old and
     * the new flags at once.
     */
    frame_idx = 0;

    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            size_t j;

            for (j = 0; j < (size_t) node->value[i]; j++) {
                page_table_lock(as, false);

                /* Insert the new mapping */
                page_mapping_insert(as, b + j * PAGE_SIZE,
                    old_frame[frame_idx++], page_flags);

                page_table_unlock(as, false);
            }
        }
    }

    free(old_frame);

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}
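
/*
 * A usage sketch of the write-then-execute pattern this function targets
 * (the base address is hypothetical): generate code into a writable
 * anonymous area, then flip it to executable.
 *
 *     // area was created with AS_AREA_READ | AS_AREA_WRITE
 *     int rc = as_area_change_flags(as, AS_AREA_READ | AS_AREA_EXEC, base);
 *     // rc is ENOTSUP for shared or non-anonymous areas; on success every
 *     // used page has been remapped with the new page-level flags.
 */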


/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides whether the page fault
 * can be resolved by any backend and if so, it invokes the backend to resolve
 * the page fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page      Faulting page.
 * @param access    Access mode that caused the page fault (i.e.
 *          read/write/exec).
 * @param istate    Pointer to the interrupted state.
 *
 * @return      AS_PF_FAULT on page fault, AS_PF_OK on success or
 *          AS_PF_DEFER if the fault was caused by copy_to_uspace()
 *          or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;

    if (!THREAD)
        return AS_PF_FAULT;

    ASSERT(AS);

    mutex_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (!area->backend || !area->backend->page_fault) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults on the same address,
     * we need to make sure the mapping has not already been inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                page_table_unlock(AS, false);
                mutex_unlock(&area->lock);
                mutex_unlock(&AS->lock);
                return AS_PF_OK;
            }
        }
    }

    /*
     * Resort to the backend page fault handler.
     */
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}
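
/*
 * A sketch of how an architecture-level fault handler might dispatch here;
 * the handler name and the fault_address_read() helper are hypothetical,
 * as the real entry points are architecture-specific.
 *
 *     void data_abort(istate_t *istate)
 *     {
 *         uintptr_t va = fault_address_read();
 *
 *         if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT)
 *             panic("Page fault at %p.", (void *) va);
 *     }
 */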

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
 * thing which is forbidden in this context is locking the address space.
 *
 * When this function is entered, no spinlocks may be held.
 *
 * @param old_as    Old address space or NULL.
 * @param new_as    New address space.
 */
void as_switch(as_t *old_as, as_t *new_as)
{
    DEADLOCK_PROBE_INIT(p_asidlock);
    preemption_disable();
retry:
    (void) interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        /*
         * Avoid deadlock with TLB shootdown.
         * We can enable interrupts here because
         * preemption is disabled. We should not be
         * holding any other lock.
         */
        (void) interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }
    preemption_enable();

    /*
     * First, take care of the old address space.
     */
    if (old_as) {
        ASSERT(old_as->cpu_refcount);
        if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old_as->asid != ASID_INVALID);
            list_append(&old_as->inactive_as_with_asid_link,
                &inactive_as_with_asid_head);
        }

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old_as);
    }

    /*
     * Second, prepare the new address space.
     */
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
        if (new_as->asid != ASID_INVALID)
            list_remove(&new_as->inactive_as_with_asid_link);
        else
            new_as->asid = asid_get();
    }
#ifdef AS_PAGE_TABLE
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new_as);

    spinlock_unlock(&asidlock);

    AS = new_as;
}
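
/*
 * A simplified sketch of the intended call site in the scheduler: switch
 * only when the next thread runs in a different address space (AS and
 * THREAD are the usual per-CPU globals; refcount handling is omitted).
 *
 *     as_t *new_as = THREAD->task->as;
 *     if (AS != new_as)
 *         as_switch(AS, new_as);
 */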

/** Convert address space area flags to page flags.
 *
 * @param aflags    Flags of some address space area.
 *
 * @return      Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}
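
/*
 * A worked example of the conversion above: a readable, executable,
 * cacheable area yields
 *
 *     area_flags_to_page_flags(AS_AREA_READ | AS_AREA_EXEC |
 *         AS_AREA_CACHEABLE) ==
 *         (PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_EXEC |
 *         PAGE_CACHEABLE)
 *
 * PAGE_USER and PAGE_PRESENT are always set; PAGE_WRITE stays clear
 * because AS_AREA_WRITE was not requested.
 */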

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a     Address space area.
 *
 * @return      Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either an address space private or a
 * global page table.
 *
 * @param flags     Flags saying whether the page table is for the kernel
 *          address space.
 *
 * @return      First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table    Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as        Address space.
 * @param lock      If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as        Address space.
 * @param unlock    If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}
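
/*
 * The canonical pairing, as used throughout this file when the caller
 * already holds as->lock:
 *
 *     page_table_lock(as, false);
 *     pte = page_mapping_find(as, page);
 *     // ... inspect or modify the mapping ...
 *     page_table_unlock(as, false);
 */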

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as        Address space.
 * @param va        Virtual address.
 *
 * @return      Locked address space area containing va on success or
 *          NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    unsigned int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
    if (lnode) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}
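
/*
 * The caller-side locking pattern this helper expects, as used by e.g.
 * as_area_resize() above:
 *
 *     ipl = interrupts_disable();
 *     mutex_lock(&as->lock);
 *     area = find_area_and_lock(as, va);
 *     if (area) {
 *         // ... work with the locked area ...
 *         mutex_unlock(&area->lock);
 *     }
 *     mutex_unlock(&as->lock);
 *     interrupts_restore(ipl);
 */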

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as        Address space.
 * @param va        Starting virtual address of the area being tested.
 * @param size      Size of the area being tested.
 * @param avoid_area    Do not touch this area.
 *
 * @return      True if there is no conflict, false otherwise.
 */
bool
check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    unsigned int i;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
    if (node) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Finally, check that it does not conflict with the kernel address
     * space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START,
            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}
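
/*
 * A concrete instance of the interval test used above, assuming overlaps()
 * treats its arguments as half-open intervals [base, base + size):
 *
 *     overlaps(0x1000, 0x2000, 0x2000, 0x2000) // true: [0x1000, 0x3000)
 *                                              // meets [0x2000, 0x4000)
 *     overlaps(0x1000, 0x1000, 0x2000, 0x1000) // false: the intervals
 *                                              // merely touch at 0x2000
 */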

/** Return size of the address space area with given base.
 *
 * @param base      Arbitrary address inside the address space area.
 *
 * @return      Size of the address space area in bytes or zero if it
 *          does not exist.
 */
size_t as_area_get_size(uintptr_t base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}
  1388.  
  1389. /** Mark portion of address space area as used.
  1390.  *
  1391.  * The address space area must be already locked.
  1392.  *
  1393.  * @param a     Address space area.
  1394.  * @param page      First page to be marked.
  1395.  * @param count     Number of page to be marked.
  1396.  *
  1397.  * @return      Zero on failure and non-zero on success.
  1398.  */
  1399. int used_space_insert(as_area_t *a, uintptr_t page, size_t count)
  1400. {
  1401.     btree_node_t *leaf, *node;
  1402.     size_t pages;
  1403.     unsigned int i;
  1404.  
  1405.     ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
  1406.     ASSERT(count);
  1407.  
  1408.     pages = (size_t) btree_search(&a->used_space, page, &leaf);
  1409.     if (pages) {
  1410.         /*
  1411.          * We hit the beginning of some used space.
  1412.          */
  1413.         return 0;
  1414.     }
  1415.  
  1416.     if (!leaf->keys) {
  1417.         btree_insert(&a->used_space, page, (void *) count, leaf);
  1418.         return 1;
  1419.     }
  1420.  
  1421.     node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
  1422.     if (node) {
  1423.         uintptr_t left_pg = node->key[node->keys - 1];
  1424.         uintptr_t right_pg = leaf->key[0];
  1425.         size_t left_cnt = (size_t) node->value[node->keys - 1];
  1426.         size_t right_cnt = (size_t) leaf->value[0];
  1427.        
  1428.         /*
  1429.          * Examine the possibility that the interval fits
  1430.          * somewhere between the rightmost interval of
  1431.          * the left neigbour and the first interval of the leaf.
  1432.          */
  1433.          
  1434.         if (page >= right_pg) {
  1435.             /* Do nothing. */
  1436.         } else if (overlaps(page, count * PAGE_SIZE, left_pg,
  1437.             left_cnt * PAGE_SIZE)) {
  1438.             /* The interval intersects with the left interval. */
  1439.             return 0;
  1440.         } else if (overlaps(page, count * PAGE_SIZE, right_pg,
  1441.             right_cnt * PAGE_SIZE)) {
  1442.             /* The interval intersects with the right interval. */
  1443.             return 0;          
  1444.         } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
  1445.             (page + count * PAGE_SIZE == right_pg)) {
  1446.             /*
  1447.              * The interval can be added by merging the two already
  1448.              * present intervals.
  1449.              */
  1450.             node->value[node->keys - 1] += count + right_cnt;
  1451.             btree_remove(&a->used_space, right_pg, leaf);
  1452.             return 1;
  1453.         } else if (page == left_pg + left_cnt * PAGE_SIZE) {
  1454.             /*
  1455.              * The interval can be added by simply growing the left
  1456.              * interval.
  1457.              */
  1458.             node->value[node->keys - 1] += count;
  1459.             return 1;
  1460.         } else if (page + count * PAGE_SIZE == right_pg) {
  1461.             /*
  1462.              * The interval can be addded by simply moving base of
  1463.              * the right interval down and increasing its size
  1464.              * accordingly.
  1465.              */
  1466.             leaf->value[0] += count;
  1467.             leaf->key[0] = page;
  1468.             return 1;
  1469.         } else {
  1470.             /*
  1471.              * The interval is between both neigbouring intervals,
  1472.              * but cannot be merged with any of them.
  1473.              */
  1474.             btree_insert(&a->used_space, page, (void *) count,
  1475.                 leaf);
  1476.             return 1;
  1477.         }
  1478.     } else if (page < leaf->key[0]) {
  1479.         uintptr_t right_pg = leaf->key[0];
  1480.         size_t right_cnt = (size_t) leaf->value[0];
  1481.    
  1482.         /*
  1483.          * Investigate the border case in which the left neighbour does
  1484.          * not exist but the interval fits from the left.
  1485.          */
  1486.          
  1487.         if (overlaps(page, count * PAGE_SIZE, right_pg,
  1488.             right_cnt * PAGE_SIZE)) {
  1489.             /* The interval intersects with the right interval. */
  1490.             return 0;
  1491.         } else if (page + count * PAGE_SIZE == right_pg) {
  1492.             /*
  1493.              * The interval can be added by moving the base of the
  1494.              * right interval down and increasing its size
  1495.              * accordingly.
  1496.              */
  1497.             leaf->key[0] = page;
  1498.             leaf->value[0] += count;
  1499.             return 1;
  1500.         } else {
  1501.             /*
  1502.              * The interval doesn't adjoin with the right interval.
  1503.              * It must be added individually.
  1504.              */
  1505.             btree_insert(&a->used_space, page, (void *) count,
  1506.                 leaf);
  1507.             return 1;
  1508.         }
  1509.     }

    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
    if (node) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        uintptr_t right_pg = node->key[0];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
        size_t right_cnt = (size_t) node->value[0];

        /*
         * Examine the possibility that the interval fits
         * somewhere between the leftmost interval of
         * the right neighbour and the last interval of the leaf.
         */

        if (page < left_pg) {
            /* Do nothing. */
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
            left_cnt * PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
            right_cnt * PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
            (page + count * PAGE_SIZE == right_pg)) {
            /*
             * The interval can be added by merging the two already
             * present intervals.
             */
            leaf->value[leaf->keys - 1] += count + right_cnt;
            btree_remove(&a->used_space, right_pg, node);
            return 1;
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
            /*
             * The interval can be added by simply growing the left
             * interval.
             */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else if (page + count * PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by simply moving the base
             * of the right interval down and increasing its size
             * accordingly.
             */
            node->value[0] += count;
            node->key[0] = page;
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&a->used_space, page, (void *) count,
                leaf);
            return 1;
        }
    } else if (page >= leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];

        /*
         * Investigate the border case in which the right neighbour
         * does not exist but the interval fits from the right.
         */

        if (overlaps(page, count * PAGE_SIZE, left_pg,
            left_cnt * PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
            /*
             * The interval can be added by growing the left
             * interval.
             */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else {
            /*
             * The interval doesn't adjoin the left interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count,
                leaf);
            return 1;
        }
    }

    /*
     * Note that if the algorithm made it thus far, the interval can fit
     * only between two other intervals of the leaf. The two border cases
     * were already resolved.
     */
    for (i = 1; i < leaf->keys; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1];
            uintptr_t right_pg = leaf->key[i];
            size_t left_cnt = (size_t) leaf->value[i - 1];
            size_t right_cnt = (size_t) leaf->value[i];

            /*
             * The interval fits between left_pg and right_pg.
             */

            if (overlaps(page, count * PAGE_SIZE, left_pg,
                left_cnt * PAGE_SIZE)) {
                /*
                 * The interval intersects with the left
                 * interval.
                 */
                return 0;
            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
                right_cnt * PAGE_SIZE)) {
                /*
                 * The interval intersects with the right
                 * interval.
                 */
                return 0;
            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
                (page + count * PAGE_SIZE == right_pg)) {
                /*
                 * The interval can be added by merging the two
                 * already present intervals.
                 */
                leaf->value[i - 1] += count + right_cnt;
                btree_remove(&a->used_space, right_pg, leaf);
                return 1;
            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
                /*
                 * The interval can be added by simply growing
                 * the left interval.
                 */
                leaf->value[i - 1] += count;
                return 1;
            } else if (page + count * PAGE_SIZE == right_pg) {
                /*
                 * The interval can be added by simply moving
                 * the base of the right interval down and
                 * increasing its size accordingly.
                 */
                leaf->value[i] += count;
                leaf->key[i] = page;
                return 1;
            } else {
                /*
                 * The interval is between both neighbouring
                 * intervals, but cannot be merged with any of
                 * them.
                 */
                btree_insert(&a->used_space, page,
                    (void *) count, leaf);
                return 1;
            }
        }
    }

    panic("Inconsistency detected while adding %" PRIs " pages of used "
        "space at %p.", count, page);
}
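
/*
 * Illustrative sketch (not part of the build): how used_space_insert()
 * coalesces adjacent runs of pages into a single B+tree interval. The
 * area pointer, the addresses and the assumption PAGE_SIZE == 0x1000 are
 * hypothetical; the caller is expected to hold the area lock.
 */
#if 0
static void used_space_insert_example(as_area_t *a)
{
    used_space_insert(a, 0x10000, 2); /* New interval [0x10000, 0x12000). */
    used_space_insert(a, 0x12000, 1); /* Adjoins on the right; the interval grows. */
    used_space_insert(a, 0x13000, 4); /* Grows it again; still one key in the tree. */
    used_space_insert(a, 0x11000, 1); /* Overlaps an existing run; returns 0. */
}
#endif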

/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a     Address space area.
 * @param page      First page to be marked.
 * @param count     Number of pages to be marked.
 *
 * @return      Zero on failure and non-zero on success.
 */
int used_space_remove(as_area_t *a, uintptr_t page, size_t count)
{
    btree_node_t *leaf, *node;
    size_t pages;
    unsigned int i;

    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
    ASSERT(count);

    pages = (size_t) btree_search(&a->used_space, page, &leaf);
    if (pages) {
        /*
         * We are lucky: page is the beginning of some interval.
         */
        if (count > pages) {
            return 0;
        } else if (count == pages) {
            btree_remove(&a->used_space, page, leaf);
            return 1;
        } else {
            /*
             * Find the respective interval.
             * Decrease its size and relocate its start address.
             */
            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page) {
                    leaf->key[i] += count * PAGE_SIZE;
                    leaf->value[i] -= count;
                    return 1;
                }
            }
            goto error;
        }
    }

    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
    if (node && page < leaf->key[0]) {
        uintptr_t left_pg = node->key[node->keys - 1];
        size_t left_cnt = (size_t) node->value[node->keys - 1];

        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
            count * PAGE_SIZE)) {
            if (page + count * PAGE_SIZE ==
                left_pg + left_cnt * PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost
                 * interval of the left neighbour and can be
                 * removed by updating the size of the bigger
                 * interval.
                 */
                node->value[node->keys - 1] -= count;
                return 1;
            } else if (page + count * PAGE_SIZE <
                left_pg + left_cnt * PAGE_SIZE) {
                size_t new_cnt;

                /*
                 * The interval is contained in the rightmost
                 * interval of the left neighbour but its
                 * removal requires both updating the size of
                 * the original interval and also inserting a
                 * new interval.
                 */
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
                node->value[node->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page +
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    } else if (page < leaf->key[0]) {
        return 0;
    }

    if (page > leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];

        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
            count * PAGE_SIZE)) {
            if (page + count * PAGE_SIZE ==
                left_pg + left_cnt * PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost
                 * interval of the leaf and can be removed by
                 * updating the size of the bigger interval.
                 */
                leaf->value[leaf->keys - 1] -= count;
                return 1;
            } else if (page + count * PAGE_SIZE < left_pg +
                left_cnt * PAGE_SIZE) {
                size_t new_cnt;

                /*
                 * The interval is contained in the rightmost
                 * interval of the leaf but its removal
                 * requires both updating the size of the
                 * original interval and also inserting a new
                 * interval.
                 */
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
                leaf->value[leaf->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page +
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    }

    /*
     * The border cases have already been resolved.
     * Now the interval can only be between intervals of the leaf.
     */
    for (i = 1; i < leaf->keys - 1; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1];
            size_t left_cnt = (size_t) leaf->value[i - 1];

            /*
             * Now the interval is between intervals corresponding
             * to (i - 1) and i.
             */
            if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
                count * PAGE_SIZE)) {
                if (page + count * PAGE_SIZE ==
                    left_pg + left_cnt * PAGE_SIZE) {
                    /*
                     * The interval is contained in the
                     * interval (i - 1) of the leaf and can
                     * be removed by updating the size of
                     * the bigger interval.
                     */
                    leaf->value[i - 1] -= count;
                    return 1;
                } else if (page + count * PAGE_SIZE <
                    left_pg + left_cnt * PAGE_SIZE) {
                    size_t new_cnt;

                    /*
                     * The interval is contained in the
                     * interval (i - 1) of the leaf but its
                     * removal requires both updating the
                     * size of the original interval and
                     * also inserting a new interval.
                     */
                    new_cnt = ((left_pg +
                        left_cnt * PAGE_SIZE) -
                        (page + count * PAGE_SIZE)) >>
                        PAGE_WIDTH;
                    leaf->value[i - 1] -= count + new_cnt;
                    btree_insert(&a->used_space, page +
                        count * PAGE_SIZE, (void *) new_cnt,
                        leaf);
                    return 1;
                }
            }
            return 0;
        }
    }

error:
    panic("Inconsistency detected while removing %" PRIs " pages of used "
        "space from %p.", count, page);
}
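
/*
 * Illustrative sketch (not part of the build): removing pages from the
 * middle of a tracked run makes used_space_remove() split the interval,
 * shrinking the original key and inserting a new one after the hole.
 * The area pointer, the addresses and PAGE_SIZE == 0x1000 are
 * hypothetical; the caller is expected to hold the area lock.
 */
#if 0
static void used_space_remove_example(as_area_t *a)
{
    used_space_insert(a, 0x10000, 8);  /* One interval [0x10000, 0x18000). */
    used_space_remove(a, 0x12000, 2);  /* Punch a hole in the middle. */
    /* The map now holds [0x10000, 0x12000) and [0x14000, 0x18000). */
}
#endif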

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info   Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
    bool dealloc = false;

    mutex_lock(&sh_info->lock);
    ASSERT(sh_info->refcount);
    if (--sh_info->refcount == 0) {
        dealloc = true;
        link_t *cur;

        /*
         * Now carefully walk the pagemap B+tree and free/remove
         * the reference from all frames found there.
         */
        for (cur = sh_info->pagemap.leaf_head.next;
            cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
            btree_node_t *node;
            unsigned int i;

            node = list_get_instance(cur, btree_node_t, leaf_link);
            for (i = 0; i < node->keys; i++)
                frame_free((uintptr_t) node->value[i]);
        }

    }
    mutex_unlock(&sh_info->lock);

    if (dealloc) {
        btree_destroy(&sh_info->pagemap);
        free(sh_info);
    }
}
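
/*
 * Illustrative sketch (not part of the build): the intended pairing for
 * the reference count above. The acquiring side shown here is
 * hypothetical; it increments refcount under sh_info->lock, and every
 * detaching party calls sh_info_remove_reference(), the last one freeing
 * the pagemap and the structure itself.
 */
#if 0
static void share_info_reference_example(share_info_t *sh_info)
{
    /* Acquire a reference (normally done by the area-sharing code). */
    mutex_lock(&sh_info->lock);
    sh_info->refcount++;
    mutex_unlock(&sh_info->lock);

    /* Release it again; frees everything once the count drops to 0. */
    sh_info_remove_reference(sh_info);
}
#endif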

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
        AS_AREA_ATTR_NONE, &anon_backend, NULL))
        return (unative_t) address;
    else
        return (unative_t) -1;
}

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
    return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_change_flags(). */
unative_t sys_as_area_change_flags(uintptr_t address, int flags)
{
    return (unative_t) as_area_change_flags(AS, flags, address);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
    return (unative_t) as_area_destroy(AS, address);
}
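
/*
 * Illustrative sketch (not part of the build): how userspace might reach
 * the wrappers above. The __SYSCALL3 macro, the SYS_AS_AREA_CREATE number
 * and the libc-side error convention are assumptions modelled on the
 * uspace library; only the kernel-side wrappers are defined in this file.
 */
#if 0
static void *anon_area_create(void *address, size_t size)
{
    unative_t rc = __SYSCALL3(SYS_AS_AREA_CREATE, (unative_t) address,
        (unative_t) size, (unative_t) (AS_AREA_READ | AS_AREA_WRITE));
    /* sys_as_area_create() returns the base address or (unative_t) -1. */
    return (rc == (unative_t) -1) ? NULL : (void *) rc;
}
#endif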

/** Print out information about address space.
 *
 * @param as        Address space.
 */
void as_print(as_t *as)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /* Print out info about address space areas. */
    link_t *cur;
    for (cur = as->as_area_btree.leaf_head.next;
        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
        btree_node_t *node;

        node = list_get_instance(cur, btree_node_t, leaf_link);

        unsigned int i;
        for (i = 0; i < node->keys; i++) {
            as_area_t *area = node->value[i];

            mutex_lock(&area->lock);
            printf("as_area: %p, base=%p, pages=%" PRIs
                " (%p - %p)\n", area, area->base, area->pages,
                area->base, area->base + FRAMES2SIZE(area->pages));
            mutex_unlock(&area->lock);
        }
    }

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
}
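
/*
 * Example output of as_print() (format per the printf() above; the
 * pointer values are hypothetical):
 *
 *   as_area: 0x801f2c00, base=0x70000000, pages=16 (0x70000000 - 0x70010000)
 */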

/** @}
 */