/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief   Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, it is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * The functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/** This lock protects the inactive_as_with_asid_head list. It must be acquired before the as_t mutex. */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock);
    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->cpu_refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}
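
#if 0
/*
 * Illustrative sketch only (not part of the original sources): a typical
 * address space lifecycle built from the primitives in this file. Error
 * handling and task bookkeeping are elided.
 */
static void as_lifecycle_sketch(void)
{
    as_t *as;

    as = as_create(0);      /* ordinary address space, starts with ASID_INVALID */
    /* ... create areas, attach the address space to a task ... */
    as_destroy(as);         /* legal only once as->refcount == 0 */
}
#endif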

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is zero),
 * the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
    ipl_t ipl;
    bool cond;

    ASSERT(as->refcount == 0);

    /*
     * Since there is no reference to this address space,
     * it is safe not to lock its mutex.
     */
    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
        if (as != AS && as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);
        asid_put(as->asid);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    for (cond = true; cond; ) {
        btree_node_t *node;

        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
        node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);

        if ((cond = node->keys)) {
            as_area_destroy(as, node->key[0]);
        }
    }

    btree_destroy(&as->as_area_btree);
    page_table_destroy(as->page_table);

    interrupts_restore(ipl);

    free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
           mem_backend_t *backend, mem_backend_data_t *backend_data)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&a->lock);

    a->as = as;
    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;
    a->sh_info = NULL;
    a->backend = backend;
    if (backend_data)
        a->backend_data = *backend_data;
    else
        memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);

    btree_create(&a->used_space);

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}
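
#if 0
/*
 * Illustrative sketch only: creating a one-page anonymous, read/write,
 * cacheable area at a hypothetical base address. anon_backend is the
 * anonymous memory backend also used by the sys_as_area_create() wrapper
 * at the end of this file.
 */
static void as_area_create_sketch(as_t *as)
{
    as_area_t *area;

    area = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
        PAGE_SIZE, 0x10000000, AS_AREA_ATTR_NONE, &anon_backend, NULL);
    if (!area) {
        /* base not page-aligned, zero size, invalid flags or a conflict */
    }
}
#endif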

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->backend == &phys_backend) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    if (pages < area->pages) {
        bool cond;
        __address start_free = area->base + pages*PAGE_SIZE;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        /*
         * Start TLB shootdown sequence.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        for (cond = true; cond;) {
            btree_node_t *node;

            ASSERT(!list_empty(&area->used_space.leaf_head));
            node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                __address b = node->key[node->keys - 1];
                count_t c = (count_t) node->value[node->keys - 1];
                int i = 0;

                if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {

                    if (b + c*PAGE_SIZE <= start_free) {
                        /*
                         * The whole interval fits completely
                         * in the resized address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval corresponding to b and c
                     * overlaps with the resized address space area.
                     */

                    cond = false;   /* we are almost done */
                    i = (start_free - b) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free, c - i))
                        panic("Could not remove used space.\n");
                } else {
                    /*
                     * The interval of used space can be completely removed.
                     */
                    if (!used_space_remove(area, b, c))
                        panic("Could not remove used space.\n");
                }

                for (; i < c; i++) {
                    pte_t *pte;

                    page_table_lock(as, false);
                    pte = page_mapping_find(as, b + i*PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                    if (area->backend && area->backend->frame_free) {
                        area->backend->frame_free(area,
                            b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                    }
                    page_mapping_remove(as, b + i*PAGE_SIZE);
                    page_table_unlock(as, false);
                }
            }
        }

        /*
         * Finish TLB shootdown sequence.
         */
        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}
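
#if 0
/*
 * Illustrative sketch only: growing an area to two pages and shrinking it
 * back. The passed address identifies the area and the new size is
 * measured from that address; the return value is zero or an errno.h code.
 */
static void as_area_resize_sketch(as_t *as, __address base)
{
    if (as_area_resize(as, base, 2 * PAGE_SIZE, 0) != 0) {
        /* ENOENT, ENOTSUP, EPERM or EADDRNOTAVAIL */
    }
    (void) as_area_resize(as, base, PAGE_SIZE, 0);
}
#endif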

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, __address address)
{
    as_area_t *area;
    __address base;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    base = area->base;

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);

    /*
     * Visit only the pages mapped by the used_space B+tree.
     */
    for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            __address b = node->key[i];
            count_t j;
            pte_t *pte;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j*PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                if (area->backend && area->backend->frame_free) {
                    area->backend->frame_free(area,
                        b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
                }
                page_mapping_remove(as, b + j*PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */
    tlb_invalidate_pages(AS->asid, area->base, area->pages);
    tlb_shootdown_finalize();

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&AS->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&AS->lock);
    interrupts_restore(ipl);
    return 0;
}

/** Share address space area with another or the same address space.
 *
 * The address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success.
 *     ENOENT if there is no such task or no such address space area,
 *     EPERM if there was a problem in accepting the area,
 *     ENOMEM if there was a problem in allocating the destination
 *     address space area,
 *     ENOTSUP if an attempt to share a non-anonymous address space
 *     area is detected.
 */
int as_area_share(as_t *src_as, __address src_base, size_t acc_size,
          as_t *dst_as, __address dst_base, int dst_flags_mask)
{
    ipl_t ipl;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;
    share_info_t *sh_info;
    mem_backend_t *src_backend;
    mem_backend_data_t src_backend_data;

    ipl = interrupts_disable();
    mutex_lock(&src_as->lock);
    src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (!src_area->backend || !src_area->backend->share) {
        /*
         * There is no backend or the backend does not
         * know how to share the area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    src_backend = src_area->backend;
    src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping. */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    /*
     * Now we are committed to sharing the area.
     * First prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    sh_info = src_area->sh_info;
    if (!sh_info) {
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&sh_info->lock);
        sh_info->refcount = 2;
        btree_create(&sh_info->pagemap);
        src_area->sh_info = sh_info;
    } else {
        mutex_lock(&sh_info->lock);
        sh_info->refcount++;
        mutex_unlock(&sh_info->lock);
    }

    src_area->backend->share(src_area);

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create a copy of the source address space area.
     * The destination area is created with the AS_AREA_ATTR_PARTIAL
     * attribute set, which prevents a race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
                  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);

        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);

    interrupts_restore(ipl);

    return 0;
}
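
#if 0
/*
 * Illustrative sketch only: sharing an area read-only into a second
 * address space at the same base. acc_size must match the source area
 * size exactly and the requested flags must be a subset of the source
 * flags; the function name and parameters here are hypothetical.
 */
static int as_area_share_sketch(as_t *src_as, as_t *dst_as, __address base, size_t size)
{
    return as_area_share(src_as, base, size, dst_as, base, AS_AREA_READ);
}
#endif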

/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(__address page, pf_access_t access, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;

    if (!THREAD)
        return AS_PF_FAULT;

    ASSERT(AS);

    mutex_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (!area->backend || !area->backend->page_fault) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                page_table_unlock(AS, false);
                mutex_unlock(&area->lock);
                mutex_unlock(&AS->lock);
                return AS_PF_OK;
            }
        }
    }

    /*
     * Resort to the backend page fault handler.
     */
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}
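
#if 0
/*
 * Illustrative sketch only: how an architecture's low-level fault handler
 * might dispatch to as_page_fault(). The handler name and the way the
 * faulting address is obtained are hypothetical; the real handlers live
 * under arch/ and genarch/.
 */
static void arch_fault_dispatch_sketch(__address fault_address, pf_access_t access, istate_t *istate)
{
    if (as_page_fault(ALIGN_DOWN(fault_address, PAGE_SIZE), access, istate) == AS_PF_FAULT)
        panic("page fault at %p\n", fault_address);
}
#endif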

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        mutex_lock_active(&old->lock);
        ASSERT(old->cpu_refcount);
        if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        mutex_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    mutex_lock_active(&new->lock);
    if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    mutex_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        mutex_lock_active(&new->lock);
        new->asid = asid;
        mutex_unlock(&new->lock);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check that it does not conflict with the kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(__address base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, __address page, count_t count)
{
    btree_node_t *leaf, *node;
    count_t pages;
    int i;

    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
    ASSERT(count);

    pages = (count_t) btree_search(&a->used_space, page, &leaf);
    if (pages) {
        /*
         * We hit the beginning of some used space.
         */
        return 0;
    }

    if (!leaf->keys) {
        btree_insert(&a->used_space, page, (void *) count, leaf);
        return 1;
    }

    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
    if (node) {
        __address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
        count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];

        /*
         * Examine the possibility that the interval fits
         * somewhere between the rightmost interval of
         * the left neighbour and the first interval of the leaf.
         */

        if (page >= right_pg) {
            /* Do nothing. */
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
            /* The interval can be added by merging the two already present intervals. */
            node->value[node->keys - 1] += count + right_cnt;
            btree_remove(&a->used_space, right_pg, leaf);
            return 1;
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
            /* The interval can be added by simply growing the left interval. */
            node->value[node->keys - 1] += count;
            return 1;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by simply moving the base of the right
             * interval down and increasing its size accordingly.
             */
            leaf->value[0] += count;
            leaf->key[0] = page;
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    } else if (page < leaf->key[0]) {
        __address right_pg = leaf->key[0];
        count_t right_cnt = (count_t) leaf->value[0];

        /*
         * Investigate the border case in which the left neighbour does not
         * exist but the interval fits from the left.
         */

        if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by moving the base of the right interval down
             * and increasing its size accordingly.
             */
            leaf->key[0] = page;
            leaf->value[0] += count;
            return 1;
        } else {
            /*
             * The interval doesn't adjoin the right interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    }

    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
    if (node) {
        __address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];

        /*
         * Examine the possibility that the interval fits
         * somewhere between the leftmost interval of
         * the right neighbour and the last interval of the leaf.
         */

        if (page < left_pg) {
            /* Do nothing. */
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
            /* The interval can be added by merging the two already present intervals. */
            leaf->value[leaf->keys - 1] += count + right_cnt;
            btree_remove(&a->used_space, right_pg, node);
            return 1;
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
            /* The interval can be added by simply growing the left interval. */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by simply moving the base of the right
             * interval down and increasing its size accordingly.
             */
            node->value[0] += count;
            node->key[0] = page;
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    } else if (page >= leaf->key[leaf->keys - 1]) {
        __address left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

        /*
         * Investigate the border case in which the right neighbour does not
         * exist but the interval fits from the right.
         */

        if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (left_pg + left_cnt*PAGE_SIZE == page) {
            /* The interval can be added by growing the left interval. */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else {
            /*
             * The interval doesn't adjoin the left interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    }

    /*
     * Note that if the algorithm made it thus far, the interval can fit only
     * between two other intervals of the leaf. The two border cases were already
     * resolved.
     */
    for (i = 1; i < leaf->keys; i++) {
        if (page < leaf->key[i]) {
            __address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
            count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

            /*
             * The interval fits between left_pg and right_pg.
             */

            if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
                /* The interval intersects with the left interval. */
                return 0;
            } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
                /* The interval intersects with the right interval. */
                return 0;
            } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
                /* The interval can be added by merging the two already present intervals. */
                leaf->value[i - 1] += count + right_cnt;
                btree_remove(&a->used_space, right_pg, leaf);
                return 1;
            } else if (page == left_pg + left_cnt*PAGE_SIZE) {
                /* The interval can be added by simply growing the left interval. */
                leaf->value[i - 1] += count;
                return 1;
            } else if (page + count*PAGE_SIZE == right_pg) {
                /*
                 * The interval can be added by simply moving the base of the right
                 * interval down and increasing its size accordingly.
                 */
                leaf->value[i] += count;
                leaf->key[i] = page;
                return 1;
            } else {
                /*
                 * The interval is between both neighbouring intervals,
                 * but cannot be merged with any of them.
                 */
                btree_insert(&a->used_space, page, (void *) count, leaf);
                return 1;
            }
        }
    }

    panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
}
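
#if 0
/*
 * Illustrative sketch only: how the interval bookkeeping above behaves.
 * Adjoining intervals are merged, overlapping inserts fail with 0, and
 * removing a whole interval deletes its B+tree record.
 */
static void used_space_sketch(as_area_t *a, __address page)
{
    (void) used_space_insert(a, page, 1);               /* [page, page + PAGE_SIZE) */
    (void) used_space_insert(a, page + PAGE_SIZE, 1);   /* merged into the left interval */
    (void) used_space_insert(a, page, 1);               /* overlap, returns 0 */
    (void) used_space_remove(a, page, 2);               /* removes the merged interval */
}
#endif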

/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, __address page, count_t count)
{
    btree_node_t *leaf, *node;
    count_t pages;
    int i;

    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
    ASSERT(count);

    pages = (count_t) btree_search(&a->used_space, page, &leaf);
    if (pages) {
        /*
         * We are lucky, page is the beginning of some interval.
         */
        if (count > pages) {
            return 0;
        } else if (count == pages) {
            btree_remove(&a->used_space, page, leaf);
            return 1;
        } else {
            /*
             * Find the respective interval.
             * Decrease its size and relocate its start address.
             */
            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page) {
                    leaf->key[i] += count*PAGE_SIZE;
                    leaf->value[i] -= count;
                    return 1;
                }
            }
            goto error;
        }
    }

    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
    if (node && page < leaf->key[0]) {
        __address left_pg = node->key[node->keys - 1];
        count_t left_cnt = (count_t) node->value[node->keys - 1];

        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost interval
                 * of the left neighbour and can be removed by
                 * updating the size of the bigger interval.
                 */
                node->value[node->keys - 1] -= count;
                return 1;
            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
                count_t new_cnt;

                /*
                 * The interval is contained in the rightmost interval
                 * of the left neighbour but its removal requires
                 * both updating the size of the original interval and
                 * also inserting a new interval.
                 */
                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                node->value[node->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    } else if (page < leaf->key[0]) {
        return 0;
    }

    if (page > leaf->key[leaf->keys - 1]) {
        __address left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost interval
                 * of the leaf and can be removed by updating the size
                 * of the bigger interval.
                 */
                leaf->value[leaf->keys - 1] -= count;
                return 1;
            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
                count_t new_cnt;

                /*
                 * The interval is contained in the rightmost interval
                 * of the leaf but its removal requires both updating
                 * the size of the original interval and
                 * also inserting a new interval.
                 */
                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                leaf->value[leaf->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    }

    /*
     * The border cases have already been resolved.
     * Now the interval can only be between intervals of the leaf.
     */
    for (i = 1; i < leaf->keys - 1; i++) {
        if (page < leaf->key[i]) {
            __address left_pg = leaf->key[i - 1];
            count_t left_cnt = (count_t) leaf->value[i - 1];

            /*
             * Now the interval is between intervals corresponding to (i - 1) and i.
             */
            if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
                if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
                    /*
                     * The interval is contained in the interval (i - 1)
                     * of the leaf and can be removed by updating the size
                     * of the bigger interval.
                     */
                    leaf->value[i - 1] -= count;
                    return 1;
                } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
                    count_t new_cnt;

                    /*
                     * The interval is contained in the interval (i - 1)
                     * of the leaf but its removal requires both updating
                     * the size of the original interval and
                     * also inserting a new interval.
                     */
                    new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                    leaf->value[i - 1] -= count + new_cnt;
                    btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                    return 1;
                }
            }
            return 0;
        }
    }

error:
    panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
}

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
    bool dealloc = false;

    mutex_lock(&sh_info->lock);
    ASSERT(sh_info->refcount);
    if (--sh_info->refcount == 0) {
        dealloc = true;
        link_t *cur;

        /*
         * Now walk carefully the pagemap B+tree and free/remove
         * reference from all frames found there.
         */
        for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
            btree_node_t *node;
            int i;

            node = list_get_instance(cur, btree_node_t, leaf_link);
            for (i = 0; i < node->keys; i++)
                frame_free(ADDR2PFN((__address) node->value[i]));
        }
    }
    mutex_unlock(&sh_info->lock);

    if (dealloc) {
        btree_destroy(&sh_info->pagemap);
        free(sh_info);
    }
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
        return (__native) address;
    else
        return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
    return (__native) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
__native sys_as_area_destroy(__address address)
{
    return (__native) as_area_destroy(AS, address);
}

/** @}
 */