/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file    as.c
 * @brief   Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

/** This structure contains information associated with the shared address space area. */
struct share_info {
    mutex_t lock;       /**< This lock must be acquired only when the as_area lock is held. */
    count_t refcount;   /**< This structure can be deallocated if refcount drops to 0. */
    btree_t pagemap;    /**< B+tree containing complete map of anonymous pages of the shared area. */
};

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. Must be acquired before as_t mutex. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock);
    btree_create(&as->as_area_btree);
    
    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;
    
    as->refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}

/** Free address space. */
void as_free(as_t *as)
{
    ASSERT(as->refcount == 0);

    /* TODO: free as_areas and other resources held by as */
    /* TODO: free page table */
    free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
           mem_backend_t *backend, void **backend_data)
{
    ipl_t ipl;
    as_area_t *a;
    
    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;
    
    ipl = interrupts_disable();
    mutex_lock(&as->lock);
    
    if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }
    
    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&a->lock);
    
    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;
    a->sh_info = NULL;
    a->backend = backend;
    if (backend_data) {
        a->backend_data[0] = backend_data[0];
        a->backend_data[1] = backend_data[1];
    }
    btree_create(&a->used_space);
    
    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;
    
    ipl = interrupts_disable();
    mutex_lock(&as->lock);
    
    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->flags & AS_AREA_DEVICE) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

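    /*
     * The new size of the area is measured from area->base up to
     * address + size and converted to a number of pages.
     */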
    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }
    
    if (pages < area->pages) {
        bool cond;
        __address start_free = area->base + pages*PAGE_SIZE;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        for (cond = true; cond;) {
            btree_node_t *node;
        
            ASSERT(!list_empty(&area->used_space.leaf_head));
            node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                __address b = node->key[node->keys - 1];
                count_t c = (count_t) node->value[node->keys - 1];
                int i = 0;
            
                if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
                    
                    if (b + c*PAGE_SIZE <= start_free) {
                        /*
                         * The whole interval fits completely
                         * in the resized address space area.
                         */
                        break;
                    }
        
                    /*
                     * Part of the interval corresponding to b and c
                     * overlaps with the resized address space area.
                     */
        
                    cond = false;   /* we are almost done */
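                    /*
                     * i is the number of pages from this interval that
                     * lie below start_free and thus remain used; only
                     * the remaining c - i pages are removed from used
                     * space and unmapped below.
                     */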
                    i = (start_free - b) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free, c - i))
                        panic("Could not remove used space.\n");
                } else {
                    /*
                     * The interval of used space can be completely removed.
                     */
                    if (!used_space_remove(area, b, c))
                        panic("Could not remove used space.\n");
                }
            
                for (; i < c; i++) {
                    pte_t *pte;
            
                    page_table_lock(as, false);
                    pte = page_mapping_find(as, b + i*PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                    if (area->backend && area->backend->backend_frame_free) {
                        area->backend->backend_frame_free(area,
                            b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                    }
                    page_mapping_remove(as, b + i*PAGE_SIZE);
                    page_table_unlock(as, false);
                }
            }
        }
        /*
         * Invalidate TLBs.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;
    
    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, __address address)
{
    as_area_t *area;
    __address base;
    ipl_t ipl;
    bool cond;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    base = area->base;

    /*
     * Visit only the pages mapped by used_space B+tree.
     * Note that we must be very careful when walking the tree
     * leaf list and removing used space as the leaf list changes
     * unpredictably after each remove. The solution is to actually
     * not walk the tree at all, but to remove items from the head
     * of the leaf list while there are some keys left.
     */
    for (cond = true; cond;) {
        btree_node_t *node;
        
        ASSERT(!list_empty(&area->used_space.leaf_head));
        node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
        if ((cond = (bool) node->keys)) {
            __address b = node->key[0];
            count_t i;
            pte_t *pte;
            
            for (i = 0; i < (count_t) node->value[0]; i++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + i*PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                if (area->backend && area->backend->backend_frame_free) {
                    area->backend->backend_frame_free(area,
                        b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                }
                page_mapping_remove(as, b + i*PAGE_SIZE);
                page_table_unlock(as, false);
            }
            if (!used_space_remove(area, b, i))
                panic("Could not remove used space.\n");
        }
    }
    btree_destroy(&area->used_space);

    /*
     * Invalidate TLBs.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
    tlb_invalidate_pages(as->asid, area->base, area->pages);
    tlb_shootdown_finalize();

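    /*
     * Mark the area as partial so that concurrent as_page_fault()
     * calls back off gracefully while the area is being removed
     * from the address space.
     */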
    area->attributes |= AS_AREA_ATTR_PARTIAL;
    
    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);
        
    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);
    
    free(area);
    
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
    return 0;
}

/** Steal address space area from another task.
 *
 * The address space area is stolen from another task.
 * Moreover, any existing mapping
 * is copied as well, thus providing a mechanism
 * for sharing a group of pages. The source address
 * space area and any associated mappings are preserved.
 *
 * @param src_task Pointer to the source task.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_base Target base address.
 *
 * @return Zero on success or ENOENT if there is no such task or
 *     if there is no such address space area,
 *     EPERM if there was a problem in accepting the area or
 *     ENOMEM if there was a problem in allocating the destination
 *     address space area.
 */
int as_area_steal(task_t *src_task, __address src_base, size_t acc_size,
          __address dst_base)
{
    ipl_t ipl;
    count_t i;
    as_t *src_as;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;

    ipl = interrupts_disable();
    spinlock_lock(&src_task->lock);
    src_as = src_task->as;
    
    mutex_lock(&src_as->lock);
    src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        spinlock_unlock(&src_task->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }
    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    if (src_size != acc_size) {
        spinlock_unlock(&src_task->lock);
        interrupts_restore(ipl);
        return EPERM;
    }
    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     */
    dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        spinlock_unlock(&src_task->lock);
        interrupts_restore(ipl);
        return ENOMEM;
    }
    
    spinlock_unlock(&src_task->lock);
    
    /*
     * Avoid deadlock by first locking the address space with the lower address.
     */
    if (AS < src_as) {
        mutex_lock(&AS->lock);
        mutex_lock(&src_as->lock);
    } else {
        mutex_lock(&src_as->lock);
        mutex_lock(&AS->lock);
    }
    
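    /*
     * Replicate every present mapping of the source area in the
     * destination area. Frames of non-device areas gain an extra
     * reference so that they are not freed while shared by both tasks.
     */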
    for (i = 0; i < SIZE2FRAMES(src_size); i++) {
        pte_t *pte;
        __address frame;
            
        page_table_lock(src_as, false);
        pte = page_mapping_find(src_as, src_base + i*PAGE_SIZE);
        if (pte && PTE_VALID(pte)) {
            ASSERT(PTE_PRESENT(pte));
            frame = PTE_GET_FRAME(pte);
            if (!(src_flags & AS_AREA_DEVICE))
                frame_reference_add(ADDR2PFN(frame));
            page_table_unlock(src_as, false);
        } else {
            page_table_unlock(src_as, false);
            continue;
        }
        
        page_table_lock(AS, false);
        page_mapping_insert(AS, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags));
        page_table_unlock(AS, false);
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute.
     */
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    mutex_unlock(&dst_area->lock);
    
    mutex_unlock(&AS->lock);
    mutex_unlock(&src_as->lock);
    interrupts_restore(ipl);
    
    return 0;
}

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
    as_area_t *area;
    ipl_t ipl;
    
    ipl = interrupts_disable();
    page_table_lock(as, true);
    
    area = find_area_and_lock(as, page);
    if (!area) {
        panic("Page not part of any as_area.\n");
    }

    ASSERT(!area->backend);
    
    page_mapping_insert(as, page, frame, as_area_get_flags(area));
    if (!used_space_insert(area, page, 1))
        panic("Could not insert used space.\n");
    
    mutex_unlock(&area->lock);
    page_table_unlock(as, true);
    interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(__address page, pf_access_t access, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;
    
    if (!THREAD)
        return AS_PF_FAULT;
        
    ASSERT(AS);

    mutex_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (!area->backend || !area->backend->backend_page_fault) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);
    
    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            page_table_unlock(AS, false);
            mutex_unlock(&area->lock);
            mutex_unlock(&AS->lock);
            return AS_PF_OK;
        }
    }
    
    /*
     * Resort to the backend page fault handler.
     */
    if (area->backend->backend_page_fault(area, page, access) != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }
    
    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

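/*
 * The fault could not be serviced.
 * If it interrupted copy_from_uspace() or copy_to_uspace(), redirect
 * execution to the respective failover address and report the fault
 * as deferred; otherwise report a genuine fault.
 */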
page_fault:
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * the scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;
    
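    /*
     * as->refcount counts the number of processors on which the
     * address space is currently active.
     */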
    ipl = interrupts_disable();
    spinlock_lock(&as_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        mutex_lock_active(&old->lock);
        ASSERT(old->refcount);
        if ((--old->refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        mutex_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    mutex_lock_active(&new->lock);
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    mutex_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of a new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;
        
        asid = asid_get();
        mutex_lock_active(&new->lock);
        new->asid = asid;
        mutex_unlock(&new->lock);
    }
    spinlock_unlock(&as_lock);
    interrupts_restore(ipl);
    
    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);
    
    AS = new;
}

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;
    
    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;
        
    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;
    
    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;
    
    if (!(aflags & AS_AREA_DEVICE))
        flags |= PAGE_CACHEABLE;
        
    return flags;
}

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}


/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;
    
    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }
    
    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */
    
    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;
    
    /*
     * We don't want any area to conflict with the NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;
    
    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */
    
    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }
    
    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    
    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
    
        if (a == avoid_area)
            continue;
    
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Now check that it does not conflict with the kernel address space either.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(__address base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, __address page, count_t count)
{
    btree_node_t *leaf, *node;
    count_t pages;
    int i;

    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
    ASSERT(count);

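    /*
     * The used_space B+tree maps the first page of each contiguous
     * interval of used space to the length of that interval in pages.
     * A successful search therefore means that page is the exact
     * beginning of an already existing interval.
     */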
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
    if (pages) {
        /*
         * We hit the beginning of some used space.
         */
        return 0;
    }

    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
    if (node) {
        __address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
        count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
        
        /*
         * Examine the possibility that the interval fits
         * somewhere between the rightmost interval of
         * the left neighbour and the first interval of the leaf.
         */
         
        if (page >= right_pg) {
            /* Do nothing. */
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
            /* The interval can be added by merging the two already present intervals. */
            node->value[node->keys - 1] += count + right_cnt;
            btree_remove(&a->used_space, right_pg, leaf);
            return 1;
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
            /* The interval can be added by simply growing the left interval. */
            node->value[node->keys - 1] += count;
            return 1;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by simply moving the base of the right
             * interval down and increasing its size accordingly.
             */
            leaf->value[0] += count;
            leaf->key[0] = page;
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with either of them.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    } else if (page < leaf->key[0]) {
        __address right_pg = leaf->key[0];
        count_t right_cnt = (count_t) leaf->value[0];
    
        /*
         * Investigate the border case in which the left neighbour does not
         * exist but the interval fits from the left.
         */
         
        if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by moving the base of the right interval down
             * and increasing its size accordingly.
             */
            leaf->key[0] = page;
            leaf->value[0] += count;
            return 1;
        } else {
            /*
             * The interval does not adjoin the right interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    }

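    /*
     * If control gets here, the interval lies beyond the first interval
     * of the leaf. Examine the leaf's last interval and the right
     * neighbour next.
     */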
    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
    if (node) {
        __address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
        
        /*
         * Examine the possibility that the interval fits
         * somewhere between the leftmost interval of
         * the right neighbour and the last interval of the leaf.
         */

        if (page < left_pg) {
            /* Do nothing. */
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
            /* The interval can be added by merging the two already present intervals. */
            leaf->value[leaf->keys - 1] += count + right_cnt;
            btree_remove(&a->used_space, right_pg, node);
            return 1;
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
            /* The interval can be added by simply growing the left interval. */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by simply moving the base of the right
             * interval down and increasing its size accordingly.
             */
            node->value[0] += count;
            node->key[0] = page;
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with either of them.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    } else if (page >= leaf->key[leaf->keys - 1]) {
        __address left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
    
        /*
         * Investigate the border case in which the right neighbour does not
         * exist but the interval fits from the right.
         */
         
        if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (left_pg + left_cnt*PAGE_SIZE == page) {
            /* The interval can be added by growing the left interval. */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else {
            /*
             * The interval does not adjoin the left interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    }
    
    /*
     * Note that if the algorithm made it thus far, the interval can fit only
     * between two other intervals of the leaf. The two border cases were already
     * resolved.
     */
    for (i = 1; i < leaf->keys; i++) {
        if (page < leaf->key[i]) {
            __address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
            count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

            /*
             * The interval fits between left_pg and right_pg.
             */

            if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
                /* The interval intersects with the left interval. */
                return 0;
            } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
                /* The interval intersects with the right interval. */
                return 0;
            } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
                /* The interval can be added by merging the two already present intervals. */
                leaf->value[i - 1] += count + right_cnt;
                btree_remove(&a->used_space, right_pg, leaf);
                return 1;
            } else if (page == left_pg + left_cnt*PAGE_SIZE) {
                /* The interval can be added by simply growing the left interval. */
                leaf->value[i - 1] += count;
                return 1;
            } else if (page + count*PAGE_SIZE == right_pg) {
                /*
                 * The interval can be added by simply moving the base of the right
                 * interval down and increasing its size accordingly.
                 */
                leaf->value[i] += count;
                leaf->key[i] = page;
                return 1;
            } else {
                /*
                 * The interval is between both neighbouring intervals,
                 * but cannot be merged with either of them.
                 */
                btree_insert(&a->used_space, page, (void *) count, leaf);
                return 1;
            }
        }
    }

    panic("Inconsistency detected while adding %d pages of used space at %P.\n", count, page);
}

/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, __address page, count_t count)
{
    btree_node_t *leaf, *node;
    count_t pages;
    int i;

    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
    ASSERT(count);

    pages = (count_t) btree_search(&a->used_space, page, &leaf);
    if (pages) {
        /*
         * We are lucky, page is the beginning of some interval.
         */
        if (count > pages) {
            return 0;
        } else if (count == pages) {
            btree_remove(&a->used_space, page, leaf);
            return 1;
        } else {
            /*
             * Find the respective interval.
             * Decrease its size and relocate its start address.
             */
            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page) {
                    leaf->key[i] += count*PAGE_SIZE;
                    leaf->value[i] -= count;
                    return 1;
                }
            }
            goto error;
        }
    }

    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
    if (node && page < leaf->key[0]) {
        __address left_pg = node->key[node->keys - 1];
        count_t left_cnt = (count_t) node->value[node->keys - 1];

        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost interval
                 * of the left neighbour and can be removed by
                 * updating the size of the bigger interval.
                 */
                node->value[node->keys - 1] -= count;
                return 1;
            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
                count_t new_cnt;
                
                /*
                 * The interval is contained in the rightmost interval
                 * of the left neighbour but its removal requires
                 * both updating the size of the original interval and
                 * also inserting a new interval.
                 */
                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                node->value[node->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    } else if (page < leaf->key[0]) {
        return 0;
    }
    
    if (page > leaf->key[leaf->keys - 1]) {
        __address left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost interval
                 * of the leaf and can be removed by updating the size
                 * of the bigger interval.
                 */
                leaf->value[leaf->keys - 1] -= count;
                return 1;
            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
                count_t new_cnt;
                
                /*
                 * The interval is contained in the rightmost interval
                 * of the leaf but its removal requires both updating
                 * the size of the original interval and
                 * also inserting a new interval.
                 */
                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                leaf->value[leaf->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    }
    
    /*
     * The border cases have been already resolved.
     * Now the interval can be only between intervals of the leaf.
     */
    for (i = 1; i < leaf->keys; i++) {
        if (page < leaf->key[i]) {
            __address left_pg = leaf->key[i - 1];
            count_t left_cnt = (count_t) leaf->value[i - 1];

            /*
             * Now the interval is between intervals corresponding to (i - 1) and i.
             */
            if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
                if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
                    /*
                     * The interval is contained in the interval (i - 1)
                     * of the leaf and can be removed by updating the size
                     * of the bigger interval.
                     */
                    leaf->value[i - 1] -= count;
                    return 1;
                } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
                    count_t new_cnt;
                
                    /*
                     * The interval is contained in the interval (i - 1)
                     * of the leaf but its removal requires both updating
                     * the size of the original interval and
                     * also inserting a new interval.
                     */
                    new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                    leaf->value[i - 1] -= count + new_cnt;
                    btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                    return 1;
                }
            }
            return 0;
        }
    }

error:
    panic("Inconsistency detected while removing %d pages of used space from %P.\n", count, page);
}

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
    bool dealloc = false;

    mutex_lock(&sh_info->lock);
    ASSERT(sh_info->refcount);
    if (--sh_info->refcount == 0) {
        bool cond;

        dealloc = true;
        
        /*
         * Now walk carefully the pagemap B+tree and free/remove
         * reference from all frames found there.
         */
        for (cond = true; cond;) {
            btree_node_t *node;
            
            ASSERT(!list_empty(&sh_info->pagemap.leaf_head));
            node = list_get_instance(sh_info->pagemap.leaf_head.next, btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                frame_free(ADDR2PFN((__address) node->value[0]));
                btree_remove(&sh_info->pagemap, node->key[0], node);
            }
        }
        
    }
    mutex_unlock(&sh_info->lock);
    
    if (dealloc) {
        btree_destroy(&sh_info->pagemap);
        free(sh_info);
    }
}

static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void anon_frame_free(as_area_t *area, __address page, __address frame);

/*
 * Anonymous memory backend.
 */
mem_backend_t anon_backend = {
    .backend_page_fault = anon_page_fault,
    .backend_frame_free = anon_frame_free
};

/** Service a page fault in the anonymous memory address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
    __address frame;

    if (area->sh_info) {
        btree_node_t *leaf;
        
        /*
         * The area is shared, chances are that the mapping can be found
         * in the pagemap of the address space area share info structure.
         * In the case that the pagemap does not contain the respective
         * mapping, a new frame is allocated and the mapping is created.
         */
        mutex_lock(&area->sh_info->lock);
        frame = (__address) btree_search(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), &leaf);
        if (!frame) {
            bool allocate = true;
            int i;
            
            /*
             * Workaround: zero is also a valid frame address, so a failed
             * btree_search() does not necessarily mean a miss. Scan the
             * leaf keys to tell the two cases apart.
             */
            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
                    allocate = false;
                    break;
                }
            }
            if (allocate) {
                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
                memsetb(PA2KA(frame), FRAME_SIZE, 0);
                
                /*
                 * Insert the address of the newly allocated frame to the pagemap.
                 */
                btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), (void *) frame, leaf);
            }
        }
        mutex_unlock(&area->sh_info->lock);
    } else {

        /*
         * In general, there can be several reasons that
         * can have caused this fault.
         *
         * - non-existent mapping: the area is an anonymous
         *   area (e.g. heap or stack) and so far has not been
         *   allocated a frame for the faulting page
         *
         * - non-present mapping: another possibility,
         *   currently not implemented, would be frame
         *   reuse; when this becomes a possibility,
         *   do not forget to distinguish between
         *   the different causes
         */
        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame), FRAME_SIZE, 0);
    }
    
    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
        panic("Could not insert used space.\n");
        
    return AS_PF_OK;
}

/** Free a frame that is backed by the anonymous memory backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Ignored.
 * @param page Ignored.
 * @param frame Frame to be released.
 */
void anon_frame_free(as_area_t *area, __address page, __address frame)
{
    frame_free(ADDR2PFN(frame));
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
        return (__native) address;
    else
        return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
    /* Note: the user-supplied flags are currently ignored. */
    return (__native) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
__native sys_as_area_destroy(__address address)
{
    return (__native) as_area_destroy(AS, address);
}