/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 *
 * @return Newly created address space.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    spinlock_initialize(&as->lock, "as_lock");
    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}

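/*
 * Usage sketch (illustrative only, not part of the original code): the
 * kernel address space is created with FLAG_AS_KERNEL in as_init() above;
 * a task's address space would be created with no flags and receives its
 * ASID lazily in as_switch():
 *
 *  as_t *as = as_create(0);    // asid stays ASID_INVALID until first switch
 */
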
/** Free address space. */
void as_free(as_t *as)
{
    ASSERT(as->refcount == 0);

    /* TODO: free as_areas and other resources held by as */
    /* TODO: free page table */
    free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    spinlock_initialize(&a->lock, "as_area_lock");

    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

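/*
 * Usage sketch (illustrative; USTACK_BASE is a hypothetical, page-aligned
 * constant): carve out a one-page read/write area in 'as':
 *
 *  as_area_t *a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE,
 *      PAGE_SIZE, USTACK_BASE, AS_AREA_ATTR_NONE);
 *  if (!a)
 *      ;  // base unaligned, size zero, W+X requested, or area conflict
 */
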
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_area_resize(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area = NULL;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (area->flags & AS_AREA_DEVICE) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (pages < area->pages) {
        int i;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */
        for (i = pages; i < area->pages; i++) {
            pte_t *pte;

            /*
             * Releasing physical memory.
             * This depends on the fact that the memory was allocated using frame_alloc().
             */
            page_table_lock(as, false);
            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
            if (pte && PTE_VALID(pte)) {
                __address frame;

                ASSERT(PTE_PRESENT(pte));
                frame = PTE_GET_FRAME(pte);
                page_mapping_remove(as, area->base + i*PAGE_SIZE);
                page_table_unlock(as, false);

                frame_free(ADDR2PFN(frame));
            } else {
                page_table_unlock(as, false);
            }
        }
        /*
         * Invalidate TLB's.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
            spinlock_unlock(&area->lock);
            spinlock_unlock(&as->lock);
            interrupts_restore(ipl);
            return (__address) -1;
        }
    }

    area->pages = pages;

    spinlock_unlock(&area->lock);
    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return address;
}

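/*
 * Worked example (assuming 4 KiB pages): for an area with base 0x10000000
 * and pages == 4, calling as_area_resize() with address == 0x10000000 and
 * size == 2*PAGE_SIZE gives pages = SIZE2FRAMES(0 + 8 KiB) = 2 < 4, so the
 * mappings and frames of pages 2 and 3 are removed and the stale TLB
 * entries shot down.
 */
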
/** Send address space area to another task.
 *
 * Address space area is sent to the specified task.
 * If the destination task is willing to accept the
 * area, a new area is created according to the
 * source area. Moreover, any existing mapping
 * is copied as well, thus providing a mechanism
 * for sharing a group of pages. The source address
 * space area and any associated mapping is preserved.
 *
 * @param dst_id Task ID of the accepting task.
 * @param src_base Base address of the source address space area.
 *
 * @return 0 on success or ENOENT if there is no such task or
 *     if there is no such address space area,
 *     EPERM if there was a problem in accepting the area or
 *     ENOMEM if there was a problem in allocating destination
 *     address space area.
 */
int as_area_send(task_id_t dst_id, __address src_base)
{
    ipl_t ipl;
    task_t *t;
    count_t i;
    as_t *dst_as;
    __address dst_base;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    t = task_find_by_id(dst_id);
    if (!t) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    spinlock_lock(&t->lock);
    spinlock_unlock(&tasks_lock);

    dst_as = t->as;
    dst_base = (__address) t->accept_arg.base;

    if (dst_as == AS) {
        /*
         * The two tasks share the entire address space.
         * Return error since there is no point in continuing.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    spinlock_lock(&AS->lock);
    src_area = find_area_and_lock(AS, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        spinlock_unlock(&t->lock);
        spinlock_unlock(&AS->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }
    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    spinlock_unlock(&src_area->lock);
    spinlock_unlock(&AS->lock);

    if ((t->accept_arg.task_id != TASK->taskid) || (t->accept_arg.size != src_size) ||
        (t->accept_arg.flags != src_flags)) {
        /*
         * Discrepancy in either task ID, size or flags.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     */
    dst_area = as_area_create(dst_as, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return ENOMEM;
    }

    memsetb((__address) &t->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);
    spinlock_unlock(&t->lock);

    /*
     * Avoid deadlock by first locking the address space with lower address.
     */
    if (dst_as < AS) {
        spinlock_lock(&dst_as->lock);
        spinlock_lock(&AS->lock);
    } else {
        spinlock_lock(&AS->lock);
        spinlock_lock(&dst_as->lock);
    }

    for (i = 0; i < SIZE2FRAMES(src_size); i++) {
        pte_t *pte;
        __address frame;

        page_table_lock(AS, false);
        pte = page_mapping_find(AS, src_base + i*PAGE_SIZE);
        if (pte && PTE_VALID(pte)) {
            ASSERT(PTE_PRESENT(pte));
            frame = PTE_GET_FRAME(pte);
            if (!(src_flags & AS_AREA_DEVICE))
                frame_reference_add(ADDR2PFN(frame));
            page_table_unlock(AS, false);
        } else {
            page_table_unlock(AS, false);
            continue;
        }

        page_table_lock(dst_as, false);
        page_mapping_insert(dst_as, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags));
        page_table_unlock(dst_as, false);
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute.
     */
    spinlock_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    spinlock_unlock(&dst_area->lock);

    spinlock_unlock(&AS->lock);
    spinlock_unlock(&dst_as->lock);
    interrupts_restore(ipl);

    return 0;
}

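/*
 * Protocol sketch, summarizing the code above and the syscall wrappers at
 * the end of this file: the receiving task first registers what it is
 * willing to accept (peer task ID, size, flags, destination base) via
 * sys_as_area_accept(); the sending task then calls sys_as_area_send()
 * with the receiver's task ID and the source base. The transfer succeeds
 * only if the recorded (task_id, size, flags) triple matches the sender
 * and its area exactly.
 */
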
/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
    as_area_t *area;
    ipl_t ipl;

    ipl = interrupts_disable();
    page_table_lock(as, true);

    area = find_area_and_lock(as, page);
    if (!area) {
        panic("page not part of any as_area\n");
    }

    page_mapping_insert(as, page, frame, get_area_flags(area));

    spinlock_unlock(&area->lock);
    page_table_unlock(as, true);
    interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault could not be resolved here and must be
 *     signalled to the low-level handler, 1 on success.
 */
int as_page_fault(__address page)
{
    pte_t *pte;
    as_area_t *area;
    __address frame;

    ASSERT(AS);

    spinlock_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        spinlock_unlock(&AS->lock);
        return 0;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&AS->lock);
        return 0;
    }

    ASSERT(!(area->flags & AS_AREA_DEVICE));

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            page_table_unlock(AS, false);
            spinlock_unlock(&area->lock);
            spinlock_unlock(&AS->lock);
            return 1;
        }
    }

    /*
     * In general, there can be several reasons that
     * can have caused this fault.
     *
     * - non-existent mapping: the area is a scratch
     *   area (e.g. stack) and so far has not been
     *   allocated a frame for the faulting page
     *
     * - non-present mapping: another possibility,
     *   currently not implemented, would be frame
     *   reuse; when this becomes a possibility,
     *   do not forget to distinguish between
     *   the different causes
     */
    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    memsetb(PA2KA(frame), FRAME_SIZE, 0);

    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(AS, page, frame, get_area_flags(area));
    page_table_unlock(AS, false);

    spinlock_unlock(&area->lock);
    spinlock_unlock(&AS->lock);
    return 1;
}

/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&as_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        spinlock_lock(&old->lock);
        ASSERT(old->refcount);
        if ((--old->refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        spinlock_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    spinlock_lock(&new->lock);
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    spinlock_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        spinlock_lock(&new->lock);
        new->asid = asid;
        spinlock_unlock(&new->lock);
    }
    spinlock_unlock(&as_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}

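/*
 * Invariant maintained by as_switch() (a summary of the code above, not
 * new behaviour): a non-kernel address space sits on
 * inactive_as_with_asid_head exactly when its refcount has dropped to zero
 * while it still owns a valid ASID; the first CPU that switches to it
 * again takes it off the list.
 */
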
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (!(aflags & AS_AREA_DEVICE))
        flags |= PAGE_CACHEABLE;

    return flags;
}

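/*
 * Worked example: an ordinary read/write data area, i.e.
 * aflags == AS_AREA_READ | AS_AREA_WRITE, translates to
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE;
 * a device area with the same access bits gets the same flags minus
 * PAGE_CACHEABLE.
 */
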
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

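/*
 * Usage sketch, mirroring the call sites in this file: callers that
 * already hold as->lock pass false so that only the page table itself is
 * locked:
 *
 *  page_table_lock(as, false);
 *  pte = page_mapping_find(as, page);
 *  ...
 *  page_table_unlock(as, false);
 */
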
/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        spinlock_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        spinlock_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        spinlock_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    return NULL;
}

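/*
 * Example of the left-neighbour case (illustrative, assuming 4 KiB pages):
 * let the leaf found for va == 0x4500 hold the bases { 0x5000, 0x9000 }.
 * Neither of those areas covers va, but the rightmost area of the left
 * neighbour, say base 0x3000 with two pages, spans 0x3000-0x4fff and
 * therefore does.
 */
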
/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check that it does not conflict with the kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
        return (__native) address;
    else
        return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
    return as_area_resize(AS, address, size, 0);
}

/** Prepare task for accepting address space area from another task.
 *
 * @param uspace_accept_arg Accept structure passed from userspace.
 *
 * @return EPERM if the requested size is zero or if the task ID
 *     encapsulated in @uspace_accept_arg references TASK itself.
 *     Otherwise zero is returned.
 */
__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg)
{
    as_area_acptsnd_arg_t arg;

    copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t));

    if (!arg.size)
        return (__native) EPERM;

    if (arg.task_id == TASK->taskid) {
        /*
         * Accepting from itself is not allowed.
         */
        return (__native) EPERM;
    }

    memcpy(&TASK->accept_arg, &arg, sizeof(as_area_acptsnd_arg_t));

    return 0;
}

/** Wrapper for as_area_send(). */
__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg)
{
    as_area_acptsnd_arg_t arg;

    copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t));

    if (!arg.size)
        return (__native) EPERM;

    if (arg.task_id == TASK->taskid) {
        /*
         * Sending to itself is not allowed.
         */
        return (__native) EPERM;
    }

    return (__native) as_area_send(arg.task_id, (__address) arg.base);
}
  914.