/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <print.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 *
 * @return New address space.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    spinlock_initialize(&as->lock, "as_lock");
    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}

/** Free address space. */
void as_free(as_t *as)
{
    ASSERT(as->refcount == 0);

    /* TODO: free as_areas and other resources held by as */
    /* TODO: free page table */
    free(as);
}

/** Create an address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of the area.
 * @param base Base address of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    spinlock_initialize(&a->lock, "as_area_lock");

    a->flags = flags;
    a->pages = SIZE2FRAMES(size);
    a->base = base;

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

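/*
 * Example (illustrative sketch only, not part of the kernel proper):
 * a hypothetical caller could set up a fresh address space with one
 * read/write data area as follows. USER_DATA_BASE is a made-up,
 * page-aligned constant used purely for illustration.
 *
 *    as_t *as = as_create(0);
 *    as_area_t *a;
 *
 *    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE,
 *        16 * PAGE_SIZE, USER_DATA_BASE);
 *    if (!a)
 *        panic("as_area_create failed\n");  // unaligned base, zero size or overlap
 */
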
/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
    as_area_t *area;
    ipl_t ipl;

    ipl = interrupts_disable();
    page_table_lock(as, true);

    area = find_area_and_lock(as, page);
    if (!area) {
        panic("page not part of any as_area\n");
    }

    page_mapping_insert(as, page, frame, get_area_flags(area));

    spinlock_unlock(&area->lock);
    page_table_unlock(as, true);
    interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault cannot be resolved here and must be handled by the
 *         low-level handler, 1 on success (i.e. the fault was resolved).
 */
int as_page_fault(__address page)
{
    pte_t *pte;
    as_area_t *area;
    __address frame;

    ASSERT(AS);

    spinlock_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        spinlock_unlock(&AS->lock);
        return 0;
    }

    ASSERT(!(area->flags & AS_AREA_DEVICE));

    page_table_lock(AS, false);

    /*
     * To avoid a race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not already been inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            page_table_unlock(AS, false);
            spinlock_unlock(&area->lock);
            spinlock_unlock(&AS->lock);
            return 1;
        }
    }

    /*
     * In general, several reasons could have caused this fault:
     *
     * - non-existent mapping: the area is a scratch
     *   area (e.g. stack) and so far has not been
     *   allocated a frame for the faulting page
     *
     * - non-present mapping: another possibility,
     *   currently not implemented, would be frame
     *   reuse; when this becomes a possibility,
     *   do not forget to distinguish between
     *   the different causes
     */
    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    memsetb(PA2KA(frame), FRAME_SIZE, 0);

    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(AS, page, frame, get_area_flags(area));
    page_table_unlock(AS, false);

    spinlock_unlock(&area->lock);
    spinlock_unlock(&AS->lock);
    return 1;
}

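/*
 * Illustrative sketch (an assumption about the caller, not actual
 * architecture code): an architecture-specific low-level fault handler
 * would invoke as_page_fault() and fall back to its own error path
 * when 0 is returned:
 *
 *    if (!as_page_fault(fault_address & ~(PAGE_SIZE - 1)))
 *        panic("unresolved page fault\n");  // or deliver the fault to the task
 */
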
/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&as_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        spinlock_lock(&old->lock);
        ASSERT(old->refcount);
        if ((--old->refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        spinlock_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    spinlock_lock(&new->lock);
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    spinlock_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        spinlock_lock(&new->lock);
        new->asid = asid;
        spinlock_unlock(&new->lock);
    }
    spinlock_unlock(&as_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}

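/*
 * Illustrative sketch (an assumption about the caller, not code from
 * this file): the scheduler is expected to call as_switch() during a
 * context switch when the outgoing and incoming tasks use different
 * address spaces, e.g.
 *
 *    if (old_task->as != new_task->as)
 *        as_switch(old_task->as, new_task->as);
 *
 * 'old_task', 'new_task' and their 'as' members are hypothetical names
 * standing in for the real task structures.
 */
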
/** Compute flags for the virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (a->flags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (a->flags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (a->flags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (!(a->flags & AS_AREA_DEVICE))
        flags |= PAGE_CACHEABLE;

    return flags;
}

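/*
 * Worked example: for an ordinary read/write data area, i.e. one created
 * with AS_AREA_READ | AS_AREA_WRITE and without AS_AREA_DEVICE,
 * get_area_flags() yields:
 *
 *    PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE
 */
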
/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. The address space can be locked prior to this
 * call, in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

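/*
 * Illustrative sketch of the expected pairing: mapping operations are
 * bracketed by page_table_lock()/page_table_unlock() with matching
 * arguments, e.g. when the caller does not already hold as->lock:
 *
 *    page_table_lock(as, true);
 *    pte = page_mapping_find(as, page);
 *    ...
 *    page_table_unlock(as, true);
 */
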
/** Find address space area and resize it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_area_resize(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area = NULL;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (area->flags & AS_AREA_DEVICE) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero-size address space areas are not allowed.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (pages < area->pages) {
        int i;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */
        for (i = pages; i < area->pages; i++) {
            pte_t *pte;

            /*
             * Releasing physical memory.
             * This depends on the fact that the memory was allocated using frame_alloc().
             */
            page_table_lock(as, false);
            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
            if (pte && PTE_VALID(pte)) {
                __address frame;

                ASSERT(PTE_PRESENT(pte));
                frame = PTE_GET_FRAME(pte);
                page_mapping_remove(as, area->base + i*PAGE_SIZE);
                page_table_unlock(as, false);

                frame_free(ADDR2PFN(frame));
            } else {
                page_table_unlock(as, false);
            }
        }
        /*
         * Invalidate TLBs.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
            spinlock_unlock(&area->lock);
            spinlock_unlock(&as->lock);
            interrupts_restore(ipl);
            return (__address) -1;
        }
    }

    area->pages = pages;

    spinlock_unlock(&area->lock);
    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return address;
}

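/*
 * Illustrative sketch (hypothetical caller): growing an existing area
 * by one page, where 'as', 'base' and 'old_size' are assumed to
 * describe an existing non-device area:
 *
 *    if (as_area_resize(as, base, old_size + PAGE_SIZE, 0) == (__address) -1)
 *        panic("as_area_resize failed\n");  // e.g. conflict with a neighbouring area
 */
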
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        spinlock_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        spinlock_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        spinlock_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to conflict with the NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check that it does not conflict with the kernel address space either.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}
  670.