/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, it is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <print.h>

/**
 * Architecture-specific address space operations.
 * Assumed to be set during as_arch_init(); page_table_create()
 * asserts that it is non-NULL.
 */
as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID. Keeping them on this list
 * presumably allows their ASIDs to be recycled when the supply of
 * free ASIDs runs out.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

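/*
 * A note on locking, as can be inferred from the functions below: the
 * global as_lock is taken before any individual as->lock, and interrupts
 * are disabled for the duration of every locked section. Note that
 * page_table_lock() with a true argument takes care of as->lock as well.
 */
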
/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    spinlock_initialize(&as->lock, "as_lock");
    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}

/** Free address space. */
void as_free(as_t *as)
{
    ASSERT(as->refcount == 0);

    /* TODO: free as_areas and other resources held by as */
    /* TODO: free page table */
    free(as);
}

/** Create an address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of the area.
 * @param base Base address of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    spinlock_initialize(&a->lock, "as_area_lock");

    a->flags = flags;
    a->pages = SIZE2FRAMES(size);
    a->base = base;

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

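/*
 * A minimal usage sketch (the flag combination and base address are
 * hypothetical; any page-aligned base will do):
 *
 *     as_t *as = as_create(0);
 *     as_area_t *a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE,
 *         4 * PAGE_SIZE, 0x10000000);
 *
 * No frames are allocated here; they are supplied lazily by
 * as_page_fault() or explicitly via as_set_mapping().
 */
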
/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
    as_area_t *area;
    ipl_t ipl;

    ipl = interrupts_disable();
    page_table_lock(as, true);

    area = find_area_and_lock(as, page);
    if (!area) {
        panic("page not part of any as_area\n");
    }

    page_mapping_insert(as, page, frame, get_area_flags(area));

    spinlock_unlock(&area->lock);
    page_table_unlock(as, true);
    interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault could not be serviced, 1 on success.
 */
int as_page_fault(__address page)
{
    pte_t *pte;
    as_area_t *area;
    __address frame;

    ASSERT(AS);

    spinlock_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        spinlock_unlock(&AS->lock);
        return 0;
    }

    ASSERT(!(area->flags & AS_AREA_DEVICE));

    page_table_lock(AS, false);

    /*
     * To avoid a race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not already been inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            page_table_unlock(AS, false);
            spinlock_unlock(&area->lock);
            spinlock_unlock(&AS->lock);
            return 1;
        }
    }

    /*
     * In general, there are several possible causes of this fault.
     *
     * - non-existent mapping: the area is a scratch
     *   area (e.g. stack) and has not yet been
     *   allocated a frame for the faulting page
     *
     * - non-present mapping: another possibility,
     *   currently not implemented, would be frame
     *   reuse; when this becomes a possibility,
     *   do not forget to distinguish between
     *   the different causes
     */
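    /*
     * A fresh frame is allocated and zeroed out below; the zeroing
     * presumably prevents stale data from the frame's previous owner
     * from leaking into this address space.
     */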
    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    memsetb(PA2KA(frame), FRAME_SIZE, 0);

    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(AS, page, frame, get_area_flags(area));
    page_table_unlock(AS, false);

    spinlock_unlock(&area->lock);
    spinlock_unlock(&AS->lock);
    return 1;
}

/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&as_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        spinlock_lock(&old->lock);
        ASSERT(old->refcount);
        if ((--old->refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        spinlock_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    spinlock_lock(&new->lock);
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    spinlock_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of the new ASID was deferred
         * until now in order to avoid deadlock:
         * asid_get() may need to lock another address
         * space (e.g. to steal its ASID), which must
         * not happen while new->lock is held.
         */
        asid_t asid;

        asid = asid_get();
        spinlock_lock(&new->lock);
        new->asid = asid;
        spinlock_unlock(&new->lock);
    }
    spinlock_unlock(&as_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps
     * (e.g. write the ASID to a hardware register).
     */
    as_install_arch(new);

    AS = new;
}

/** Compute flags for the virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (a->flags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (a->flags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (a->flags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (!(a->flags & AS_AREA_DEVICE))
        flags |= PAGE_CACHEABLE;

    return flags;
}

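/*
 * For example, an ordinary read/write data area (AS_AREA_READ |
 * AS_AREA_WRITE) translates to PAGE_USER | PAGE_PRESENT | PAGE_READ |
 * PAGE_WRITE | PAGE_CACHEABLE, while a device area loses
 * PAGE_CACHEABLE so that its accesses bypass the cache.
 */
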
/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

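/*
 * The typical pattern, as used by as_set_mapping() above (passing true
 * means the pair also takes and releases as->lock):
 *
 *     page_table_lock(as, true);
 *     page_mapping_insert(as, page, frame, flags);
 *     page_table_unlock(as, true);
 */
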
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Address on success, (__address) -1 otherwise.
 */
__address as_remap(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area = NULL;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (area->flags & AS_AREA_DEVICE) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (pages < area->pages) {
        int i;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */
        for (i = pages; i < area->pages; i++) {
            pte_t *pte;

            /*
             * Releasing physical memory.
             * This depends on the fact that the memory was allocated using frame_alloc().
             */
            page_table_lock(as, false);
            pte = page_mapping_find(as, area->base + i * PAGE_SIZE);
            if (pte && PTE_VALID(pte)) {
                __address frame;

                ASSERT(PTE_PRESENT(pte));
                frame = PTE_GET_FRAME(pte);
                page_mapping_remove(as, area->base + i * PAGE_SIZE);
                page_table_unlock(as, false);

                frame_free(ADDR2PFN(frame));
            } else {
                page_table_unlock(as, false);
            }
        }
        /*
         * Invalidate TLBs.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + pages * PAGE_SIZE, area->pages - pages);
        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
            spinlock_unlock(&area->lock);
            spinlock_unlock(&as->lock);
            interrupts_restore(ipl);
            return (__address) -1;
        }
    }

    area->pages = pages;

    spinlock_unlock(&area->lock);
    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return address;
}

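/*
 * Note that only the shrink path above invalidates TLBs: removed
 * mappings may still be cached, whereas growing the area installs no
 * mappings at all (they appear later through as_page_fault()), so
 * there is nothing stale to flush.
 */
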
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        spinlock_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        spinlock_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        spinlock_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with the NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Make sure it does not conflict with the kernel address space either.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}