Subversion Repositories HelenOS-historic

Rev

Rev 704 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (C) 2001-2006 Jakub Jermar
  3.  * All rights reserved.
  4.  *
  5.  * Redistribution and use in source and binary forms, with or without
  6.  * modification, are permitted provided that the following conditions
  7.  * are met:
  8.  *
  9.  * - Redistributions of source code must retain the above copyright
  10.  *   notice, this list of conditions and the following disclaimer.
  11.  * - Redistributions in binary form must reproduce the above copyright
  12.  *   notice, this list of conditions and the following disclaimer in the
  13.  *   documentation and/or other materials provided with the distribution.
  14.  * - The name of the author may not be used to endorse or promote products
  15.  *   derived from this software without specific prior written permission.
  16.  *
  17.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27.  */
  28.  
  29. /*
  30.  * This file contains address space manipulation functions.
  31.  * Roughly speaking, this is a higher-level client of
  32.  * Virtual Address Translation (VAT) subsystem.
  33.  */
  34.  
  35. #include <mm/as.h>
  36. #include <mm/page.h>
  37. #include <mm/frame.h>
  38. #include <mm/tlb.h>
  39. #include <mm/heap.h>
  40. #include <arch/mm/page.h>
  41. #include <genarch/mm/page_pt.h>
  42. #include <arch/mm/asid.h>
  43. #include <arch/mm/as.h>
  44. #include <arch/types.h>
  45. #include <typedefs.h>
  46. #include <synch/spinlock.h>
  47. #include <config.h>
  48. #include <list.h>
  49. #include <panic.h>
  50. #include <arch/asm.h>
  51. #include <debug.h>
  52. #include <memstr.h>
  53. #include <arch.h>
  54. #include <print.h>
  55.  
  56. #define KAS_START_INDEX     PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)
  57. #define KAS_END_INDEX       PTL0_INDEX(KERNEL_ADDRESS_SPACE_END)
  58. #define KAS_INDICES     (1+(KAS_END_INDEX-KAS_START_INDEX))
  59.  
/*
 * Here we assume that the PFN (Physical Frame Number) space
 * is smaller than the width of index_t. UNALLOCATED_PFN
 * can then be used to mark mappings which were not
 * allocated a physical frame.
 */
  66. #define UNALLOCATED_PFN     ((index_t) -1)
  67.  
  68. /** Create address space. */
  69. /*
  70.  * FIXME: this interface must be meaningful for all possible VAT
  71.  *    (Virtual Address Translation) mechanisms.
  72.  */
  73. as_t *as_create(pte_t *ptl0)
  74. {
  75.     as_t *as;
  76.  
  77.     as = (as_t *) malloc(sizeof(as_t));
  78.     if (as) {
  79.         spinlock_initialize(&as->lock, "as_lock");
  80.         list_initialize(&as->as_area_head);
  81.  
  82.         as->asid = asid_get();
  83.  
  84.         as->ptl0 = ptl0;
  85.         if (!as->ptl0) {
  86.             pte_t *src_ptl0, *dst_ptl0;
  87.        
  88.             src_ptl0 = (pte_t *) PA2KA((__address) GET_PTL0_ADDRESS());
  89.             dst_ptl0 = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC, ONE_FRAME, NULL);
  90.  
  91. //          memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
  92. //          memcpy((void *) &dst_ptl0[KAS_START_INDEX], (void *) &src_ptl0[KAS_START_INDEX], KAS_INDICES);
  93.            
  94.             memcpy((void *) dst_ptl0,(void *) src_ptl0, PAGE_SIZE);
  95.  
  96.             as->ptl0 = (pte_t *) KA2PA((__address) dst_ptl0);
  97.         }
  98.     }
  99.  
  100.     return as;
  101. }
  102.  
  103. /** Create address space area of common attributes.
  104.  *
  105.  * The created address space area is added to the target address space.
  106.  *
  107.  * @param as Target address space.
  108.  * @param type Type of area.
  109.  * @param size Size of area in multiples of PAGE_SIZE.
  110.  * @param base Base address of area.
  111.  *
  112.  * @return Address space area on success or NULL on failure.
  113.  */
  114. as_area_t *as_area_create(as_t *as, as_area_type_t type, size_t size, __address base)
  115. {
  116.     ipl_t ipl;
  117.     as_area_t *a;
  118.    
  119.     if (base % PAGE_SIZE)
  120.         panic("addr not aligned to a page boundary");
  121.    
  122.     ipl = interrupts_disable();
  123.     spinlock_lock(&as->lock);
  124.    
  125.     /*
  126.      * TODO: test as_area which is to be created doesn't overlap with an existing one.
  127.      */
  128.    
  129.     a = (as_area_t *) malloc(sizeof(as_area_t));
  130.     if (a) {
  131.         int i;
  132.    
  133.         a->mapping = (index_t *) malloc(size * sizeof(index_t));
  134.         if (!a->mapping) {
  135.             free(a);
  136.             spinlock_unlock(&as->lock);
  137.             interrupts_restore(ipl);
  138.             return NULL;
  139.         }
  140.        
  141.         for (i=0; i<size; i++) {
  142.             /*
  143.              * Frames will be allocated on-demand by
  144.              * as_page_fault().
  145.              */
  146.             a->mapping[i] = UNALLOCATED_PFN;
  147.         }
  148.        
  149.         spinlock_initialize(&a->lock, "as_area_lock");
  150.            
  151.         link_initialize(&a->link);         
  152.         a->type = type;
  153.         a->size = size;
  154.         a->base = base;
  155.        
  156.         list_append(&a->link, &as->as_area_head);
  157.  
  158.     }
  159.  
  160.     spinlock_unlock(&as->lock);
  161.     interrupts_restore(ipl);
  162.    
  163.     return a;
  164. }
  165.  
  166. /** Load mapping for address space area.
  167.  *
  168.  * Initialize a->mapping.
  169.  *
  170.  * @param a Target address space area.
  171.  * @param pfn Array of frame numbers. Number of elements must match with a->mapping.
  172.  */
  173. void as_area_load_mapping(as_area_t *a, index_t *pfn)
  174. {
  175.     ipl_t ipl;
  176.     int i;
  177.    
  178.     ipl = interrupts_disable();
  179.     spinlock_lock(&a->lock);
  180.  
  181.     for (i = 0; i < a->size; i++) {
  182.         ASSERT(a->mapping[i] == UNALLOCATED_PFN);
  183.         a->mapping[i] = pfn[i];
  184.     }
  185.    
  186.     spinlock_unlock(&a->lock);
  187.     interrupts_restore(ipl);
  188. }
  189.  
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It walks the areas of the
 * current address space (AS); if one covers the faulting page, it
 * allocates a zeroed frame on demand (or reuses the recorded one) and
 * inserts the mapping into the page tables.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if no area covers the page (genuine fault, to be handled by
 *         the low-level handler), 1 if the mapping was established and
 *         the faulting instruction can be restarted.
 */
int as_page_fault(__address page)
{
    int flags;
    link_t *cur;
    as_area_t *a, *area = NULL;
    index_t vpn;            /* virtual page number within the area; valid only when area != NULL */
    __address frame;
   
    ASSERT(AS);
    spinlock_lock(&AS->lock);
   
    /*
     * Search the areas of this address space for presence of 'page'.
     */
    for (cur = AS->as_area_head.next; cur != &AS->as_area_head; cur = cur->next) {
        a = list_get_instance(cur, as_area_t, link);
        spinlock_lock(&a->lock);

        if ((page >= a->base) && (page < a->base + a->size * PAGE_SIZE)) {

            /*
             * We found the area containing 'page'.
             * Note: we break with a->lock still held.
             * TODO: access checking
             */
           
            vpn = (page - a->base) / PAGE_SIZE;
            area = a;
            break;
        }
       
        spinlock_unlock(&a->lock);
    }
   
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        spinlock_unlock(&AS->lock);
        return 0;
    }

    /*
     * Note: area->lock is held.
     */
   
    /*
     * Decide if a frame needs to be allocated.
     * If so, allocate it (zeroed) and adjust area->mapping map.
     */
    if (area->mapping[vpn] == UNALLOCATED_PFN) {
        frame = frame_alloc(0, ONE_FRAME, NULL);
        memsetb(frame, FRAME_SIZE, 0);
        area->mapping[vpn] = frame / FRAME_SIZE;
    } else {
        frame = area->mapping[vpn] * FRAME_SIZE;
    }
   
    /* Derive page-table flags from the area type. */
    switch (area->type) {
        case AS_AREA_TEXT:
            flags = PAGE_EXEC | PAGE_READ | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
            break;
        case AS_AREA_DATA:
        case AS_AREA_STACK:
            flags = PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
            break;
        default:
            panic("unexpected as_area_type_t %d", area->type);
    }

    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(page, AS->asid, frame, flags, (__address) AS->ptl0);
   
    spinlock_unlock(&area->lock);
    spinlock_unlock(&AS->lock);

    return 1;
}
  281.  
  282. /** Install address space on CPU.
  283.  *
  284.  * @param as Address space.
  285.  */
  286. void as_install(as_t *as)
  287. {
  288.     ipl_t ipl;
  289.    
  290.     ipl = interrupts_disable();
  291.     spinlock_lock(&as->lock);
  292.     ASSERT(as->ptl0);
  293.     SET_PTL0_ADDRESS(as->ptl0);
  294.     spinlock_unlock(&as->lock);
  295.     interrupts_restore(ipl);
  296.  
  297.     /*
  298.      * Perform architecture-specific steps.
  299.      * (e.g. invalidate TLB, install ASID etc.)
  300.      */
  301.     as_install_arch(as);
  302.    
  303.     AS = as;
  304. }
  305.