Subversion Repositories HelenOS-historic

Rev

Rev 718 | Rev 754 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (C) 2001-2006 Jakub Jermar
  3.  * All rights reserved.
  4.  *
  5.  * Redistribution and use in source and binary forms, with or without
  6.  * modification, are permitted provided that the following conditions
  7.  * are met:
  8.  *
  9.  * - Redistributions of source code must retain the above copyright
  10.  *   notice, this list of conditions and the following disclaimer.
  11.  * - Redistributions in binary form must reproduce the above copyright
  12.  *   notice, this list of conditions and the following disclaimer in the
  13.  *   documentation and/or other materials provided with the distribution.
  14.  * - The name of the author may not be used to endorse or promote products
  15.  *   derived from this software without specific prior written permission.
  16.  *
  17.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27.  */
  28.  
  29. /*
  30.  * This file contains address space manipulation functions.
  31.  * Roughly speaking, this is a higher-level client of
  32.  * Virtual Address Translation (VAT) subsystem.
  33.  */
  34.  
  35. #include <mm/as.h>
  36. #include <mm/asid.h>
  37. #include <mm/page.h>
  38. #include <mm/frame.h>
  39. #include <mm/tlb.h>
  40. #include <mm/heap.h>
  41. #include <arch/mm/page.h>
  42. #include <genarch/mm/page_pt.h>
  43. #include <mm/asid.h>
  44. #include <arch/mm/asid.h>
  45. #include <arch/mm/as.h>
  46. #include <arch/types.h>
  47. #include <typedefs.h>
  48. #include <synch/spinlock.h>
  49. #include <config.h>
  50. #include <list.h>
  51. #include <panic.h>
  52. #include <arch/asm.h>
  53. #include <debug.h>
  54. #include <memstr.h>
  55. #include <arch.h>
  56. #include <print.h>
  57.  
/*
 * PTL0 index range covering the kernel address space. Every address
 * space must share these PTL0 entries so the kernel remains mapped
 * regardless of which address space is installed.
 */
#define KAS_START_INDEX     PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)
#define KAS_END_INDEX       PTL0_INDEX(KERNEL_ADDRESS_SPACE_END)
#define KAS_INDICES     (1+(KAS_END_INDEX-KAS_START_INDEX))

/*
 * Here we assume that PFN (Physical Frame Number) space
 * is smaller than the width of index_t. UNALLOCATED_PFN
 * can then be used to mark mappings which have not yet
 * been allocated a physical frame.
 */
#define UNALLOCATED_PFN     ((index_t) -1)
  69.  
  70. /** Create address space. */
  71. /*
  72.  * FIXME: this interface must be meaningful for all possible VAT
  73.  *    (Virtual Address Translation) mechanisms.
  74.  */
  75. as_t *as_create(pte_t *ptl0, int flags)
  76. {
  77.     as_t *as;
  78.  
  79.     as = (as_t *) malloc(sizeof(as_t));
  80.     if (as) {
  81.         list_initialize(&as->as_with_asid_link);
  82.         spinlock_initialize(&as->lock, "as_lock");
  83.         list_initialize(&as->as_area_head);
  84.  
  85.         if (flags & AS_KERNEL)
  86.             as->asid = ASID_KERNEL;
  87.         else
  88.             as->asid = ASID_INVALID;
  89.  
  90.         as->ptl0 = ptl0;
  91.         if (!as->ptl0) {
  92.             pte_t *src_ptl0, *dst_ptl0;
  93.        
  94.             src_ptl0 = (pte_t *) PA2KA((__address) GET_PTL0_ADDRESS());
  95.             dst_ptl0 = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC, ONE_FRAME, NULL);
  96.  
  97. //          memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
  98. //          memcpy((void *) &dst_ptl0[KAS_START_INDEX], (void *) &src_ptl0[KAS_START_INDEX], KAS_INDICES);
  99.            
  100.             memcpy((void *) dst_ptl0,(void *) src_ptl0, PAGE_SIZE);
  101.  
  102.             as->ptl0 = (pte_t *) KA2PA((__address) dst_ptl0);
  103.         }
  104.     }
  105.  
  106.     return as;
  107. }
  108.  
  109. /** Create address space area of common attributes.
  110.  *
  111.  * The created address space area is added to the target address space.
  112.  *
  113.  * @param as Target address space.
  114.  * @param type Type of area.
  115.  * @param size Size of area in multiples of PAGE_SIZE.
  116.  * @param base Base address of area.
  117.  *
  118.  * @return Address space area on success or NULL on failure.
  119.  */
  120. as_area_t *as_area_create(as_t *as, as_area_type_t type, size_t size, __address base)
  121. {
  122.     ipl_t ipl;
  123.     as_area_t *a;
  124.    
  125.     if (base % PAGE_SIZE)
  126.         panic("addr not aligned to a page boundary");
  127.    
  128.     ipl = interrupts_disable();
  129.     spinlock_lock(&as->lock);
  130.    
  131.     /*
  132.      * TODO: test as_area which is to be created doesn't overlap with an existing one.
  133.      */
  134.    
  135.     a = (as_area_t *) malloc(sizeof(as_area_t));
  136.     if (a) {
  137.         int i;
  138.    
  139.         a->mapping = (index_t *) malloc(size * sizeof(index_t));
  140.         if (!a->mapping) {
  141.             free(a);
  142.             spinlock_unlock(&as->lock);
  143.             interrupts_restore(ipl);
  144.             return NULL;
  145.         }
  146.        
  147.         for (i=0; i<size; i++) {
  148.             /*
  149.              * Frames will be allocated on-demand by
  150.              * as_page_fault() or preloaded by
  151.              * as_area_set_mapping().
  152.              */
  153.             a->mapping[i] = UNALLOCATED_PFN;
  154.         }
  155.        
  156.         spinlock_initialize(&a->lock, "as_area_lock");
  157.            
  158.         link_initialize(&a->link);         
  159.         a->type = type;
  160.         a->size = size;
  161.         a->base = base;
  162.        
  163.         list_append(&a->link, &as->as_area_head);
  164.  
  165.     }
  166.  
  167.     spinlock_unlock(&as->lock);
  168.     interrupts_restore(ipl);
  169.  
  170.     return a;
  171. }
  172.  
  173. /** Load mapping for address space area.
  174.  *
  175.  * Initialize a->mapping.
  176.  *
  177.  * @param a   Target address space area.
  178.  * @param vpn Page number relative to area start.
  179.  * @param pfn Frame number to map.
  180.  */
  181. void as_area_set_mapping(as_area_t *a, index_t vpn, index_t pfn)
  182. {
  183.     ASSERT(vpn < a->size);
  184.     ASSERT(a->mapping[vpn] == UNALLOCATED_PFN);
  185.     ASSERT(pfn != UNALLOCATED_PFN);
  186.    
  187.     ipl_t ipl;
  188.    
  189.     ipl = interrupts_disable();
  190.     spinlock_lock(&a->lock);
  191.    
  192.     a->mapping[vpn] = pfn;
  193.    
  194.     spinlock_unlock(&a->lock);
  195.     interrupts_restore(ipl);
  196. }
  197.  
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It finds the area containing
 * the faulting page, allocates a zeroed frame on demand and inserts the
 * page-to-frame mapping into the current address space's page tables.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if no area covers 'page' (the caller should signal the page
 *         fault to the low-level handler), 1 on success.
 */
int as_page_fault(__address page)
{
    int flags;
    link_t *cur;
    as_area_t *a, *area = NULL;
    index_t vpn;
    __address frame;

    ASSERT(AS);
    spinlock_lock(&AS->lock);

    /*
     * Search the areas of this address space for presence of 'page'.
     */
    for (cur = AS->as_area_head.next; cur != &AS->as_area_head; cur = cur->next) {
        a = list_get_instance(cur, as_area_t, link);
        spinlock_lock(&a->lock);

        if ((page >= a->base) && (page < a->base + a->size * PAGE_SIZE)) {

            /*
             * We found the area containing 'page'.
             * Note: we break out with a->lock still held;
             * it is released only after the mapping is inserted.
             * TODO: access checking
             */

            vpn = (page - a->base) / PAGE_SIZE;
            area = a;
            break;
        }

        spinlock_unlock(&a->lock);
    }

    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        spinlock_unlock(&AS->lock);
        return 0;
    }

    /*
     * Note: area->lock is held.
     */

    /*
     * Decide if a frame needs to be allocated.
     * If so, allocate it and adjust area->mapping map.
     */
    if (area->mapping[vpn] == UNALLOCATED_PFN) {
        frame = frame_alloc(0, ONE_FRAME, NULL);
        /* Zero the fresh frame so no stale data leaks into the area. */
        memsetb(PA2KA(frame), FRAME_SIZE, 0);
        area->mapping[vpn] = frame / FRAME_SIZE;
        ASSERT(area->mapping[vpn] != UNALLOCATED_PFN);
    } else
        frame = area->mapping[vpn] * FRAME_SIZE;

    /* Derive page protection flags from the area type. */
    switch (area->type) {
        case AS_AREA_TEXT:
            flags = PAGE_EXEC | PAGE_READ | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
            break;
        case AS_AREA_DATA:
        case AS_AREA_STACK:
            flags = PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
            break;
        default:
            panic("unexpected as_area_type_t %d", area->type);
    }

    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(page, AS->asid, frame, flags, (__address) AS->ptl0);

    spinlock_unlock(&area->lock);
    spinlock_unlock(&AS->lock);

    return 1;
}
  289.  
/** Install address space on CPU.
 *
 * Makes 'as' the active address space of the executing processor by
 * switching the PTL0 pointer and performing architecture-specific
 * activation steps. The statement order below is significant.
 *
 * @param as Address space.
 */
void as_install(as_t *as)
{
    ipl_t ipl;

    /*
     * NOTE(review): asid_install() is called before the page tables are
     * switched — presumably to guarantee 'as' holds a valid ASID before
     * activation; confirm against the asid subsystem.
     */
    asid_install(as);

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);
    ASSERT(as->ptl0);
    /* Point the MMU at this address space's top-level page table. */
    SET_PTL0_ADDRESS(as->ptl0);
    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(as);

    /* Record 'as' as the current address space (per-CPU variable). */
    AS = as;
}
  315.