/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

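/*
 * Virtual memory management: creation and installation of address spaces
 * (vm_t) and mapping of virtual memory areas (vm_area_t) into them.
 */
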
#include <mm/vm.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/tlb.h>
#include <mm/heap.h>
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <arch/mm/vm.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <list.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <arch.h>

#define KAS_START_INDEX     PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)
#define KAS_END_INDEX       PTL0_INDEX(KERNEL_ADDRESS_SPACE_END)
#define KAS_INDICES         (1 + (KAS_END_INDEX - KAS_START_INDEX))

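/*
 * Create a new address space.
 *
 * If ptl0 is NULL, a fresh PTL0 frame is allocated and the kernel
 * address space mapping is inherited by copying the current PTL0.
 * Returns the new vm_t, or NULL if the allocation fails.
 */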
vm_t *vm_create(pte_t *ptl0)
{
    vm_t *m;

    m = (vm_t *) malloc(sizeof(vm_t));
    if (m) {
        spinlock_initialize(&m->lock);
        list_initialize(&m->vm_area_head);

        m->asid = asid_get();

        /*
         * Each vm_t is supposed to have its own page table.
         * It is either passed one or it has to allocate and set one up.
         */
        m->ptl0 = ptl0;
        if (!m->ptl0) {
            pte_t *src_ptl0, *dst_ptl0;

            src_ptl0 = (pte_t *) PA2KA((__address) GET_PTL0_ADDRESS());
            dst_ptl0 = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC);

//          memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
//          memcpy((void *) &dst_ptl0[KAS_START_INDEX], (void *) &src_ptl0[KAS_START_INDEX], KAS_INDICES);

            memcpy((void *) dst_ptl0, (void *) src_ptl0, PAGE_SIZE);

            m->ptl0 = (pte_t *) KA2PA((__address) dst_ptl0);
        }
    }

    return m;
}

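/*
 * Destroy an address space.
 *
 * Not implemented yet; currently a no-op.
 */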
void vm_destroy(vm_t *m)
{
}

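/*
 * Create a virtual memory area of 'size' pages at virtual address 'addr',
 * backed by freshly allocated frames, and append it to address space m.
 *
 * addr must be page-aligned; otherwise the kernel panics.
 * Returns the new vm_area_t, or NULL on allocation failure.
 */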
vm_area_t *vm_area_create(vm_t *m, vm_type_t type, size_t size, __address addr)
{
    ipl_t ipl;
    vm_area_t *a;

    if (addr % PAGE_SIZE)
        panic("addr not aligned to a page boundary");

    ipl = interrupts_disable();
    spinlock_lock(&m->lock);

    /*
     * TODO: test that the vm_area being created does not overlap
     * with an existing one.
     */

    a = (vm_area_t *) malloc(sizeof(vm_area_t));
    if (a) {
        int i;

        a->mapping = (__address *) malloc(size * sizeof(__address));
        if (!a->mapping) {
            free(a);
            spinlock_unlock(&m->lock);
            interrupts_restore(ipl);
            return NULL;
        }

        for (i = 0; i < size; i++)
            a->mapping[i] = frame_alloc(0);

        spinlock_initialize(&a->lock);

        link_initialize(&a->link);
        a->type = type;
        a->size = size;
        a->address = addr;

        list_append(&a->link, &m->vm_area_head);
    }

    spinlock_unlock(&m->lock);
    interrupts_restore(ipl);

    return a;
}

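/*
 * Destroy a virtual memory area.
 *
 * Not implemented yet; currently a no-op.
 */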
void vm_area_destroy(vm_area_t *a)
{
}

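/*
 * Map the area's pages into the page table of address space m.
 *
 * Page flags are chosen according to the area type: text areas are mapped
 * read-only and executable, data and stack areas read-write; all are
 * user-accessible, present and cacheable.
 */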
void vm_area_map(vm_area_t *a, vm_t *m)
{
    int i, flags;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&m->lock);
    spinlock_lock(&a->lock);

    switch (a->type) {
        case VMA_TEXT:
            flags = PAGE_EXEC | PAGE_READ | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
            break;
        case VMA_DATA:
        case VMA_STACK:
            flags = PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
            break;
        default:
            panic("unexpected vm_type_t %d", a->type);
    }

    ASSERT(m->ptl0);
    for (i = 0; i < a->size; i++)
        map_page_to_frame(a->address + i * PAGE_SIZE, a->mapping[i], flags, (__address) m->ptl0);

    spinlock_unlock(&a->lock);
    spinlock_unlock(&m->lock);
    interrupts_restore(ipl);
}

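/*
 * Unmap the area's pages from the page table of address space m
 * by marking each page not present.
 */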
void vm_area_unmap(vm_area_t *a, vm_t *m)
{
    int i;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&m->lock);
    spinlock_lock(&a->lock);

    ASSERT(m->ptl0);
    for (i = 0; i < a->size; i++)
        map_page_to_frame(a->address + i * PAGE_SIZE, 0, PAGE_NOT_PRESENT, (__address) m->ptl0);

    spinlock_unlock(&a->lock);
    spinlock_unlock(&m->lock);
    interrupts_restore(ipl);
}

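/*
 * Install address space m: make its PTL0 the active page table (inside a
 * TLB shootdown sequence), perform architecture-specific installation and
 * record m as the current address space (VM).
 */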
void vm_install(vm_t *m)
{
    link_t *l;
    ipl_t ipl;

    ipl = interrupts_disable();

    tlb_shootdown_start();
    spinlock_lock(&m->lock);

    ASSERT(m->ptl0);
    SET_PTL0_ADDRESS(m->ptl0);

    spinlock_unlock(&m->lock);
    tlb_shootdown_finalize();

    interrupts_restore(ipl);

    vm_install_arch(m);

    VM = m;
}