Subversion Repositories HelenOS-historic

Rev

Rev 625 | Go to most recent revision | Blame | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (C) 2001-2004 Jakub Jermar
  3.  * All rights reserved.
  4.  *
  5.  * Redistribution and use in source and binary forms, with or without
  6.  * modification, are permitted provided that the following conditions
  7.  * are met:
  8.  *
  9.  * - Redistributions of source code must retain the above copyright
  10.  *   notice, this list of conditions and the following disclaimer.
  11.  * - Redistributions in binary form must reproduce the above copyright
  12.  *   notice, this list of conditions and the following disclaimer in the
  13.  *   documentation and/or other materials provided with the distribution.
  14.  * - The name of the author may not be used to endorse or promote products
  15.  *   derived from this software without specific prior written permission.
  16.  *
  17.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27.  */
  28.  
  29. #include <mm/vm.h>
  30. #include <mm/page.h>
  31. #include <mm/frame.h>
  32. #include <mm/tlb.h>
  33. #include <mm/heap.h>
  34. #include <arch/mm/page.h>
  35. #include <genarch/mm/page_pt.h>
  36. #include <arch/mm/asid.h>
  37. #include <arch/mm/vm.h>
  38. #include <arch/types.h>
  39. #include <typedefs.h>
  40. #include <synch/spinlock.h>
  41. #include <config.h>
  42. #include <list.h>
  43. #include <panic.h>
  44. #include <arch/asm.h>
  45. #include <debug.h>
  46. #include <memstr.h>
  47. #include <arch.h>
  48.  
  49. #define KAS_START_INDEX     PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)
  50. #define KAS_END_INDEX       PTL0_INDEX(KERNEL_ADDRESS_SPACE_END)
  51. #define KAS_INDICES     (1+(KAS_END_INDEX-KAS_START_INDEX))
  52.  
  53. vm_t *vm_create(pte_t *ptl0)
  54. {
  55.     vm_t *m;
  56.  
  57.     m = (vm_t *) malloc(sizeof(vm_t));
  58.     if (m) {
  59.         spinlock_initialize(&m->lock, "vm_lock");
  60.         list_initialize(&m->vm_area_head);
  61.  
  62.         m->asid = asid_get();
  63.  
  64.         /*
  65.          * Each vm_t is supposed to have its own page table.
  66.          * It is either passed one or it has to allocate and set one up.
  67.          */
  68.         m->ptl0 = ptl0;
  69.         if (!m->ptl0) {
  70.             pte_t *src_ptl0, *dst_ptl0;
  71.        
  72.             src_ptl0 = (pte_t *) PA2KA((__address) GET_PTL0_ADDRESS());
  73.             dst_ptl0 = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC, ONE_FRAME);
  74.  
  75. //          memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
  76. //          memcpy((void *) &dst_ptl0[KAS_START_INDEX], (void *) &src_ptl0[KAS_START_INDEX], KAS_INDICES);
  77.            
  78.             memcpy((void *) dst_ptl0,(void *) src_ptl0, PAGE_SIZE);
  79.  
  80.             m->ptl0 = (pte_t *) KA2PA((__address) dst_ptl0);
  81.         }
  82.     }
  83.  
  84.     return m;
  85. }
  86.  
/** Destroy an address space description structure.
 *
 * Not yet implemented: everything acquired in vm_create() — the vm_t
 * itself, its ASID and (when allocated there) the PTL0 frame — is
 * currently leaked.  TODO: implement teardown.
 */
void vm_destroy(vm_t *m)
{
}
  90.  
  91. vm_area_t *vm_area_create(vm_t *m, vm_type_t type, size_t size, __address addr)
  92. {
  93.     ipl_t ipl;
  94.     vm_area_t *a;
  95.    
  96.     if (addr % PAGE_SIZE)
  97.         panic("addr not aligned to a page boundary");
  98.    
  99.     ipl = interrupts_disable();
  100.     spinlock_lock(&m->lock);
  101.    
  102.     /*
  103.      * TODO: test vm_area which is to be created doesn't overlap with an existing one.
  104.      */
  105.    
  106.     a = (vm_area_t *) malloc(sizeof(vm_area_t));
  107.     if (a) {
  108.         int i;
  109.    
  110.         a->mapping = (__address *) malloc(size * sizeof(__address));
  111.         if (!a->mapping) {
  112.             free(a);
  113.             spinlock_unlock(&m->lock);
  114.             interrupts_restore(ipl);
  115.             return NULL;
  116.         }
  117.        
  118.         for (i=0; i<size; i++)
  119.             a->mapping[i] = frame_alloc(0, ONE_FRAME);
  120.        
  121.         spinlock_initialize(&a->lock, "vm_area_lock");
  122.            
  123.         link_initialize(&a->link);         
  124.         a->type = type;
  125.         a->size = size;
  126.         a->address = addr;
  127.        
  128.         list_append(&a->link, &m->vm_area_head);
  129.  
  130.     }
  131.  
  132.     spinlock_unlock(&m->lock);
  133.     interrupts_restore(ipl);
  134.    
  135.     return a;
  136. }
  137.  
/** Destroy a virtual memory area.
 *
 * Not yet implemented: the backing frames, the mapping array and the
 * vm_area_t allocated in vm_area_create() are currently leaked, and
 * the area stays linked into its address space's list.  TODO.
 */
void vm_area_destroy(vm_area_t *a)
{
}
  141.  
  142. void vm_area_map(vm_area_t *a, vm_t *m)
  143. {
  144.     int i, flags;
  145.     ipl_t ipl;
  146.    
  147.     ipl = interrupts_disable();
  148.     spinlock_lock(&m->lock);
  149.     spinlock_lock(&a->lock);
  150.  
  151.     switch (a->type) {
  152.         case VMA_TEXT:
  153.             flags = PAGE_EXEC | PAGE_READ | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
  154.             break;
  155.         case VMA_DATA:
  156.         case VMA_STACK:
  157.             flags = PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
  158.             break;
  159.         default:
  160.             panic("unexpected vm_type_t %d", a->type);
  161.     }
  162.  
  163.     ASSERT(m->ptl0);
  164.     for (i=0; i<a->size; i++)
  165.         page_mapping_insert(a->address + i*PAGE_SIZE, a->mapping[i], flags, (__address) m->ptl0);
  166.        
  167.     spinlock_unlock(&a->lock);
  168.     spinlock_unlock(&m->lock);
  169.     interrupts_restore(ipl);
  170. }
  171.  
  172. void vm_area_unmap(vm_area_t *a, vm_t *m)
  173. {
  174.     int i;
  175.     ipl_t ipl;
  176.    
  177.     ipl = interrupts_disable();
  178.     spinlock_lock(&m->lock);
  179.     spinlock_lock(&a->lock);
  180.  
  181.     ASSERT(m->ptl0);
  182.     for (i=0; i<a->size; i++)      
  183.         page_mapping_insert(a->address + i*PAGE_SIZE, 0, PAGE_NOT_PRESENT, (__address) m->ptl0);
  184.    
  185.     spinlock_unlock(&a->lock);
  186.     spinlock_unlock(&m->lock);
  187.     interrupts_restore(ipl);
  188. }
  189.  
/** Install an address space on the current processor.
 *
 * Switches the hardware page table root to @m->ptl0 inside a TLB
 * shootdown window, then performs architecture-specific installation
 * and records @m as the active address space in the global VM.
 *
 * NOTE(review): interrupts are restored before vm_install_arch() and
 * the VM assignment run — presumably intentional, but confirm against
 * the architecture code and callers.
 *
 * @param m Address space to activate; its ptl0 must be set.
 */
void vm_install(vm_t *m)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    /* Keep other CPUs from using stale translations while we switch. */
    tlb_shootdown_start();
    spinlock_lock(&m->lock);

    ASSERT(m->ptl0);
    SET_PTL0_ADDRESS(m->ptl0);

    spinlock_unlock(&m->lock);
    tlb_shootdown_finalize();

    interrupts_restore(ipl);

    vm_install_arch(m);

    VM = m;
}
  211.