Subversion Repositories HelenOS

Rev 205

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <mm/vm.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/tlb.h>
#include <mm/heap.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <list.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <arch.h>

#define KAS_START_INDEX     PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)
#define KAS_END_INDEX       PTL0_INDEX(KERNEL_ADDRESS_SPACE_END)
#define KAS_INDICES         (1 + (KAS_END_INDEX - KAS_START_INDEX))

vm_t *vm_create(pte_t *ptl0)
{
    vm_t *m;

    m = (vm_t *) malloc(sizeof(vm_t));
    if (m) {
        spinlock_initialize(&m->lock);
        list_initialize(&m->vm_area_head);

        /*
         * Each vm_t is supposed to have its own page table.
         * It is either passed one or it has to allocate and set one up.
         */
        m->ptl0 = ptl0;
        if (!m->ptl0) {
            pte_t *src_ptl0, *dst_ptl0;

            src_ptl0 = (pte_t *) PA2KA((__address) GET_PTL0_ADDRESS());
            dst_ptl0 = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC);

            /* TODO: in theory, it should only be necessary to copy the kernel address space... */
            memcpy((void *) PA2KA((__address) dst_ptl0), (void *) GET_PTL0_ADDRESS(), PAGE_SIZE);
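            /*
             * Sketch of the narrower copy hinted at in the TODO above (comment
             * only, not active code in this revision): only the PTL0 entries
             * covering the kernel address space would need to be copied, using
             * the KAS_* macros defined at the top of this file. Assuming
             * src_ptl0 and dst_ptl0 both point at kernel-accessible views of
             * the tables and the user part of dst_ptl0 has been zeroed first:
             *
             *  memcpy(&dst_ptl0[KAS_START_INDEX], &src_ptl0[KAS_START_INDEX],
             *         KAS_INDICES * sizeof(pte_t));
             */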

            m->ptl0 = (pte_t *) KA2PA((__address) dst_ptl0);
        }
    }

    return m;
}

void vm_destroy(vm_t *m)
{
}

vm_area_t *vm_area_create(vm_t *m, vm_type_t type, size_t size, __address addr)
{
    pri_t pri;
    vm_area_t *a;

    if (addr % PAGE_SIZE)
        panic("addr not aligned to a page boundary");

    pri = cpu_priority_high();
    spinlock_lock(&m->lock);

    /*
     * TODO: check that the vm_area being created does not overlap with an existing one.
     */

    a = (vm_area_t *) malloc(sizeof(vm_area_t));
    if (a) {
        int i;

        a->mapping = (__address *) malloc(size * sizeof(__address));
        if (!a->mapping) {
            free(a);
            spinlock_unlock(&m->lock);
            cpu_priority_restore(pri);
            return NULL;
        }

        for (i = 0; i < size; i++)
            a->mapping[i] = frame_alloc(0);

        spinlock_initialize(&a->lock);

        link_initialize(&a->link);
        a->type = type;
        a->size = size;
        a->address = addr;

        list_append(&a->link, &m->vm_area_head);
    }

    spinlock_unlock(&m->lock);
    cpu_priority_restore(pri);

    return a;
}

void vm_area_destroy(vm_area_t *a)
{
}

void vm_area_map(vm_area_t *a, vm_t *m)
{
    int i, flags;
    pri_t pri;

    pri = cpu_priority_high();
    spinlock_lock(&m->lock);
    spinlock_lock(&a->lock);

    switch (a->type) {
        case VMA_TEXT:
            flags = PAGE_EXEC | PAGE_READ | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
            break;
        case VMA_DATA:
        case VMA_STACK:
            flags = PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
            break;
        default:
            panic("unexpected vm_type_t %d", a->type);
    }

    ASSERT(m->ptl0);
    for (i = 0; i < a->size; i++)
        map_page_to_frame(a->address + i * PAGE_SIZE, a->mapping[i], flags, (__address) m->ptl0);

    spinlock_unlock(&a->lock);
    spinlock_unlock(&m->lock);
    cpu_priority_restore(pri);
}

void vm_area_unmap(vm_area_t *a, vm_t *m)
{
    int i;
    pri_t pri;

    pri = cpu_priority_high();
    spinlock_lock(&m->lock);
    spinlock_lock(&a->lock);

    ASSERT(m->ptl0);
    for (i = 0; i < a->size; i++)
        map_page_to_frame(a->address + i * PAGE_SIZE, 0, PAGE_NOT_PRESENT, (__address) m->ptl0);

    spinlock_unlock(&a->lock);
    spinlock_unlock(&m->lock);
    cpu_priority_restore(pri);
}

void vm_install(vm_t *m)
{
    link_t *l;
    pri_t pri;

    pri = cpu_priority_high();

    tlb_shootdown_start();
    spinlock_lock(&m->lock);

    ASSERT(m->ptl0);
    SET_PTL0_ADDRESS(m->ptl0);

    spinlock_unlock(&m->lock);
    tlb_shootdown_finalize();

    cpu_priority_restore(pri);
}
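
A minimal usage sketch (not part of the repository file, added for illustration) of how the primitives above fit together: vm_create() builds an address space, vm_area_create() backs a page-aligned region with freshly allocated frames, vm_area_map() enters those frames into the address space's page tables, and vm_install() activates the address space by loading its PTL0. The function name example_spawn, the VMA_TEXT type, the one-page size and the 0x1000 address are assumptions made up for the example.

/* Illustrative sketch only -- not part of rev 205. */
static void example_spawn(void)
{
    vm_t *m;
    vm_area_t *a;

    /* Passing NULL makes vm_create() allocate and set up its own PTL0. */
    m = vm_create(NULL);
    if (!m)
        panic("vm_create() failed");

    /* One page of executable user memory at an assumed page-aligned address. */
    a = vm_area_create(m, VMA_TEXT, 1, 0x1000);
    if (!a)
        panic("vm_area_create() failed");

    vm_area_map(a, m);      /* enter the area's frames into m's page tables */
    vm_install(m);          /* switch the hardware to m's PTL0 */
}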