/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief   Backend for address space areas backed by an ELF image.
 */

#include <elf.h>
#include <debug.h>
#include <arch/types.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, __address page, __address frame);
static void elf_share(as_area_t *area);

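/*
 * The ELF backend operations. Address space areas using this backend keep
 * pointers to the ELF header and to the respective program segment header
 * in their backend_data.
 */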
mem_backend_t elf_backend = {
    .page_fault = elf_page_fault,
    .frame_free = elf_frame_free,
    .share = elf_share
};

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must already be locked.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
    elf_header_t *elf = area->backend_data.elf;
    elf_segment_header_t *entry = area->backend_data.segment;
    btree_node_t *leaf;
    __address base, frame;
    index_t i;

    if (!as_area_check_access(area, access))
        return AS_PF_FAULT;

    ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
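    /*
     * i is the index of the faulting page within the segment; base is the
     * kernel virtual address of the segment data within the ELF image.
     */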
    i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
    base = (__address) (((void *) elf) + entry->p_offset);
    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

    if (area->sh_info) {
        bool found = false;

        /*
         * The address space area is shared.
         */

        mutex_lock(&area->sh_info->lock);
        frame = (__address) btree_search(&area->sh_info->pagemap,
            ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
        if (!frame) {
            int i;

            /*
             * Workaround for valid NULL address: the pagemap can map a
             * page to frame zero, in which case btree_search() returns
             * NULL just as it does for a missing key. Scan the leaf keys
             * to tell the two cases apart.
             */

            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
                    found = true;
                    break;
                }
            }
        }
        if (frame || found) {
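            /*
             * The mapping was found in the pagemap: reuse the frame and
             * map it into this address space as well.
             */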
            frame_reference_add(ADDR2PFN(frame));
            page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
            if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
                panic("Could not insert used space.\n");
            mutex_unlock(&area->sh_info->lock);
            return AS_PF_OK;
        }
    }

    /*
     * The area is either not shared or the pagemap does not contain the
     * mapping. Depending on where the faulting page lies with respect to
     * the initialized part of the segment (i.e. p_filesz), it is backed
     * by the ELF image, by anonymous memory, or by a mix of both.
     */

    if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
        /*
         * Initialized portion of the segment. The memory is backed
         * directly by the content of the ELF image. Pages are copied
         * only if the segment is writable, so that multiple instances
         * of the same ELF image can be in use at a time. Note that
         * this could later be done as COW.
         */
        if (entry->p_flags & PF_W) {
            frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
            memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);

            if (area->sh_info) {
                frame_reference_add(ADDR2PFN(frame));
                btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
                    (void *) frame, leaf);
            }

        } else {
            frame = KA2PA(base + i*FRAME_SIZE);
        }
    } else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
        /*
         * This is the uninitialized portion of the segment.
         * It is not physically present in the ELF image.
         * To resolve the situation, a frame must be allocated
         * and cleared.
         */
        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame), FRAME_SIZE, 0);

        if (area->sh_info) {
            frame_reference_add(ADDR2PFN(frame));
            btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
                (void *) frame, leaf);
        }

    } else {
        size_t size;
        /*
         * The mixed case.
         * The lower part is backed by the ELF image and
         * the upper part is anonymous memory.
         */
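        /* Number of bytes on this page that are backed by the ELF image. */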
        size = entry->p_filesz - (i<<PAGE_WIDTH);
        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
        memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);

        if (area->sh_info) {
            frame_reference_add(ADDR2PFN(frame));
            btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
                (void *) frame, leaf);
        }

    }

    if (area->sh_info)
        mutex_unlock(&area->sh_info->lock);

    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
        panic("Could not insert used space.\n");

    return AS_PF_OK;
}

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must already be locked.
 *
 * @param area Pointer to the address space area.
 * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
 * @param frame Frame to be released.
 */
void elf_frame_free(as_area_t *area, __address page, __address frame)
{
    elf_header_t *elf = area->backend_data.elf;
    elf_segment_header_t *entry = area->backend_data.segment;
    __address base;
    index_t i;

    ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
    i = (page - entry->p_vaddr) >> PAGE_WIDTH;
    base = (__address) (((void *) elf) + entry->p_offset);
    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

    if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
        if (entry->p_flags & PF_W) {
            /*
             * Free the frame with the copy of writable segment data.
             */
            frame_free(ADDR2PFN(frame));
        }
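        /*
         * Read-only pages in this range are backed directly by the ELF
         * image, so there is no dedicated frame to free.
         */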
    } else {
        /*
         * The frame is either anonymous memory or the mixed case (i.e. the
         * lower part is backed by the ELF image and the upper part is
         * anonymous). In either case, the frame needs to be freed.
         */
        frame_free(ADDR2PFN(frame));
    }
}

/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the pagemap.
 * Otherwise, only those portions of the area that are not backed by the ELF
 * image are put into the pagemap.
 *
 * The address space and address space area must be locked prior to the call.
 *
 * @param area Address space area.
 */
void elf_share(as_area_t *area)
{
    elf_segment_header_t *entry = area->backend_data.segment;
    link_t *cur;
    btree_node_t *leaf, *node;
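    /* The first address within the area that is not backed by the ELF image. */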
    __address start_anon = entry->p_vaddr + entry->p_filesz;

    /*
     * Find the node in which to start linear search.
     */
    if (area->flags & AS_AREA_WRITE) {
        node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
    } else {
        (void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
        node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap, leaf);
        if (!node)
            node = leaf;
    }

    /*
     * Copy used anonymous portions of the area to sh_info's page map.
     */
    mutex_lock(&area->sh_info->lock);
    for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; cur = cur->next) {
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);

        for (i = 0; i < node->keys; i++) {
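            /*
             * Each key in used_space is the base address of a used
             * interval; the value is the size of the interval in pages.
             */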
            __address base = node->key[i];
            count_t count = (count_t) node->value[i];
            int j;

            /*
             * Skip read-only areas of used space that are backed
             * by the ELF image.
             */
            if (!(area->flags & AS_AREA_WRITE))
                if (base + count*PAGE_SIZE <= start_anon)
                    continue;

            for (j = 0; j < count; j++) {
                pte_t *pte;

                /*
                 * Skip read-only pages that are backed by the ELF image.
                 */
                if (!(area->flags & AS_AREA_WRITE))
                    if (base + (j + 1)*PAGE_SIZE <= start_anon)
                        continue;

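                /*
                 * Duplicate the mapping in the pagemap and add a
                 * reference to the underlying frame.
                 */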
                page_table_lock(area->as, false);
                pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
                    (void *) PTE_GET_FRAME(pte), NULL);
                page_table_unlock(area->as, false);
                frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
            }
        }
    }
    mutex_unlock(&area->sh_info->lock);
}

/** @}
 */