/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief   Backend for address space areas backed by an ELF image.
 */

#include <lib/elf.h>
#include <debug.h>
#include <arch/types.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <arch/barrier.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif

static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
static void elf_share(as_area_t *area);

mem_backend_t elf_backend = {
    .page_fault = elf_page_fault,
    .frame_free = elf_frame_free,
    .share = elf_share
};
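
/*
 * Illustrative only: this backend is attached to an address space area when
 * an ELF segment is mapped in. A minimal sketch, assuming the as_area_create()
 * interface of this kernel version (names and argument order may differ):
 *
 *     mem_backend_data_t backend_data;
 *     backend_data.elf = elf;          (pointer to the ELF image header)
 *     backend_data.segment = entry;    (pointer to the segment header)
 *     (void) as_area_create(as, flags, entry->p_memsz, entry->p_vaddr,
 *         AS_AREA_ATTR_NONE, &elf_backend, &backend_data);
 */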

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area      Pointer to the address space area.
 * @param addr      Faulting virtual address.
 * @param access    Access mode that caused the fault (i.e.
 *          read/write/exec).
 *
 * @return      AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
 *          on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
    elf_header_t *elf = area->backend_data.elf;
    elf_segment_header_t *entry = area->backend_data.segment;
    btree_node_t *leaf;
    uintptr_t base, frame, page, start_anon;
    index_t i;
    bool dirty = false;

    if (!as_area_check_access(area, access))
        return AS_PF_FAULT;

    ASSERT((addr >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&
        (addr < entry->p_vaddr + entry->p_memsz));
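
    /*
     * Index of the faulting page within the segment and the kernel
     * virtual address at which the segment's first page resides in the
     * loaded ELF image.
     */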
    i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
    base = (uintptr_t)
        (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));

    /* Virtual address of the faulting page. */
    page = ALIGN_DOWN(addr, PAGE_SIZE);

    /* Virtual address of the end of the initialized part of the segment. */
    start_anon = entry->p_vaddr + entry->p_filesz;

    if (area->sh_info) {
        bool found = false;

        /*
         * The address space area is shared.
         */

        mutex_lock(&area->sh_info->lock);
        frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
            page - area->base, &leaf);
        if (!frame) {
            unsigned int i;

            /*
             * Workaround for valid NULL address. btree_search()
             * returns NULL both when the key is absent and when
             * the value stored under it is a zero frame address,
             * so scan the leaf keys to tell the two cases apart.
             */

            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page - area->base) {
                    found = true;
                    break;
                }
            }
        }
        if (frame || found) {
            frame_reference_add(ADDR2PFN(frame));
            page_mapping_insert(AS, addr, frame,
                as_area_get_flags(area));
            if (!used_space_insert(area, page, 1))
                panic("Could not insert used space.\n");
            mutex_unlock(&area->sh_info->lock);
            return AS_PF_OK;
        }
    }

    /*
     * The area is either not shared or the pagemap does not contain the
     * mapping.
     */
    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
        /*
         * Initialized portion of the segment. The memory is backed
         * directly by the content of the ELF image. Pages are only
         * copied if the segment is writable so that there can be
         * more instantiations of the same ELF image in memory at
         * a time. Note that this could later be done as COW.
         */
        if (entry->p_flags & PF_W) {
            frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
            memcpy((void *) PA2KA(frame),
                (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
            if (entry->p_flags & PF_X) {
                smc_coherence_block((void *) PA2KA(frame),
                    FRAME_SIZE);
            }
            dirty = true;
        } else {
            frame = KA2PA(base + i * FRAME_SIZE);
        }
    } else if (page >= start_anon) {
        /*
         * This is the uninitialized portion of the segment.
         * It is not physically present in the ELF image.
         * To resolve the situation, a frame must be allocated
         * and cleared.
         */
        frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
        memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
        dirty = true;
    } else {
        size_t pad_lo, pad_hi;
        /*
         * The mixed case.
         *
         * The middle part is backed by the ELF image and
         * the lower and upper parts are anonymous memory.
         * (The segment can be and often is shorter than 1 page.)
         */
        if (page < entry->p_vaddr)
            pad_lo = entry->p_vaddr - page;
        else
            pad_lo = 0;

        if (start_anon < page + PAGE_SIZE)
            pad_hi = page + PAGE_SIZE - start_anon;
        else
            pad_hi = 0;

        frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
        memcpy((void *) (PA2KA(frame) + pad_lo),
            (void *) (base + i * FRAME_SIZE + pad_lo),
            FRAME_SIZE - pad_lo - pad_hi);
        if (entry->p_flags & PF_X) {
            smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
                FRAME_SIZE - pad_lo - pad_hi);
        }
        memsetb((void *) PA2KA(frame), pad_lo, 0);
        memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
            0);
        dirty = true;
    }

    if (dirty && area->sh_info) {
        frame_reference_add(ADDR2PFN(frame));
        btree_insert(&area->sh_info->pagemap, page - area->base,
            (void *) frame, leaf);
    }

    if (area->sh_info)
        mutex_unlock(&area->sh_info->lock);

    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    if (!used_space_insert(area, page, 1))
        panic("Could not insert used space.\n");

    return AS_PF_OK;
}
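
/*
 * Worked example with illustrative values (not taken from any particular
 * binary): let PAGE_SIZE be 4 KiB, p_vaddr = 0x1000, p_filesz = 0x1800 and
 * p_memsz = 0x3000, so start_anon = 0x2800 and the segment ends at 0x4000.
 * A fault at 0x1234 falls into the fully initialized page [0x1000, 0x2000)
 * and is served from the image (copied first if the segment is writable).
 * A fault at 0x3100 falls into [0x3000, 0x4000), entirely beyond start_anon,
 * so a zeroed anonymous frame is allocated. A fault at 0x2900 falls into
 * [0x2000, 0x3000), the mixed case: pad_lo = 0 and pad_hi = 0x800, hence the
 * first 0x800 bytes come from the image and the last 0x800 bytes are zeroed.
 */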

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area      Pointer to the address space area.
 * @param page      Page that is mapped to frame. Must be aligned to
 *          PAGE_SIZE.
 * @param frame     Frame to be released.
 */
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
    elf_header_t *elf = area->backend_data.elf;
    elf_segment_header_t *entry = area->backend_data.segment;
    uintptr_t base, start_anon;
    index_t i;

    ASSERT((page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&
        (page < entry->p_vaddr + entry->p_memsz));
    i = (page - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
    base = (uintptr_t) (((void *) elf) +
        ALIGN_DOWN(entry->p_offset, FRAME_SIZE));
    start_anon = entry->p_vaddr + entry->p_filesz;

    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
        if (entry->p_flags & PF_W) {
            /*
             * Free the frame with the copy of writable segment
             * data.
             */
            frame_free(frame);
        }
    } else {
        /*
         * The frame is either anonymous memory or the mixed case (i.e.
         * lower part is backed by the ELF image and the upper is
         * anonymous). In any case, a frame needs to be freed.
         */
        frame_free(frame);
    }
}
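
/*
 * Note that read-only, image-backed pages are not released above: for those,
 * elf_page_fault() maps the faulting page directly onto a frame of the loaded
 * ELF image, so there is no private copy to free.
 */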

/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the
 * pagemap. Otherwise only portions of the area that are not backed by the
 * ELF image are put into the pagemap.
 *
 * The address space and address space area must be locked prior to the call.
 *
 * @param area      Address space area.
 */
void elf_share(as_area_t *area)
{
    elf_segment_header_t *entry = area->backend_data.segment;
    link_t *cur;
    btree_node_t *leaf, *node;
    uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;

    /*
     * Find the used_space leaf node in which to start the linear search.
     * For writable areas every used page must be examined, so start with
     * the first leaf. For read-only areas only pages at or beyond
     * start_anon can be anonymous, so start with the leaf that covers
     * start_anon.
     */
    if (area->flags & AS_AREA_WRITE) {
        node = list_get_instance(area->used_space.leaf_head.next,
            btree_node_t, leaf_link);
    } else {
        (void) btree_search(&area->used_space, start_anon, &leaf);
        node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
        if (!node)
            node = leaf;
    }

    /*
     * Copy used anonymous portions of the area to sh_info's page map.
     */
    mutex_lock(&area->sh_info->lock);
    for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
        cur = cur->next) {
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);

        for (i = 0; i < node->keys; i++) {
            uintptr_t base = node->key[i];
            count_t count = (count_t) node->value[i];
            unsigned int j;

            /*
             * Skip read-only areas of used space that are backed
             * by the ELF image.
             */
            if (!(area->flags & AS_AREA_WRITE))
                if (base >= entry->p_vaddr &&
                    base + count * PAGE_SIZE <= start_anon)
                    continue;

            for (j = 0; j < count; j++) {
                pte_t *pte;

                /*
                 * Skip read-only pages that are backed by the
                 * ELF image.
                 */
                if (!(area->flags & AS_AREA_WRITE))
                    if (base >= entry->p_vaddr &&
                        base + (j + 1) * PAGE_SIZE <=
                        start_anon)
                        continue;

                page_table_lock(area->as, false);
                pte = page_mapping_find(area->as,
                    base + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                btree_insert(&area->sh_info->pagemap,
                    (base + j * PAGE_SIZE) - area->base,
                    (void *) PTE_GET_FRAME(pte), NULL);
                /*
                 * Add the frame reference while the page
                 * tables are still locked and the PTE is
                 * guaranteed to remain valid.
                 */
                frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
                page_table_unlock(area->as, false);
            }
        }
    }
    mutex_unlock(&area->sh_info->lock);
}
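
/*
 * For context: the share hook runs via area->backend->share() when an area is
 * first shared with another address space (cf. as_area_share()). Subsequent
 * faults on either side are then satisfied from the pagemap filled in above.
 */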

/** @}
 */