/*
 * Copyright (C) 2005 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch.h>
#include <arch/types.h>
#include <arch/exception.h>
#include <align.h>
#include <config.h>
#include <print.h>
#include <symtab.h>

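/** Page hash table (PHT).
 *
 * Kernel virtual address of the hash table allocated in page_arch_init();
 * its entries are filled lazily by pht_insert() on PHT refill exceptions.
 */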
static phte_t *phte;


/** Try to find PTE for faulting address
 *
 * Try to find PTE for faulting address.
 * The as->lock must be held on entry to this function
 * if lock is true.
 *
 * @param as       Address space.
 * @param lock     Lock/unlock the address space.
 * @param badvaddr Faulting virtual address.
 * @param access   Access mode that caused the fault.
 * @param istate   Pointer to interrupted state.
 * @param pfcr     Pointer to variable where the as_page_fault() return code will be stored.
 * @return         PTE on success, NULL otherwise.
 *
 */
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access,
                     istate_t *istate, int *pfcr)
{
    /*
     * Check if the mapping exists in page tables.
     */
    pte_t *pte = page_mapping_find(as, badvaddr);
    if ((pte) && (pte->p)) {
        /*
         * Mapping found in page tables.
         * Immediately succeed.
         */
        return pte;
    } else {
        int rc;

        /*
         * Mapping not found in page tables.
         * Resort to higher-level page fault handler.
         */
        page_table_unlock(as, lock);
        switch (rc = as_page_fault(badvaddr, access, istate)) {
            case AS_PF_OK:
                /*
                 * The higher-level page fault handler succeeded;
                 * the mapping ought to be in place.
                 */
                page_table_lock(as, lock);
                pte = page_mapping_find(as, badvaddr);
                ASSERT((pte) && (pte->p));
                return pte;
            case AS_PF_DEFER:
                page_table_lock(as, lock);
                *pfcr = rc;
                return NULL;
            case AS_PF_FAULT:
                page_table_lock(as, lock);
                printf("Page fault.\n");
                *pfcr = rc;
                return NULL;
            default:
                panic("unexpected rc (%d)\n", rc);
        }
    }
}


static void pht_refill_fail(__address badvaddr, istate_t *istate)
{
    char *symbol = "";
    char *sym2 = "";

    char *s = get_symtab_entry(istate->pc);
    if (s)
        symbol = s;
    s = get_symtab_entry(istate->lr);
    if (s)
        sym2 = s;
    panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, symbol, sym2);
}


static void pht_insert(const __address vaddr, const pfn_t pfn)
{
    __u32 page = (vaddr >> 12) & 0xffff;
    __u32 api = (vaddr >> 22) & 0x3f;
    __u32 vsid;

    asm volatile (
        "mfsrin %0, %1\n"
        : "=r" (vsid)
        : "r" (vaddr)
    );

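    /*
     * mfsrin reads the segment register selected by the top four bits of
     * the effective address; the VSID stored there is hashed together
     * with the 16-bit page index below to locate a PTEG.
     */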
    /* Primary hash (xor) */
    __u32 h = 0;
    __u32 hash = vsid ^ page;
    __u32 base = (hash & 0x3ff) << 3;
    __u32 i;
    bool found = false;

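    /*
     * Each PTEG holds eight PTEs, hence the << 3 above. The 10-bit hash
     * mask (0x3ff) assumes the minimal hash table of 1024 primary PTEGs
     * (64 KiB), matching the PHT allocated in page_arch_init(). The loops
     * below search the primary and then the secondary group.
     */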
    /* Find unused or colliding
       PTE in PTEG */
    for (i = 0; i < 8; i++) {
        if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) {
            found = true;
            break;
        }
    }

    if (!found) {
        /* Secondary hash (not) */
        __u32 base2 = (~hash & 0x3ff) << 3;

        /* Find unused or colliding
           PTE in PTEG */
        for (i = 0; i < 8; i++) {
            if ((!phte[base2 + i].v) || ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) {
                found = true;
                base = base2;
                h = 1;
                break;
            }
        }

        if (!found) {
            // TODO: A/C precedence groups
            i = page % 8;
        }
    }

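    /*
     * Fill the chosen slot. If both PTEGs were full, the slot selected by
     * page % 8 in the primary group is simply overwritten; the evicted
     * translation will be refilled again on its next PHT miss.
     */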
    phte[base + i].v = 1;
    phte[base + i].vsid = vsid;
    phte[base + i].h = h;
    phte[base + i].api = api;
    phte[base + i].rpn = pfn;
    phte[base + i].r = 0;
    phte[base + i].c = 0;
    phte[base + i].pp = 2; // FIXME
}


/** Process Instruction/Data Storage Interrupt
 *
 * @param data   True if Data Storage Interrupt.
 * @param istate Interrupted register context.
 *
 */
void pht_refill(bool data, istate_t *istate)
{
    __address badvaddr;
    pte_t *pte;
    int pfcr;
    as_t *as;
    bool lock;

    if (AS == NULL) {
        as = AS_KERNEL;
        lock = false;
    } else {
        as = AS;
        lock = true;
    }

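    /*
     * For a Data Storage Interrupt the faulting address is read from the
     * DAR register; for an Instruction Storage Interrupt it is the
     * interrupted program counter itself.
     */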
    if (data) {
        asm volatile (
            "mfdar %0\n"
            : "=r" (badvaddr)
        );
    } else
        badvaddr = istate->pc;

    page_table_lock(as, lock);

    pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfcr);
    if (!pte) {
        switch (pfcr) {
            case AS_PF_FAULT:
                goto fail;
                break;
            case AS_PF_DEFER:
                /*
                 * The page fault came during copy_from_uspace()
                 * or copy_to_uspace().
                 */
                page_table_unlock(as, lock);
                return;
            default:
                panic("Unexpected pfcr (%d)\n", pfcr);
        }
    }

    pte->a = 1; /* Record access to PTE */
    pht_insert(badvaddr, pte->pfn);

    page_table_unlock(as, lock);
    return;

fail:
    page_table_unlock(as, lock);
    pht_refill_fail(badvaddr, istate);
}


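/** Clear the page hash table
 *
 * Zeroing the table marks every PTE invalid (v == 0), so all
 * translations are faulted in lazily via pht_refill().
 */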
void pht_init(void)
{
    memsetb((__address) phte, 1 << PHT_BITS, 0);
}


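/** Initialize architecture-specific paging
 *
 * On the bootstrap processor: install the hierarchical page table
 * mapping operations, map the physical memory not covered by the BAT
 * registers, allocate and clear the page hash table and announce it to
 * the MMU via the SDR1 register.
 */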
void page_arch_init(void)
{
    if (config.cpu_active == 1) {
        page_mapping_operations = &pt_mapping_operations;

        __address cur;
        int flags;

        /* Frames below 128 MB are mapped using BAT,
           map rest of the physical memory */
        for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
            flags = PAGE_CACHEABLE;
            if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
                flags |= PAGE_GLOBAL;
            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
        }

        /* Allocate page hash table */
        phte_t *physical_phte = (phte_t *) PFN2ADDR(frame_alloc(PHT_ORDER, FRAME_KA | FRAME_PANIC));
        phte = (phte_t *) PA2KA((__address) physical_phte);

        ASSERT((__address) physical_phte % (1 << PHT_BITS) == 0);
        pht_init();

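        /*
         * SDR1 tells the MMU where the hash table lives: its upper bits
         * hold the physical base address (hence the alignment assertion
         * above) and its low bits the HTABMASK, which remains zero for
         * the minimal table size.
         */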
        asm volatile (
            "mtsdr1 %0\n"
            :
            : "r" ((__address) physical_phte)
        );
    }
}


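/** Map a physical memory area into the kernel address space
 *
 * The area is mapped non-cacheable right above the already mapped
 * physical memory (last_frame) and the corresponding kernel virtual
 * address is returned.
 *
 * @param physaddr Physical address of the area.
 * @param size     Size of the area in bytes.
 * @return         Kernel virtual address of the mapped area.
 */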
__address hw_map(__address physaddr, size_t size)
{
    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
        panic("Unable to map physical memory %p (%d bytes)", physaddr, size);

    __address virtaddr = PA2KA(last_frame);
    pfn_t i;
    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);

    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);

    return virtaddr;
}