/*
 * Copyright (C) 2005 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ppc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch.h>
#include <arch/types.h>
#include <arch/exception.h>
#include <align.h>
#include <config.h>
#include <print.h>
#include <symtab.h>
#include <memstr.h>

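/** Page hash table (PHT) */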
static phte_t *phte;


/** Try to find PTE for faulting address
 *
 * Try to find PTE for faulting address.
 * The as->lock must be held on entry to this function
 * if lock is true.
 *
 * @param as       Address space.
 * @param lock     Lock/unlock the address space.
 * @param badvaddr Faulting virtual address.
 * @param access   Access mode that caused the fault.
 * @param istate   Pointer to interrupted state.
 * @param pfrc     Pointer to variable where as_page_fault() return code will be stored.
 * @return         PTE on success, NULL otherwise.
 *
 */
static pte_t *find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr, int access,
                                     istate_t *istate, int *pfrc)
{
    /*
     * Check if the mapping exists in page tables.
     */
    pte_t *pte = page_mapping_find(as, badvaddr);
    if ((pte) && (pte->p)) {
        /*
         * Mapping found in page tables.
         * Immediately succeed.
         */
        return pte;
    } else {
        int rc;
        
        /*
         * Mapping not found in page tables.
         * Resort to higher-level page fault handler.
         */
        page_table_unlock(as, lock);
        switch (rc = as_page_fault(badvaddr, access, istate)) {
            case AS_PF_OK:
                /*
                 * The higher-level page fault handler succeeded.
                 * The mapping ought to be in place.
                 */
                page_table_lock(as, lock);
                pte = page_mapping_find(as, badvaddr);
                ASSERT((pte) && (pte->p));
                *pfrc = 0;
                return pte;
            case AS_PF_DEFER:
                page_table_lock(as, lock);
                *pfrc = rc;
                return NULL;
            case AS_PF_FAULT:
                page_table_lock(as, lock);
                printf("Page fault.\n");
                *pfrc = rc;
                return NULL;
            default:
                panic("unexpected rc (%d)\n", rc);
        }
    }
}


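/** Report an unresolvable PHT refill fault and halt
 *
 * Resolve the interrupted program counter and link register
 * to symbol names (when available) and panic with the faulting address.
 *
 * @param badvaddr Faulting virtual address.
 * @param istate   Interrupted register context.
 *
 */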
static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate)
{
    char *symbol = "";
    char *sym2 = "";
    
    char *s = get_symtab_entry(istate->pc);
    if (s)
        symbol = s;
    s = get_symtab_entry(istate->lr);
    if (s)
        sym2 = s;
    panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, symbol, sym2);
}


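/** Insert a mapping into the page hash table
 *
 * Compute the primary hash (VSID XOR page index) and look for a free
 * or colliding slot in the 8-entry PTEG; if none is found, retry with
 * the secondary hash (one's complement). As a last resort, an entry
 * of the primary PTEG is overwritten.
 *
 * @param vaddr Virtual address of the page being mapped.
 * @param pfn   Physical frame number to map it to.
 *
 */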
static void pht_insert(const uintptr_t vaddr, const pfn_t pfn)
{
    uint32_t page = (vaddr >> 12) & 0xffff;
    uint32_t api = (vaddr >> 22) & 0x3f;
    uint32_t vsid;
    
    /* Read the VSID for vaddr from the corresponding segment register */
    asm volatile (
        "mfsrin %0, %1\n"
        : "=r" (vsid)
        : "r" (vaddr)
    );
    
    /* Primary hash (xor) */
    uint32_t h = 0;
    uint32_t hash = vsid ^ page;
    uint32_t base = (hash & 0x3ff) << 3;
    uint32_t i;
    bool found = false;
    
    /* Find unused or colliding PTE in PTEG */
    for (i = 0; i < 8; i++) {
        if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) {
            found = true;
            break;
        }
    }
    
    if (!found) {
        /* Secondary hash (not) */
        uint32_t base2 = (~hash & 0x3ff) << 3;
        
        /* Find unused or colliding PTE in PTEG */
        for (i = 0; i < 8; i++) {
            if ((!phte[base2 + i].v) || ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) {
                found = true;
                base = base2;
                h = 1;
                break;
            }
        }
        
        if (!found) {
            /* Overwrite an entry of the primary PTEG */
            // TODO: A/C precedence groups
            i = page % 8;
        }
    }
    
    phte[base + i].v = 1;
    phte[base + i].vsid = vsid;
    phte[base + i].h = h;
    phte[base + i].api = api;
    phte[base + i].rpn = pfn;
    phte[base + i].r = 0;
    phte[base + i].c = 0;
    phte[base + i].pp = 2; // FIXME
}


/** Process Instruction/Data Storage Interrupt
 *
 * @param data   True if Data Storage Interrupt.
 * @param istate Interrupted register context.
 *
 */
void pht_refill(bool data, istate_t *istate)
{
    uintptr_t badvaddr;
    pte_t *pte;
    int pfrc;
    as_t *as;
    bool lock;
    
    if (AS == NULL) {
        as = AS_KERNEL;
        lock = false;
    } else {
        as = AS;
        lock = true;
    }
    
    if (data) {
        /* On Data Storage Interrupt the faulting address is in DAR */
        asm volatile (
            "mfdar %0\n"
            : "=r" (badvaddr)
        );
    } else
        badvaddr = istate->pc;
    
    page_table_lock(as, lock);
    
    pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfrc);
    if (!pte) {
        switch (pfrc) {
            case AS_PF_FAULT:
                goto fail;
                break;
            case AS_PF_DEFER:
                /*
                 * The page fault came during copy_from_uspace()
                 * or copy_to_uspace().
                 */
                page_table_unlock(as, lock);
                return;
            default:
                panic("Unexpected pfrc (%d)\n", pfrc);
        }
    }
    
    pte->a = 1; /* Record access to PTE */
    pht_insert(badvaddr, pte->pfn);
    
    page_table_unlock(as, lock);
    return;
    
fail:
    page_table_unlock(as, lock);
    pht_refill_fail(badvaddr, istate);
}


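/** Clear the page hash table
 *
 * Invalidate all PHT entries by zeroing the whole table.
 *
 */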
void pht_init(void)
{
    memsetb((uintptr_t) phte, 1 << PHT_BITS, 0);
}


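/** Initialize paging
 *
 * On the bootstrap processor (config.cpu_active == 1), map physical
 * memory above 128 MB into the kernel address space, allocate the
 * page hash table and announce it to the CPU via the SDR1 register.
 *
 */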
void page_arch_init(void)
{
    if (config.cpu_active == 1) {
        page_mapping_operations = &pt_mapping_operations;
        
        uintptr_t cur;
        int flags;
        
        /* Frames below 128 MB are mapped using BAT,
           map the rest of the physical memory */
        for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
            flags = PAGE_CACHEABLE;
            if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
                flags |= PAGE_GLOBAL;
            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
        }
        
        /* Allocate page hash table;
           phte is accessed via its kernel mapping, SDR1 takes the physical address */
        phte_t *physical_phte = (phte_t *) frame_alloc(PHT_ORDER, FRAME_ATOMIC);
        phte = (phte_t *) PA2KA((uintptr_t) physical_phte);
        
        ASSERT((uintptr_t) physical_phte % (1 << PHT_BITS) == 0);
        pht_init();
        
        asm volatile (
            "mtsdr1 %0\n"
            :
            : "r" ((uintptr_t) physical_phte)
        );
    }
}


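/** Map a physical memory area into the kernel address space
 *
 * The area is mapped non-cacheable at PA2KA(last_frame) and
 * last_frame is advanced past it.
 *
 * @param physaddr Physical address of the area.
 * @param size     Size of the area in bytes.
 * @return         Virtual address of the mapping.
 *
 */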
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
        panic("Unable to map physical memory %p (%d bytes)", physaddr, size);
    
    uintptr_t virtaddr = PA2KA(last_frame);
    pfn_t i;
    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
    
    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
    
    return virtaddr;
}

/** @}
 */