Subversion Repositories HelenOS

Rev

Rev 1702 | Rev 1726 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (C) 2005 Martin Decky
  3.  * All rights reserved.
  4.  *
  5.  * Redistribution and use in source and binary forms, with or without
  6.  * modification, are permitted provided that the following conditions
  7.  * are met:
  8.  *
  9.  * - Redistributions of source code must retain the above copyright
  10.  *   notice, this list of conditions and the following disclaimer.
  11.  * - Redistributions in binary form must reproduce the above copyright
  12.  *   notice, this list of conditions and the following disclaimer in the
  13.  *   documentation and/or other materials provided with the distribution.
  14.  * - The name of the author may not be used to endorse or promote products
  15.  *   derived from this software without specific prior written permission.
  16.  *
  17.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27.  */
  28.  
  29.  /** @addtogroup ppc32mm   
  30.  * @{
  31.  */
  32. /** @file
  33.  */
  34.  
  35. #include <arch/mm/page.h>
  36. #include <genarch/mm/page_pt.h>
  37. #include <arch/mm/frame.h>
  38. #include <arch/asm.h>
  39. #include <arch/interrupt.h>
  40. #include <mm/frame.h>
  41. #include <mm/page.h>
  42. #include <mm/as.h>
  43. #include <arch.h>
  44. #include <arch/types.h>
  45. #include <arch/exception.h>
  46. #include <align.h>
  47. #include <config.h>
  48. #include <print.h>
  49. #include <symtab.h>
  50.  
/** Kernel virtual address of the hardware Page Hash Table (set up in page_arch_init()). */
static phte_t *phte;
  52.  
  53.  
  54. /** Try to find PTE for faulting address
  55.  *
  56.  * Try to find PTE for faulting address.
  57.  * The as->lock must be held on entry to this function
  58.  * if lock is true.
  59.  *
  60.  * @param as       Address space.
  61.  * @param lock     Lock/unlock the address space.
  62.  * @param badvaddr Faulting virtual address.
  63.  * @param access   Access mode that caused the fault.
  64.  * @param istate   Pointer to interrupted state.
  65.  * @param pfrc     Pointer to variable where as_page_fault() return code will be stored.
  66.  * @return         PTE on success, NULL otherwise.
  67.  *
  68.  */
  69. static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access,
  70.                      istate_t *istate, int *pfrc)
  71. {
  72.     /*
  73.      * Check if the mapping exists in page tables.
  74.      */
  75.     pte_t *pte = page_mapping_find(as, badvaddr);
  76.     if ((pte) && (pte->p)) {
  77.         /*
  78.          * Mapping found in page tables.
  79.          * Immediately succeed.
  80.          */
  81.         return pte;
  82.     } else {
  83.         int rc;
  84.    
  85.         /*
  86.          * Mapping not found in page tables.
  87.          * Resort to higher-level page fault handler.
  88.          */
  89.         page_table_unlock(as, lock);
  90.         switch (rc = as_page_fault(badvaddr, access, istate)) {
  91.             case AS_PF_OK:
  92.                 /*
  93.                  * The higher-level page fault handler succeeded,
  94.                  * The mapping ought to be in place.
  95.                  */
  96.                 page_table_lock(as, lock);
  97.                 pte = page_mapping_find(as, badvaddr);
  98.                 ASSERT((pte) && (pte->p));
  99.                 return pte;
  100.             case AS_PF_DEFER:
  101.                 page_table_lock(as, lock);
  102.                 *pfrc = rc;
  103.                 return NULL;
  104.             case AS_PF_FAULT:
  105.                 page_table_lock(as, lock);
  106.                 printf("Page fault.\n");
  107.                 *pfrc = rc;
  108.                 return NULL;
  109.             default:
  110.                 panic("unexpected rc (%d)\n", rc);
  111.         }  
  112.     }
  113. }
  114.  
  115.  
  116. static void pht_refill_fail(__address badvaddr, istate_t *istate)
  117. {
  118.     char *symbol = "";
  119.     char *sym2 = "";
  120.  
  121.     char *s = get_symtab_entry(istate->pc);
  122.     if (s)
  123.         symbol = s;
  124.     s = get_symtab_entry(istate->lr);
  125.     if (s)
  126.         sym2 = s;
  127.     panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, symbol, sym2);
  128. }
  129.  
  130.  
/** Insert a translation into the hardware Page Hash Table
 *
 * Compute the primary (XOR) hash of the VSID and page index and scan
 * the 8-entry PTEG for a free or colliding slot; if none is found, try
 * the secondary (inverted) hash. If both PTEGs are full, a slot of the
 * primary PTEG is simply overwritten (no proper eviction policy yet).
 *
 * NOTE(review): the 0x3ff hash mask corresponds to a minimal-size PHT
 * (1024 PTEGs) — confirm this agrees with PHT_BITS/PHT_ORDER.
 *
 * @param vaddr Virtual address the new PTE should translate.
 * @param pfn   Physical frame number to map the address to.
 */
static void pht_insert(const __address vaddr, const pfn_t pfn)
{
    /* Low 16 bits of the virtual page number; input to the hash. */
    __u32 page = (vaddr >> 12) & 0xffff;
    /* Abbreviated Page Index: upper 6 bits of the page index, stored in the PTE. */
    __u32 api = (vaddr >> 22) & 0x3f;
    __u32 vsid;
    
    /* Read the Virtual Segment ID from the segment register selected by vaddr. */
    asm volatile (
        "mfsrin %0, %1\n"
        : "=r" (vsid)
        : "r" (vaddr)
    );
    
    /* Primary hash (xor) */
    __u32 h = 0;
    __u32 hash = vsid ^ page;
    __u32 base = (hash & 0x3ff) << 3;   /* PTEG index times 8 PTEs per group */
    __u32 i;
    bool found = false;
    
    /* Find unused or colliding
       PTE in PTEG */
    for (i = 0; i < 8; i++) {
        if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) {
            found = true;
            break;
        }
    }
    
    if (!found) {
        /* Secondary hash (not) */
        __u32 base2 = (~hash & 0x3ff) << 3;
        
        /* Find unused or colliding
           PTE in PTEG */
        for (i = 0; i < 8; i++) {
            if ((!phte[base2 + i].v) || ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) {
                found = true;
                base = base2;
                h = 1;
                break;
            }
        }
        
        if (!found) {
            /* Both PTEGs full: overwrite a pseudo-random slot of the
               primary PTEG (base and h keep their primary-hash values). */
            // TODO: A/C precedence groups
            i = page % 8;
        }
    }
    
    /* Fill in the chosen PTE; referenced/changed bits start cleared. */
    phte[base + i].v = 1;
    phte[base + i].vsid = vsid;
    phte[base + i].h = h;
    phte[base + i].api = api;
    phte[base + i].rpn = pfn;
    phte[base + i].r = 0;
    phte[base + i].c = 0;
    phte[base + i].pp = 2; // FIXME
}
  189.  
  190.  
  191. /** Process Instruction/Data Storage Interrupt
  192.  *
  193.  * @param n Interrupt vector number.
  194.  * @param istate Interrupted register context.
  195.  *
  196.  */
  197. void pht_refill(int n, istate_t *istate)
  198. {
  199.     __address badvaddr;
  200.     pte_t *pte;
  201.     int pfrc;
  202.     as_t *as;
  203.     bool lock;
  204.    
  205.     if (AS == NULL) {
  206.         as = AS_KERNEL;
  207.         lock = false;
  208.     } else {
  209.         as = AS;
  210.         lock = true;
  211.     }
  212.    
  213.     if (n == VECTOR_DATA_STORAGE) {
  214.         asm volatile (
  215.             "mfdar %0\n"
  216.             : "=r" (badvaddr)
  217.         );
  218.     } else
  219.         badvaddr = istate->pc;
  220.        
  221.     page_table_lock(as, lock);
  222.    
  223.     pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfrc);
  224.     if (!pte) {
  225.         switch (pfrc) {
  226.             case AS_PF_FAULT:
  227.                 goto fail;
  228.                 break;
  229.             case AS_PF_DEFER:
  230.                 /*
  231.                  * The page fault came during copy_from_uspace()
  232.                  * or copy_to_uspace().
  233.                  */
  234.                 page_table_unlock(as, lock);
  235.                 return;
  236.             default:
  237.                 panic("Unexpected pfrc (%d)\n", pfrc);
  238.         }
  239.     }
  240.    
  241.     pte->a = 1; /* Record access to PTE */
  242.     pht_insert(badvaddr, pte->pfn);
  243.    
  244.     page_table_unlock(as, lock);
  245.     return;
  246.    
  247. fail:
  248.     page_table_unlock(as, lock);
  249.     pht_refill_fail(badvaddr, istate);
  250. }
  251.  
  252.  
/** Clear the hardware Page Hash Table
 *
 * Zero the whole PHT (2^PHT_BITS bytes), marking every entry invalid.
 * phte must already point at the table (see page_arch_init()).
 */
void pht_init(void)
{
    memsetb((__address) phte, 1 << PHT_BITS, 0);
}
  257.  
  258.  
/** Initialize architecture-specific paging
 *
 * On the bootstrap CPU only: install the generic page-table mapping
 * operations, map physical memory above 128 MB into the kernel address
 * space, then allocate and clear the hardware Page Hash Table and
 * point the CPU's SDR1 register at it.
 */
void page_arch_init(void)
{
    if (config.cpu_active == 1) {
        page_mapping_operations = &pt_mapping_operations;
        
        __address cur;
        int flags;
        
        /* Frames below 128 MB are mapped using BAT,
           map rest of the physical memory */
        for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
            flags = PAGE_CACHEABLE;
            /* Pages backing the kernel image itself are marked global. */
            if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
                flags |= PAGE_GLOBAL;
            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
        }
        
        /* Allocate page hash table; keep the physical address for SDR1,
           but access the table through its kernel virtual address. */
        phte_t *physical_phte = (phte_t *) PFN2ADDR(frame_alloc(PHT_ORDER, FRAME_KA | FRAME_PANIC));
        phte = (phte_t *) PA2KA((__address) physical_phte);
        
        /* Hardware requires the PHT to be aligned to its own size. */
        ASSERT((__address) physical_phte % (1 << PHT_BITS) == 0);
        pht_init();
        
        /* Publish the PHT base to the MMU.
           NOTE(review): writing the raw address assumes HTABMASK == 0,
           i.e. a minimal-size PHT — confirm against PHT_BITS. */
        asm volatile (
            "mtsdr1 %0\n"
            :
            : "r" ((__address) physical_phte)
        );
    }
}
  290.  
  291.  
  292. __address hw_map(__address physaddr, size_t size)
  293. {
  294.     if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
  295.         panic("Unable to map physical memory %p (%d bytes)", physaddr, size)
  296.    
  297.     __address virtaddr = PA2KA(last_frame);
  298.     pfn_t i;
  299.     for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
  300.         page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
  301.    
  302.     last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
  303.    
  304.     return virtaddr;
  305. }
  306.  
  307.  /** @}
  308.  */
  309.  
  310.