/*
 * Copyright (C) 2005 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ppc32mm
 * @{
 */
/** @file
 */

#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <arch/asm.h>
#include <arch/interrupt.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch.h>
#include <arch/types.h>
#include <arch/exception.h>
#include <align.h>
#include <config.h>
#include <print.h>
#include <symtab.h>

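/*
 * On ppc32 the hardware translates addresses through a hashed page table
 * (PHT) whose physical base address is held in the SDR1 register. The
 * kernel itself keeps its mappings in the generic hierarchical page tables
 * (pt_mapping_operations); PHT entries are installed lazily by the
 * Instruction/Data Storage interrupt handlers below.
 */
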
/** Try to find PTE for faulting address
 *
 * Try to find PTE for the faulting address.
 * The as->lock must be held on entry to this function
 * if lock is true.
 *
 * @param as       Address space.
 * @param lock     Lock/unlock the address space.
 * @param badvaddr Faulting virtual address.
 * @param access   Access mode that caused the fault.
 * @param istate   Pointer to interrupted state.
 * @param pfrc     Pointer to variable where as_page_fault() return code will be stored.
 * @return         PTE on success, NULL otherwise.
 *
 */
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access, istate_t *istate, int *pfrc)
{
    /*
     * Check if the mapping exists in page tables.
     */
    pte_t *pte = page_mapping_find(as, badvaddr);
    if ((pte) && (pte->p)) {
        /*
         * Mapping found in page tables.
         * Immediately succeed.
         */
        return pte;
    } else {
        int rc;

        /*
         * Mapping not found in page tables.
         * Resort to higher-level page fault handler.
         */
        page_table_unlock(as, lock);
        switch (rc = as_page_fault(badvaddr, access, istate)) {
            case AS_PF_OK:
                /*
                 * The higher-level page fault handler succeeded;
                 * the mapping ought to be in place.
                 */
                page_table_lock(as, lock);
                pte = page_mapping_find(as, badvaddr);
                ASSERT((pte) && (pte->p));
                return pte;
            case AS_PF_DEFER:
                page_table_lock(as, lock);
                *pfrc = rc;
                return NULL;
            case AS_PF_FAULT:
                page_table_lock(as, lock);
                printf("Page fault.\n");
                *pfrc = rc;
                return NULL;
            default:
                panic("Unexpected rc (%d)\n", rc);
        }
    }
}


static void pht_refill_fail(__address badvaddr, istate_t *istate)
{
    char *symbol = "";
    char *sym2 = "";

    char *s = get_symtab_entry(istate->pc);
    if (s)
        symbol = s;
    s = get_symtab_entry(istate->lr);
    if (s)
        sym2 = s;
    panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, symbol, sym2);
}


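/*
 * The PHT is organized as groups of eight 8-byte PTEs (PTEGs). A mapping
 * may live either in the PTEG selected by the primary hash (VSID XOR page
 * index) or in the PTEG selected by the secondary hash (one's complement
 * of the primary hash). pht_insert() first looks for a free or colliding
 * slot in the primary group, then in the secondary group; if both groups
 * are full, the entry at index (page % 8) is simply overwritten.
 */
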
static void pht_insert(const __address vaddr, const pfn_t pfn)
{
    __u32 page = (vaddr >> 12) & 0xffff;
    __u32 api = (vaddr >> 22) & 0x3f;

    __u32 vsid;
    asm volatile (
        "mfsrin %0, %1\n"
        : "=r" (vsid)
        : "r" (vaddr)
    );

    __u32 sdr1;
    asm volatile (
        "mfsdr1 %0\n"
        : "=r" (sdr1)
    );
    phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000);

    /* Primary hash (xor) */
    __u32 h = 0;
    __u32 hash = vsid ^ page;
    __u32 base = (hash & 0x3ff) << 3;
    __u32 i;
    bool found = false;

    /* Find unused or colliding PTE in PTEG */
    for (i = 0; i < 8; i++) {
        if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) {
            found = true;
            break;
        }
    }

    if (!found) {
        /* Secondary hash (not) */
        __u32 base2 = (~hash & 0x3ff) << 3;

        /* Find unused or colliding PTE in PTEG */
        for (i = 0; i < 8; i++) {
            if ((!phte[base2 + i].v) || ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) {
                found = true;
                base = base2;
                h = 1;
                break;
            }
        }

        if (!found) {
            // TODO: A/C precedence groups
            i = page % 8;
        }
    }

    phte[base + i].v = 1;
    phte[base + i].vsid = vsid;
    phte[base + i].h = h;
    phte[base + i].api = api;
    phte[base + i].rpn = pfn;
    phte[base + i].r = 0;
    phte[base + i].c = 0;
    phte[base + i].pp = 2; // FIXME
}


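/*
 * Real-mode variant of pht_insert(): identical logic, but the PHT is
 * accessed through its physical address (no PA2KA translation), so it
 * can be used while address translation is disabled.
 */
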
static void pht_real_insert(const __address vaddr, const pfn_t pfn)
{
    __u32 page = (vaddr >> 12) & 0xffff;
    __u32 api = (vaddr >> 22) & 0x3f;

    __u32 vsid;
    asm volatile (
        "mfsrin %0, %1\n"
        : "=r" (vsid)
        : "r" (vaddr)
    );

    __u32 sdr1;
    asm volatile (
        "mfsdr1 %0\n"
        : "=r" (sdr1)
    );
    phte_t *phte_physical = (phte_t *) (sdr1 & 0xffff0000);

    /* Primary hash (xor) */
    __u32 h = 0;
    __u32 hash = vsid ^ page;
    __u32 base = (hash & 0x3ff) << 3;
    __u32 i;
    bool found = false;

    /* Find unused or colliding PTE in PTEG */
    for (i = 0; i < 8; i++) {
        if ((!phte_physical[base + i].v) || ((phte_physical[base + i].vsid == vsid) && (phte_physical[base + i].api == api))) {
            found = true;
            break;
        }
    }

    if (!found) {
        /* Secondary hash (not) */
        __u32 base2 = (~hash & 0x3ff) << 3;

        /* Find unused or colliding PTE in PTEG */
        for (i = 0; i < 8; i++) {
            if ((!phte_physical[base2 + i].v) || ((phte_physical[base2 + i].vsid == vsid) && (phte_physical[base2 + i].api == api))) {
                found = true;
                base = base2;
                h = 1;
                break;
            }
        }

        if (!found) {
            // TODO: A/C precedence groups
            i = page % 8;
        }
    }

    phte_physical[base + i].v = 1;
    phte_physical[base + i].vsid = vsid;
    phte_physical[base + i].h = h;
    phte_physical[base + i].api = api;
    phte_physical[base + i].rpn = pfn;
    phte_physical[base + i].r = 0;
    phte_physical[base + i].c = 0;
    phte_physical[base + i].pp = 2; // FIXME
}


/** Process Instruction/Data Storage Interrupt
 *
 * Look up the mapping for the faulting address (read from DAR for data
 * accesses, taken from the interrupted PC for instruction fetches) and
 * insert it into the Page Hash Table.
 *
 * @param n Interrupt vector number.
 * @param istate Interrupted register context.
 *
 */
void pht_refill(int n, istate_t *istate)
{
    __address badvaddr;
    pte_t *pte;
    int pfrc;
    as_t *as;
    bool lock;

    if (AS == NULL) {
        as = AS_KERNEL;
        lock = false;
    } else {
        as = AS;
        lock = true;
    }

    if (n == VECTOR_DATA_STORAGE) {
        asm volatile (
            "mfdar %0\n"
            : "=r" (badvaddr)
        );
    } else
        badvaddr = istate->pc;

    page_table_lock(as, lock);

    pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfrc);
    if (!pte) {
        switch (pfrc) {
            case AS_PF_FAULT:
                goto fail;
                break;
            case AS_PF_DEFER:
                /*
                 * The page fault came during copy_from_uspace()
                 * or copy_to_uspace().
                 */
                page_table_unlock(as, lock);
                return;
            default:
                panic("Unexpected pfrc (%d)\n", pfrc);
        }
    }

    pte->a = 1; /* Record access to PTE */
    pht_insert(badvaddr, pte->pfn);

    page_table_unlock(as, lock);
    return;

fail:
    page_table_unlock(as, lock);
    pht_refill_fail(badvaddr, istate);
}


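/*
 * In real mode the kernel page tables cannot be consulted, so only faults
 * that fall into the kernel mapping of physical memory (PA2KA(0) up to
 * PA2KA(physmem)) are resolved here. The size of physical memory is read
 * from SPRG3, where it is assumed to have been stored during early
 * initialization.
 */
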
/** Process Instruction/Data Storage Interrupt in Real Mode
 *
 * @param n Interrupt vector number.
 * @param istate Interrupted register context.
 *
 * @return True if the interrupt was resolved by inserting a PHT entry,
 *         false otherwise.
 */
bool pht_real_refill(int n, istate_t *istate)
{
    __address badvaddr;

    if (n == VECTOR_DATA_STORAGE) {
        asm volatile (
            "mfdar %0\n"
            : "=r" (badvaddr)
        );
    } else
        badvaddr = istate->pc;

    __u32 physmem;
    asm volatile (
        "mfsprg3 %0\n"
        : "=r" (physmem)
    );

    if ((badvaddr >= PA2KA(0)) && (badvaddr <= PA2KA(physmem))) {
        pht_real_insert(badvaddr, KA2PA(badvaddr) >> 12);
        return true;
    }

    return false;
}


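/*
 * Wipe the PHT pointed to by SDR1. The cleared size of 64 KiB corresponds
 * to the minimal PHT configuration implied by the 10-bit hash used above
 * (1024 PTEGs of 64 bytes each).
 */
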
void pht_init(void)
{
    // FIXME

    __u32 sdr1;
    asm volatile (
        "mfsdr1 %0\n"
        : "=r" (sdr1)
    );
    phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000);

    memsetb((__address) phte, 65536, 0);
}


void page_arch_init(void)
{
    if (config.cpu_active == 1)
        page_mapping_operations = &pt_mapping_operations;
}


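/*
 * Map a physical memory area into the kernel address space. The area is
 * placed at the current last_frame watermark, mapped page by page as
 * PAGE_NOT_CACHEABLE (as is usual for memory-mapped I/O), and last_frame
 * is then advanced past it.
 */
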
__address hw_map(__address physaddr, size_t size)
{
    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
        panic("Unable to map physical memory %p (%d bytes)", physaddr, size);

    __address virtaddr = PA2KA(last_frame);
    pfn_t i;
    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);

    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);

    return virtaddr;
}

/** @}
 */