Subversion Repositories HelenOS-historic

/*
 * Copyright (C) 2005 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch.h>
#include <arch/types.h>
#include <arch/exception.h>
#include <align.h>
#include <config.h>
#include <print.h>
#include <symtab.h>

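/* Kernel virtual address of the page hash table (PHT); set up in page_arch_init(). */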
static phte_t *phte;


/** Try to find PTE for faulting address
 *
 * Try to find PTE for faulting address.
 * The as->lock must be held on entry to this function
 * if lock is true.
 *
 * @param as       Address space.
 * @param lock     Lock/unlock the address space.
 * @param badvaddr Faulting virtual address.
 * @param istate   Pointer to interrupted state.
 * @param pfcr     Pointer to variable where as_page_fault() return code will be stored.
 * @return         PTE on success, NULL otherwise.
 *
 */
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr)
{
    /*
     * Check if the mapping exists in page tables.
     */
    pte_t *pte = page_mapping_find(as, badvaddr);
    if ((pte) && (pte->p)) {
        /*
         * Mapping found in page tables.
         * Immediately succeed.
         */
        return pte;
    } else {
        int rc;

        /*
         * Mapping not found in page tables.
         * Resort to higher-level page fault handler.
         */
        page_table_unlock(as, lock);
        switch (rc = as_page_fault(badvaddr, istate)) {
            case AS_PF_OK:
                /*
                 * The higher-level page fault handler succeeded;
                 * the mapping ought to be in place.
                 */
                page_table_lock(as, lock);
                pte = page_mapping_find(as, badvaddr);
                ASSERT((pte) && (pte->p));
                return pte;
            case AS_PF_DEFER:
                page_table_lock(as, lock);
                *pfcr = rc;
                return NULL;
            case AS_PF_FAULT:
                page_table_lock(as, lock);
                printf("Page fault.\n");
                *pfcr = rc;
                return NULL;
            default:
                panic("unexpected rc (%d)\n", rc);
        }
    }
}


static void pht_refill_fail(__address badvaddr, istate_t *istate)
{
    char *symbol = "";
    char *sym2 = "";

    char *s = get_symtab_entry(istate->pc);
    if (s)
        symbol = s;
    s = get_symtab_entry(istate->lr);
    if (s)
        sym2 = s;
    panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, symbol, sym2);
}


static void pht_insert(const __address vaddr, const pfn_t pfn)
{
    __u32 page = (vaddr >> 12) & 0xffff;
    __u32 api = (vaddr >> 22) & 0x3f;
    __u32 vsid;

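    /* Read the segment register covering vaddr (selected by its top four bits)
       to obtain the VSID used for hashing and for the PTE itself. */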
    asm volatile (
        "mfsrin %0, %1\n"
        : "=r" (vsid)
        : "r" (vaddr)
    );

    /* Primary hash (xor) */
    __u32 h = 0;
    __u32 hash = vsid ^ page;
    __u32 base = (hash & 0x3ff) << 3;
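    /* Each PTEG holds 8 entries, hence the group index is scaled by 8 (<< 3).
       Masking the hash with 0x3ff addresses 1024 groups, i.e. only the
       architectural minimum 64 KB worth of hash table is ever indexed here. */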
    __u32 i;
    bool found = false;

    /* Find unused or colliding
       PTE in PTEG */
    for (i = 0; i < 8; i++) {
        if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) {
            found = true;
            break;
        }
    }

    if (!found) {
        /* Secondary hash (not) */
        __u32 base2 = (~hash & 0x3ff) << 3;

        /* Find an unused PTE in the secondary PTEG */
        for (i = 0; i < 8; i++) {
            if (!phte[base2 + i].v) {
                found = true;
                base = base2;
                h = 1;
                break;
            }
        }

        if (!found) {
            // TODO: A/C precedence groups
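            /* Both PTEGs are full: overwrite the slot selected by the
               low bits of the page index as a crude eviction policy. */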
            i = page % 8;
        }
    }

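    /*
     * Fill in (or overwrite) the selected slot:
     * v = valid, vsid = virtual segment ID, h = hash function selector,
     * api = abbreviated page index, rpn = real page number,
     * r/c = referenced/changed bits, pp = page protection.
     */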
    phte[base + i].v = 1;
    phte[base + i].vsid = vsid;
    phte[base + i].h = h;
    phte[base + i].api = api;
    phte[base + i].rpn = pfn;
    phte[base + i].r = 0;
    phte[base + i].c = 0;
    phte[base + i].pp = 2; // FIXME
}


/** Process Instruction/Data Storage Interrupt
 *
 * @param data   True if Data Storage Interrupt.
 * @param istate Interrupted register context.
 *
 */
void pht_refill(bool data, istate_t *istate)
{
    __address badvaddr;
    pte_t *pte;
    int pfcr;
    as_t *as;
    bool lock;

    if (AS == NULL) {
        as = AS_KERNEL;
        lock = false;
    } else {
        as = AS;
        lock = true;
    }

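    /* A Data Storage Interrupt reports the faulting address in the DAR
       register; for an Instruction Storage Interrupt it is the interrupted PC. */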
    if (data) {
        asm volatile (
            "mfdar %0\n"
            : "=r" (badvaddr)
        );
    } else
        badvaddr = istate->pc;

    page_table_lock(as, lock);

    pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr);
    if (!pte) {
        switch (pfcr) {
            case AS_PF_FAULT:
                goto fail;
                break;
            case AS_PF_DEFER:
                /*
                 * The page fault came during copy_from_uspace()
                 * or copy_to_uspace().
                 */
                page_table_unlock(as, lock);
                return;
            default:
                panic("Unexpected pfcr (%d)\n", pfcr);
        }
    }

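    /* The mapping exists in the generic page tables; mirror it into the
       hardware hash table so the retried access can be translated. */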
    pte->a = 1; /* Record access to PTE */
    pht_insert(badvaddr, pte->pfn);

    page_table_unlock(as, lock);
    return;

fail:
    page_table_unlock(as, lock);
    pht_refill_fail(badvaddr, istate);
}


void pht_init(void)
{
    memsetb((__address) phte, 1 << PHT_BITS, 0);
}


void page_arch_init(void)
{
    if (config.cpu_active == 1) {
        page_mapping_operations = &pt_mapping_operations;

        __address cur;
        int flags;

        /* Pages below 128 MB are mapped using BAT;
           map the rest of the physical memory here. */
        for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
            flags = PAGE_CACHEABLE;
            if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
                flags |= PAGE_GLOBAL;
            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
        }

        /* Allocate page hash table */
        phte_t *physical_phte = (phte_t *) PFN2ADDR(frame_alloc(PHT_ORDER, FRAME_KA | FRAME_PANIC));
        phte = (phte_t *) PA2KA((__address) physical_phte);

        ASSERT((__address) physical_phte % (1 << PHT_BITS) == 0);
        pht_init();

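        /* Publish the physical base address of the PHT to the MMU via the SDR1 register. */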
        asm volatile (
            "mtsdr1 %0\n"
            :
            : "r" ((__address) physical_phte)
        );
    }
}


__address hw_map(__address physaddr, size_t size)
{
    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
        panic("Unable to map physical memory %p (%d bytes)", physaddr, size);

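    /* Map the region page by page at the kernel-virtual alias of last_frame,
       with caching disabled (PAGE_NOT_CACHEABLE). */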
    __address virtaddr = PA2KA(last_frame);
    pfn_t i;
    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);

    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);

    return virtaddr;
}