/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
  28.  
  29. /** @addtogroup ia64mm 
  30.  * @{
  31.  */
  32. /** @file
  33.  */
  34.  
  35. /*
  36.  * TLB management.
  37.  */
  38.  
  39. #include <mm/tlb.h>
  40. #include <mm/asid.h>
  41. #include <mm/page.h>
  42. #include <mm/as.h>
  43. #include <arch/mm/tlb.h>
  44. #include <arch/mm/page.h>
  45. #include <arch/mm/vhpt.h>
  46. #include <arch/barrier.h>
  47. #include <arch/interrupt.h>
  48. #include <arch/pal/pal.h>
  49. #include <arch/asm.h>
  50. #include <panic.h>
  51. #include <print.h>
  52. #include <arch.h>
  53. #include <interrupt.h>
  54.  
/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
    ipl_t ipl;
    uintptr_t adr;
    uint32_t count1, count2, stride1, stride2;
    
    unsigned int i, j;
    
    /* Fetch the ptc.e purge loop parameters from PAL. */
    adr = PAL_PTCE_INFO_BASE();
    count1 = PAL_PTCE_INFO_COUNT1();
    count2 = PAL_PTCE_INFO_COUNT2();
    stride1 = PAL_PTCE_INFO_STRIDE1();
    stride2 = PAL_PTCE_INFO_STRIDE2();
    
    ipl = interrupts_disable();

    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            asm volatile (
                "ptc.e %0 ;;"
                :
                : "r" (adr)
            );
            adr += stride2;
        }
        adr += stride1;
    }

    interrupts_restore(ipl);

    srlz_d();
    srlz_i();
#ifdef CONFIG_VHPT
    vhpt_invalidate_all();
#endif
}
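
/*
 * Usage sketch (the call site below is hypothetical): a full flush is the
 * coarsest tool available and is the appropriate fallback whenever no
 * per-page or per-ASID information survives, e.g. after the ASID allocator
 * wraps around:
 *
 *	tlb_invalidate_all();
 */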

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
    /* Coarse-grained implementation: flush the entire TLB. */
    tlb_invalidate_all();
}

/** Invalidate entries belonging to a range of pages.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entry is to be invalidated.
 * @param cnt Number of entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    region_register rr;
    bool restore_rr = false;
    int b = 0;
    int c = cnt;

    uintptr_t va;
    va = page;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }
    
    /*
     * Compute b = floor(log2(cnt)) / 2 and use it to pick the smallest
     * supported purge size that covers the whole range.
     */
    while (c >>= 1)
        b++;
    b >>= 1;
    uint64_t ps;
    
    switch (b) {
    case 0: /* cnt 1 - 3 */
        ps = PAGE_WIDTH;
        break;
    case 1: /* cnt 4 - 15 */
        ps = PAGE_WIDTH + 2;
        va &= ~((1UL << ps) - 1);
        break;
    case 2: /* cnt 16 - 63 */
        ps = PAGE_WIDTH + 4;
        va &= ~((1UL << ps) - 1);
        break;
    case 3: /* cnt 64 - 255 */
        ps = PAGE_WIDTH + 6;
        va &= ~((1UL << ps) - 1);
        break;
    case 4: /* cnt 256 - 1023 */
        ps = PAGE_WIDTH + 8;
        va &= ~((1UL << ps) - 1);
        break;
    case 5: /* cnt 1024 - 4095 */
        ps = PAGE_WIDTH + 10;
        va &= ~((1UL << ps) - 1);
        break;
    case 6: /* cnt 4096 - 16383 */
        ps = PAGE_WIDTH + 12;
        va &= ~((1UL << ps) - 1);
        break;
    case 7: /* cnt 16384 - 65535 */
    case 8: /* cnt 65536 - (256K - 1) */
        ps = PAGE_WIDTH + 14;
        va &= ~((1UL << ps) - 1);
        break;
    default:
        ps = PAGE_WIDTH + 18;
        va &= ~((1UL << ps) - 1);
        break;
    }
    /* Purge the range with ptc.l; the purge size goes into bits 7:2. */
    for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps)) {
        asm volatile (
            "ptc.l %0, %1;;"
            :
            : "r" (va), "r" (ps << 2)
        );
    }
    srlz_d();
    srlz_i();
    
    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
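
/*
 * Illustrative example (hypothetical call site): with cnt == 16, the
 * computation above yields b == 2 and hence ps == PAGE_WIDTH + 4, so a
 * single ptc.l purges the whole aligned 16-page block:
 *
 *	tlb_invalidate_pages(as->asid, page, 16);
 *
 * Here `as` and `page` stand for an address space and the first page of the
 * range being invalidated.
 */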

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction
 *     translation cache otherwise.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }
    
    asm volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
          "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );
    
    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
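
/*
 * Note on the insertion sequence in tc_mapping_insert(): PSR.ic is cleared
 * (rsm) and serialized before cr.ifa and cr.itir are programmed, so that no
 * interruption can clobber those registers before the itc instruction
 * consumes them; the p6/p7 predicates then select the instruction or data
 * variant of the insertion.
 */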

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction
 *     translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
    index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    asm volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
          "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );
    
    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into DTLB.
 *
 * @param page Virtual page address including VRN bits.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data
 *     translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
    index_t tr)
{
    tlb_entry_t entry;
    
    entry.word[0] = 0;
    entry.word[1] = 0;
    
    entry.p = true;             /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;             /* already accessed */
    entry.d = true;             /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}
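
/*
 * Usage sketch (hypothetical call site; slot number chosen for
 * illustration): pin a kernel page into DTR slot 0 so that accesses to it
 * can never raise a TLB miss:
 *
 *	dtlb_kernel_mapping_insert(page, KA2PA(page), true, 0);
 */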

/** Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, count_t width)
{
    asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
}
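
/*
 * Example (continuing the hypothetical sketch above): evict the pinned page
 * again, purging one page's worth of the DTR:
 *
 *	dtr_purge(page, PAGE_WIDTH);
 */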

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;
    
    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.d = t->d;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->w ? AR_WRITE : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;
    
    ASSERT(t->x);
    
    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;
    
    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the instruction translation cache.
         */
        itc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
                istate->cr_iip);
        }
    }
}

static int is_io_page_accessible(int page)
{
    if (TASK->arch.iomap)
        return bitmap_get(TASK->arch.iomap, page);
    else
        return 0;
}

#define IO_FRAME_BASE 0xFFFFC000000

/** Handle a fault caused by an access to memory-mapped legacy I/O.
 *
 * Memory-mapped legacy I/O gets special handling because userspace
 * accesses it with 4 KiB granularity.
 *
 * @param va Virtual address of the page fault.
 * @param istate Structure with saved interruption state.
 *
 * @return 1 on success, 0 on failure.
 */
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
    if ((va >= IO_OFFSET) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
        if (TASK) {
            uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
                USPACE_IO_PAGE_WIDTH;

            if (is_io_page_accessible(io_page)) {
                uint64_t page, frame;

                page = IO_OFFSET +
                    (1 << USPACE_IO_PAGE_WIDTH) * io_page;
                frame = IO_FRAME_BASE +
                    (1 << USPACE_IO_PAGE_WIDTH) * io_page;

                tlb_entry_t entry;

                entry.word[0] = 0;
                entry.word[1] = 0;

                entry.p = true;             /* present */
                entry.ma = MA_UNCACHEABLE;
                entry.a = true;             /* already accessed */
                entry.d = true;             /* already dirty */
                entry.pl = PL_USER;
                entry.ar = AR_READ | AR_WRITE;
                entry.ppn = frame >> PPN_SHIFT;
                entry.ps = USPACE_IO_PAGE_WIDTH;

                dtc_mapping_insert(page, TASK->as->asid, entry);
                return 1;
            } else {
                fault_if_from_uspace(istate,
                    "IO access fault at %p", va);
            }
        }
    }

    return 0;
}
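
/*
 * Worked example (assuming IO_PAGE_WIDTH == 26 and USPACE_IO_PAGE_WIDTH ==
 * 12): a faulting va of IO_OFFSET + 0x3008 falls into I/O page 3, so the
 * 4 KiB page at IO_OFFSET + 0x3000 is mapped uncacheable onto the frame at
 * IO_FRAME_BASE + 0x3000, provided bit 3 is set in the task's iomap.
 */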

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;
    
    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;
    if (RID2ASID(rid) == ASID_KERNEL) {
        if (VA2VRN(va) == VRN_KERNEL) {
            /*
             * Provide the KA2PA (identity) mapping for the faulting
             * piece of kernel address space.
             */
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
            return;
        }
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the data translation cache.
         */
        dtc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        page_table_unlock(AS, true);
        if (try_memmap_io_insertion(va, istate))
            return;
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
                istate->cr_iip);
        }
    }
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
{
    panic("%s\n", __func__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;
    
    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p && t->w) {
        /*
         * Update the Dirty bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->d = true;
        dtc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
                istate->cr_iip);
        }
    }
    page_table_unlock(AS, true);
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p && t->x) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into ITC.
         */
        t->a = true;
        itc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
                istate->cr_iip);
        }
    }
    page_table_unlock(AS, true);
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_access_bit_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->a = true;
        dtc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
                istate->cr_iip);
        }
    }
    page_table_unlock(AS, true);
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void page_not_present(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;
    
    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t);
    
    if (t->p) {
        /*
         * If the Present bit is set in the page hash table, just copy
         * the mapping and update the ITC/DTC.
         */
        if (t->x)
            itc_pte_copy(t);
        else
            dtc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d\n", __func__, va, rid);
        }
    }
}

/** @}
 */