/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia64mm
 * @{
 */
/** @file
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <print.h>
#include <arch.h>
#include <interrupt.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
    ipl_t ipl;
    uintptr_t adr;
    uint32_t count1, count2, stride1, stride2;

    int i, j;

    adr = PAL_PTCE_INFO_BASE();
    count1 = PAL_PTCE_INFO_COUNT1();
    count2 = PAL_PTCE_INFO_COUNT2();
    stride1 = PAL_PTCE_INFO_STRIDE1();
    stride2 = PAL_PTCE_INFO_STRIDE2();

    ipl = interrupts_disable();

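    /*
     * The PAL_PTCE_INFO firmware call describes the loop required to flush
     * the entire TLB: count1 x count2 iterations of ptc.e, with the operand
     * advanced by stride2 in the inner loop and by stride1 in the outer
     * loop, as mandated by the Itanium architecture.
     */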
    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            asm volatile (
                "ptc.e %0 ;;"
                :
                : "r" (adr)
            );
            adr += stride2;
        }
        adr += stride1;
    }

    interrupts_restore(ipl);

    srlz_d();
    srlz_i();
#ifdef CONFIG_VHPT
    vhpt_invalidate_all();
#endif
}

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
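    /*
     * ia64 has no cheap way to purge all translations for a single RID
     * from the TLB, so the whole TLB is invalidated instead. Coarse, but
     * correct.
     */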
    tlb_invalidate_all();
}

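/** Invalidate TLB entries for an array of pages.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entries are to be invalidated.
 * @param cnt Number of pages to invalidate.
 */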
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    region_register rr;
    bool restore_rr = false;
    int b = 0;
    int c = cnt;

    uintptr_t va;
    va = page;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

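    /*
     * Compute b = floor(log2(cnt)) / 2. The switch below then purges in
     * blocks of roughly 4^b pages (ps = PAGE_WIDTH + 2 * b), with va
     * aligned down to the block size, so that the whole range is covered
     * by a modest number of ptc.l operations.
     */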
    while (c >>= 1)
        b++;
    b >>= 1;
    uint64_t ps;

    switch (b) {
    case 0: /* cnt 1 - 3 */
        ps = PAGE_WIDTH;
        break;
    case 1: /* cnt 4 - 15 */
        /* cnt = ((cnt - 1) / 4) + 1; */
        ps = PAGE_WIDTH + 2;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 2: /* cnt 16 - 63 */
        /* cnt = ((cnt - 1) / 16) + 1; */
        ps = PAGE_WIDTH + 4;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 3: /* cnt 64 - 255 */
        /* cnt = ((cnt - 1) / 64) + 1; */
        ps = PAGE_WIDTH + 6;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 4: /* cnt 256 - 1023 */
        /* cnt = ((cnt - 1) / 256) + 1; */
        ps = PAGE_WIDTH + 8;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 5: /* cnt 1024 - 4095 */
        /* cnt = ((cnt - 1) / 1024) + 1; */
        ps = PAGE_WIDTH + 10;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 6: /* cnt 4096 - 16383 */
        /* cnt = ((cnt - 1) / 4096) + 1; */
        ps = PAGE_WIDTH + 12;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 7: /* cnt 16384 - 65535 */
    case 8: /* cnt 65536 - (256K - 1) */
        /* cnt = ((cnt - 1) / 16384) + 1; */
        ps = PAGE_WIDTH + 14;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    default:
        /* cnt = ((cnt - 1) / (16384 * 16)) + 1; */
        ps = PAGE_WIDTH + 18;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    }
    /* cnt += (page != va); */
    for (; va < (page + cnt * PAGE_SIZE); va += ((uint64_t) 1 << ps)) {
        asm volatile (
            "ptc.l %0, %1;;"
            :
            : "r" (va), "r" (ps << 2)
        );
    }
    srlz_d();
    srlz_i();

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
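
/*
 * Example (illustrative only): invalidate the TLB entries covering a
 * 16-page range starting at page in the current address space:
 *
 *    tlb_invalidate_pages(AS->asid, page, 16);
 */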

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param dtc If true, insert into the data translation cache; otherwise insert
 *     into the instruction translation cache.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

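    /*
     * The insertion sequence must run with PSR.ic cleared (interruption
     * collection disabled), because writing cr.ifa and cr.itir and issuing
     * itc is only legal when the processor is not collecting interruption
     * state. The previous PSR value is saved in r8 and restored afterwards.
     */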
    asm volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]),
          "r" (dtc)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param dtr If true, insert into a data translation register; otherwise insert
 *     into an instruction translation register.
 * @param tr Translation register.
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

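    /*
     * Same PSR.ic sequence as in tc_mapping_insert(), except that the
     * entry is placed into a translation register slot (itr.i/itr.d),
     * which pins it: unlike translation cache entries, TR entries are
     * never evicted by the hardware.
     */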
    asm volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]),
          "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into DTLB.
 *
 * @param page Virtual page address including VRN bits.
 * @param frame Physical frame address.
 * @param dtr If true, insert into a data translation register; otherwise insert
 *     into the data translation cache.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true;             /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;             /* already accessed */
    entry.d = true;             /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}
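
/*
 * Example (illustrative only; assumes the PA2KA() counterpart of KA2PA()):
 * pin the frame at physical address pa into DTR slot 1 via its kernel
 * identity mapping:
 *
 *    dtlb_kernel_mapping_insert(PA2KA(pa), pa, true, 1);
 */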

/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, count_t width)
{
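    /*
     * The purge size is encoded in bits 7:2 of the second ptr.d operand,
     * hence the << 2 shift.
     */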
    asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
}

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.d = t->d;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->w ? AR_WRITE : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    ASSERT(t->x);

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the instruction translation cache.
         */
        itc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
        }
    }
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;
    if (RID2ASID(rid) == ASID_KERNEL) {
        if (VA2VRN(va) == VRN_KERNEL) {
            /*
             * Provide the KA2PA (identity) mapping for the faulting
             * piece of the kernel address space.
             */
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
            return;
        }
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the data translation cache.
         */
        dtc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
        }
    }
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
{
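    /*
     * A nested DTLB fault is taken when a TLB miss occurs while the CPU
     * is already servicing another miss (with PSR.ic cleared). The
     * low-level handlers are written so that this cannot happen, hence
     * the unconditional panic.
     */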
    panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

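    /*
     * The Dirty bit is managed in software: the first write through a
     * clean mapping raises this fault, and the handler below marks the
     * PTE dirty and reinstalls the translation with the D bit set.
     */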
    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p && t->w) {
        /*
         * Update the Dirty bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->d = true;
        dtc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
        }
    }
    page_table_unlock(AS, true);
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p && t->x) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into ITC.
         */
        t->a = true;
        itc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
        }
    }
    page_table_unlock(AS, true);
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_access_bit_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->a = true;
        dtc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
        }
    }
    page_table_unlock(AS, true);
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void page_not_present(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t);

    if (t->p) {
        /*
         * If the Present bit is set in the page hash table, just copy
         * the mapping and update the ITC/DTC.
         */
        if (t->x)
            itc_pte_copy(t);
        else
            dtc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
        }
    }
}

/** @}
 */