/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia64mm
 * @{
 */
/** @file
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <panic.h>
#include <print.h>
#include <arch.h>
#include <interrupt.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
    ipl_t ipl;
    uintptr_t adr;
    uint32_t count1, count2, stride1, stride2;

    unsigned int i, j;

    adr = PAL_PTCE_INFO_BASE();
    count1 = PAL_PTCE_INFO_COUNT1();
    count2 = PAL_PTCE_INFO_COUNT2();
    stride1 = PAL_PTCE_INFO_STRIDE1();
    stride2 = PAL_PTCE_INFO_STRIDE2();

    ipl = interrupts_disable();

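    /*
     * ptc.e purges the entire translation cache on the local processor;
     * PAL supplies the base address and the count/stride pairs that
     * describe the architecturally required loop.
     */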
    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            asm volatile (
                "ptc.e %0 ;;"
                :
                : "r" (adr)
            );
            adr += stride2;
        }
        adr += stride1;
    }

    interrupts_restore(ipl);

    srlz_d();
    srlz_i();
#ifdef CONFIG_VHPT
    vhpt_invalidate_all();
#endif
}

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
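    /*
     * A selective purge by ASID is not implemented here; fall back to
     * invalidating the entire TLB.
     */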
    tlb_invalidate_all();
}

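/** Invalidate entries belonging to a range of pages in an address space.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entry is to be invalidated.
 * @param cnt Number of pages.
 */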
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    region_register rr;
    bool restore_rr = false;
    int b = 0;
    int c = cnt;

    uintptr_t va;
    va = page;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

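    /*
     * Compute b = floor(log2(cnt)) / 2; the switch below uses it to
     * choose the purge page size 2^ps covering the requested range.
     */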
    while (c >>= 1)
        b++;
    b >>= 1;
    uint64_t ps;

    switch (b) {
    case 0: /* cnt 1 - 3 */
        ps = PAGE_WIDTH;
        break;
    case 1: /* cnt 4 - 15 */
        ps = PAGE_WIDTH + 2;
        va &= ~((1ULL << ps) - 1);
        break;
    case 2: /* cnt 16 - 63 */
        ps = PAGE_WIDTH + 4;
        va &= ~((1ULL << ps) - 1);
        break;
    case 3: /* cnt 64 - 255 */
        ps = PAGE_WIDTH + 6;
        va &= ~((1ULL << ps) - 1);
        break;
    case 4: /* cnt 256 - 1023 */
        ps = PAGE_WIDTH + 8;
        va &= ~((1ULL << ps) - 1);
        break;
    case 5: /* cnt 1024 - 4095 */
        ps = PAGE_WIDTH + 10;
        va &= ~((1ULL << ps) - 1);
        break;
    case 6: /* cnt 4096 - 16383 */
        ps = PAGE_WIDTH + 12;
        va &= ~((1ULL << ps) - 1);
        break;
    case 7: /* cnt 16384 - 65535 */
    case 8: /* cnt 65536 - (256K - 1) */
        ps = PAGE_WIDTH + 14;
        va &= ~((1ULL << ps) - 1);
        break;
    default:
        ps = PAGE_WIDTH + 18;
        va &= ~((1ULL << ps) - 1);
        break;
    }
    for (; va < (page + cnt * PAGE_SIZE); va += (1ULL << ps)) {
        asm volatile (
            "ptc.l %0, %1;;"
            :
            : "r" (va), "r" (ps << 2)
        );
    }
    srlz_d();
    srlz_i();

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction
 *     translation cache otherwise.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

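    /*
     * PSR.ic must be cleared while cr.ifa and cr.itir are being written
     * and the insertion executes; the p6/p7 predicates select between
     * the itc.i and itc.d forms.
     */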
    asm volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
          "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction
 *     translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

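    /*
     * As in tc_mapping_insert(), PSR.ic is cleared for the duration of
     * the insertion; the p6/p7 predicates select between itr.i and
     * itr.d, with %4 indexing the target translation register.
     */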
    asm volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
          "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into DTLB.
 *
 * @param page Virtual page address including VRN bits.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data
 *     translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true;             /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;             /* already accessed */
    entry.d = true;             /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}

/** Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, count_t width)
{
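    /*
     * The purge size (in bits) is encoded in bits 7:2 of the second
     * operand, hence the width << 2 shift.
     */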
    asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
}

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

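    /*
     * Translate the generic PTE attributes into ia64 TLB entry fields:
     * memory attribute, privilege level and access rights.
     */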
    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.d = t->d;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->w ? AR_WRITE : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    ASSERT(t->x);

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the instruction translation cache.
         */
        itc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
        }
    }
}
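/** Decide whether the current task is allowed to access the given I/O page. */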
static int is_io_page_accessible(int page)
{
    if (TASK->arch.iomap)
        return bitmap_get(TASK->arch.iomap, page);
    else
        return 0;
}

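/* Physical base address of the memory-mapped legacy I/O area. */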
#define IO_FRAME_BASE 0xFFFFC000000

/** Special handling of memory-mapped legacy I/O.
 *
 * Memory-mapped legacy I/O is handled specially because it is accessed
 * in 4 KiB pages and only from userspace.
 *
 * @param va Virtual address of the page fault.
 * @param istate Structure with saved interruption state.
 *
 * @return 1 on success, 0 on failure.
 */
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
    if ((va >= IO_OFFSET) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
        if (TASK) {
            uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
                USPACE_IO_PAGE_WIDTH;

            if (is_io_page_accessible(io_page)) {
                uint64_t page, frame;

                page = IO_OFFSET + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
                frame = IO_FRAME_BASE + (1 << USPACE_IO_PAGE_WIDTH) * io_page;

                tlb_entry_t entry;

                entry.word[0] = 0;
                entry.word[1] = 0;

                entry.p = true;             /* present */
                entry.ma = MA_UNCACHEABLE;
                entry.a = true;             /* already accessed */
                entry.d = true;             /* already dirty */
                entry.pl = PL_USER;
                entry.ar = AR_READ | AR_WRITE;
                entry.ppn = frame >> PPN_SHIFT;    /* TODO: compute the frame */
                entry.ps = USPACE_IO_PAGE_WIDTH;

                dtc_mapping_insert(page, TASK->as->asid, entry); /* TODO: determine the ASID */
                return 1;
            } else {
                fault_if_from_uspace(istate, "IO access fault at %p", va);
            }
        }
    }

    return 0;
}
/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;
    if (RID2ASID(rid) == ASID_KERNEL) {
        if (VA2VRN(va) == VRN_KERNEL) {
            /*
             * Provide KA2PA(identity) mapping for the faulting piece of
             * kernel address space.
             */
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
            return;
        }
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the data translation cache.
         */
        dtc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        page_table_unlock(AS, true);
        if (try_memmap_io_insertion(va, istate))
            return;
        /*
         * Forward the page fault to the address space page fault handler.
         */
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
        }
    }
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
{
    panic("%s\n", __func__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p && t->w) {
        /*
         * Update the Dirty bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->d = true;
        dtc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
        }
    }
    page_table_unlock(AS, true);
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p && t->x) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into ITC.
         */
        t->a = true;
        itc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
        }
    }
    page_table_unlock(AS, true);
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_access_bit_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->a = true;
        dtc_pte_copy(t);
    } else {
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
        }
    }
    page_table_unlock(AS, true);
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void page_not_present(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    ASSERT(t);

    if (t->p) {
        /*
         * If the Present bit is set in the page hash table, just copy
         * the mapping and update the ITC/DTC.
         */
        if (t->x)
            itc_pte_copy(t);
        else
            dtc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate, "Page fault at %p", va);
            panic("%s: va=%p, rid=%d\n", __func__, va, rid);
        }
    }
}

/** @}
 */