/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <arch.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
	__address adr;
	__u32 count1, count2, stride1, stride2;
	int i, j;

	/*
	 * Query PAL for the ptc.e purge loop parameters:
	 * base address, outer/inner iteration counts and strides.
	 */
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	interrupts_disable();

	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			__asm__ volatile (
				"ptc.e %0;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_enable();

	srlz_d();
	srlz_i();
}

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	/* TODO */
}

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain the required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

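	/*
	 * The insertion itself is done with PSR.ic cleared: save PSR to r8,
	 * clear PSR.ic (rsm), serialize, program cr.ifa with va and cr.itir
	 * with entry.word[1], then issue itc.i or itc.d depending on the
	 * dtc argument, restore PSR and serialize again.
	 */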
	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain the required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

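	/*
	 * Same sequence as in tc_mapping_insert(), except that the entry is
	 * placed into the instruction or data translation register indexed
	 * by tr (itr.i/itr.d) instead of the translation cache.
	 */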
	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into DTLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = true;			/* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true;			/* already accessed */
	entry.d = true;			/* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}

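/*
 * Minimal usage sketch: alternate_data_tlb_fault() below maps a faulting
 * kernel address va through the DTC with the identity KA2PA() translation:
 *
 *	dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
 *
 * Passing dtr = true together with a translation register index would pin
 * the mapping in a DTR instead of the DTC.
 */
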
/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.d = t->d;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->w ? AR_WRITE : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	dtc_mapping_insert(t->page, t->as->asid, entry);
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	ASSERT(t->x);

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	itc_mapping_insert(t->page, t->as->asid, entry);
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));	/* needed only for the RID in the panic message */
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into instruction translation cache.
		 */
		itc_pte_copy(t);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide a KA2PA (identity) mapping for the faulting
			 * piece of kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}

	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(t);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	}
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	}
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	}
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));	/* needed only for the RID in the panic message */
	t = page_mapping_find(AS, va);
	ASSERT(t);

	if (t->p) {
		/*
		 * If the Present bit is set in the page hash table, just copy it
		 * and update ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
	} else {
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}