/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <arch.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
    __address adr;
    __u32 count1, count2, stride1, stride2;
    int i, j;
    
    adr = PAL_PTCE_INFO_BASE();
    count1 = PAL_PTCE_INFO_COUNT1();
    count2 = PAL_PTCE_INFO_COUNT2();
    stride1 = PAL_PTCE_INFO_STRIDE1();
    stride2 = PAL_PTCE_INFO_STRIDE2();
    
    interrupts_disable();

    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            __asm__ volatile (
                "ptc.e %0;;"
                :
                : "r" (adr)
            );
            adr += stride2;
        }
        adr += stride1;
    }

    interrupts_enable();

    srlz_d();
    srlz_i();
}
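
/*
 * Illustration with hypothetical PAL values: if PAL_PTCE_INFO reported
 * count1 = 2, count2 = 4, stride1 = 0x2000 and stride2 = 0x1000, the nested
 * loop above would issue 2 * 4 = 8 ptc.e purges, advancing the purge address
 * by stride2 after every inner iteration and by an additional stride1 after
 * each completed inner loop, until the whole TLB has been covered.
 */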

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
    /* TODO */
    tlb_invalidate_all();
}

/** Invalidate entries belonging to an address space in a range of pages.
 *
 * @param asid Address space identifier.
 * @param va Virtual address of the first page in the range.
 * @param cnt Number of pages in the range.
 */
void tlb_invalidate_pages(asid_t asid, __address va, count_t cnt)
{
    region_register rr;
    bool restore_rr = false;
    int b = 0;
    int c = cnt;
    __u64 ps;
    int i;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }
    
    /* Choose the purge page size according to the number of pages. */
    while (c >>= 1)
        b++;
    b >>= 1;
    
    switch (b) {
    case 0:     /* cnt 1 - 3 */
        ps = PAGE_WIDTH;
        break;
    case 1:     /* cnt 4 - 15 */
        cnt = (cnt / 4) + 1;
        ps = PAGE_WIDTH + 2;
        va &= ~((1 << ps) - 1);
        break;
    case 2:     /* cnt 16 - 63 */
        cnt = (cnt / 16) + 1;
        ps = PAGE_WIDTH + 4;
        va &= ~((1 << ps) - 1);
        break;
    case 3:     /* cnt 64 - 255 */
        cnt = (cnt / 64) + 1;
        ps = PAGE_WIDTH + 6;
        va &= ~((1 << ps) - 1);
        break;
    case 4:     /* cnt 256 - 1023 */
        cnt = (cnt / 256) + 1;
        ps = PAGE_WIDTH + 8;
        va &= ~((1 << ps) - 1);
        break;
    case 5:     /* cnt 1024 - 4095 */
        cnt = (cnt / 1024) + 1;
        ps = PAGE_WIDTH + 10;
        va &= ~((1 << ps) - 1);
        break;
    case 6:     /* cnt 4096 - 16383 */
        cnt = (cnt / 4096) + 1;
        ps = PAGE_WIDTH + 12;
        va &= ~((1 << ps) - 1);
        break;
    case 7:     /* cnt 16384 - 65535 */
    case 8:     /* cnt 65536 - (256K - 1) */
        cnt = (cnt / 16384) + 1;
        ps = PAGE_WIDTH + 14;
        va &= ~((1 << ps) - 1);
        break;
    default:
        cnt = (cnt / (16384 * 16)) + 1;
        ps = PAGE_WIDTH + 18;
        va &= ~((1 << ps) - 1);
        break;
    }
    
    for (i = 0; i < cnt; i++) {
        __asm__ volatile (
            "ptc.l %0,%1;;"
            :
            : "r" (va), "r" (ps << 2)
        );
        va += (1 << ps);
    }
    
    srlz_d();
    srlz_i();
    
    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
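
/*
 * Worked example of the purge granularity selection above, assuming
 * PAGE_WIDTH is 14 (16K pages): for cnt = 20 the loop computes
 * b = floor(log2(20)) = 4, which is then halved to b = 2. Case 2 rounds cnt
 * down to 20/16 + 1 = 2, sets ps = PAGE_WIDTH + 4 and aligns va to a 2^ps
 * boundary, so two ptc.l purges of 2^18 bytes each (32 pages in total)
 * cover the requested 20-page range.
 */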

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }
    
    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );
    
    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
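
/*
 * Illustrative sketch only, not part of the original interface: one way a
 * caller might use dtc_mapping_insert() to map a single privileged,
 * read-only, write-back page. The field usage mirrors
 * dtlb_kernel_mapping_insert() below; the function name is hypothetical.
 */
#if 0
static void example_dtc_insert_readonly(__address page, __address frame, asid_t asid)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true;                 /* present */
    entry.ma = MA_WRITEBACK;        /* write-back memory attribute */
    entry.a = true;                 /* mark as already accessed */
    entry.pl = PL_KERNEL;           /* privileged access only */
    entry.ar = AR_READ;             /* read-only access rights */
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    dtc_mapping_insert(page, asid, entry);
}
#endif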

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );
    
    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert a kernel mapping into DTLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
    tlb_entry_t entry;
    
    entry.word[0] = 0;
    entry.word[1] = 0;
    
    entry.p = true;                 /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;                 /* already accessed */
    entry.d = true;                 /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}
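
/*
 * Typical use (see alternate_data_tlb_fault() below): a data TLB miss in the
 * kernel address space is served by mapping the faulting page onto its
 * physical counterpart through the data translation cache:
 *
 *     dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
 *
 * Passing dtr = true together with a translation register index instead
 * pins the mapping in a data translation register so that it cannot be
 * evicted from the translation cache.
 */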

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;
    
    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.d = t->d;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->w ? AR_WRITE : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    dtc_mapping_insert(t->page, t->as->asid, entry);
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;
    
    ASSERT(t->x);
    
    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    itc_mapping_insert(t->page, t->as->asid, entry);
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    __address va;
    pte_t *t;
    
    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in software page hash table.
         * Insert it into instruction translation cache.
         */
        itc_pte_copy(t);
    } else {
        /*
         * Forward the page fault to address space page fault handler.
         */
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    rid_t rid;
    __address va;
    pte_t *t;
    
    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;
    if (RID2ASID(rid) == ASID_KERNEL) {
        if (VA2VRN(va) == VRN_KERNEL) {
            /*
             * Provide KA2PA(identity) mapping for the faulting piece of
             * kernel address space.
             */
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
            return;
        }
    }

    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in software page hash table.
         * Insert it into data translation cache.
         */
        dtc_pte_copy(t);
    } else {
        /*
         * Forward the page fault to address space page fault handler.
         */
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Dirty bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->d = true;
        dtc_pte_copy(t);
    }
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into ITC.
         */
        t->a = true;
        itc_pte_copy(t);
    }
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->a = true;
        dtc_pte_copy(t);
    }
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    __address va;
    pte_t *t;
    
    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    t = page_mapping_find(AS, va);
    ASSERT(t);
    
    if (t->p) {
        /*
         * If the Present bit is set in page hash table, just copy it
         * and update ITC/DTC.
         */
        if (t->x)
            itc_pte_copy(t);
        else
            dtc_pte_copy(t);
    } else {
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}