Subversion Repositories HelenOS-historic

Rev

Rev 935 | Rev 945 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (C) 2006 Jakub Jermar
  3.  * All rights reserved.
  4.  *
  5.  * Redistribution and use in source and binary forms, with or without
  6.  * modification, are permitted provided that the following conditions
  7.  * are met:
  8.  *
  9.  * - Redistributions of source code must retain the above copyright
  10.  *   notice, this list of conditions and the following disclaimer.
  11.  * - Redistributions in binary form must reproduce the above copyright
  12.  *   notice, this list of conditions and the following disclaimer in the
  13.  *   documentation and/or other materials provided with the distribution.
  14.  * - The name of the author may not be used to endorse or promote products
  15.  *   derived from this software without specific prior written permission.
  16.  *
  17.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27.  */
  28.  
  29. /*
  30.  * TLB management.
  31.  */
  32.  
  33. #include <mm/tlb.h>
  34. #include <mm/asid.h>
  35. #include <mm/page.h>
  36. #include <mm/as.h>
  37. #include <arch/mm/tlb.h>
  38. #include <arch/mm/page.h>
  39. #include <arch/barrier.h>
  40. #include <arch/interrupt.h>
  41. #include <arch/pal/pal.h>
  42. #include <arch/asm.h>
  43. #include <typedefs.h>
  44. #include <panic.h>
  45. #include <arch.h>
  46.  
  47.  
  48.  
  49. /** Invalidate all TLB entries. */
  50. void tlb_invalidate_all(void)
  51. {
  52.         __address adr;
  53.         __u32 count1,count2,stride1,stride2;
  54.        
  55.         int i,j;
  56.        
  57.         adr=PAL_PTCE_INFO_BASE();
  58.         count1=PAL_PTCE_INFO_COUNT1();
  59.         count2=PAL_PTCE_INFO_COUNT2();
  60.         stride1=PAL_PTCE_INFO_STRIDE1();
  61.         stride2=PAL_PTCE_INFO_STRIDE2();
  62.        
  63.         interrupts_disable();
  64.  
  65.         for(i=0;i<count1;i++)
  66.         {
  67.             for(j=0;j<count2;j++)
  68.             {
  69.                 asm volatile
  70.                 (
  71.                     "ptc.e %0;;"
  72.                     :
  73.                     :"r" (adr)
  74.                 );
  75.                 adr+=stride2;
  76.             }
  77.             adr+=stride1;
  78.         }
  79.  
  80.         interrupts_enable();
  81.  
  82.         srlz_d();
  83.         srlz_i();
  84. }
  85.  
/** Invalidate entries belonging to an address space.
 *
 * Selective invalidation by ASID is not implemented yet; as a
 * conservative (and correct, if slow) fallback the whole TLB is purged.
 *
 * @param asid Address space identifier (currently unused).
 */
void tlb_invalidate_asid(asid_t asid)
{
    /* TODO: purge only entries tagged with asid instead of everything */
    tlb_invalidate_all();
}
  95.  
/*
 * Empty function; apparently a debugging hook (e.g. a convenient
 * breakpoint target) — called from tlb_invalidate_pages() below.
 * NOTE(review): looks like leftover debug scaffolding — confirm
 * before removing, as it is referenced elsewhere.
 */
extern void d(void);
void d(void)
{
}
  100.  
  101.  
  102. void tlb_invalidate_pages(asid_t asid, __address va, count_t cnt)
  103. {
  104.  
  105.  
  106.     region_register rr;
  107.     bool restore_rr = false;
  108.     int b=0;
  109.     int c=cnt;
  110.     int i;
  111.  
  112.     rr.word = rr_read(VA2VRN(va));
  113.     if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
  114.         /*
  115.          * The selected region register does not contain required RID.
  116.          * Save the old content of the register and replace the RID.
  117.          */
  118.         region_register rr0;
  119.  
  120.         rr0 = rr;
  121.         rr0.map.rid = ASID2RID(asid, VA2VRN(va));
  122.         rr_write(VA2VRN(va), rr0.word);
  123.         srlz_d();
  124.         srlz_i();
  125.     }
  126.    
  127.     while(c>>=1)    b++;
  128.     b>>=1;
  129.     __u64 ps;
  130.    
  131.     switch(b)
  132.     {
  133.         case 0: /*cnt 1-3*/
  134.         {
  135.             ps=PAGE_WIDTH;
  136.             break;
  137.         }
  138.         case 1: /*cnt 4-15*/
  139.         {
  140.             cnt=(cnt/4)+1;
  141.             ps=PAGE_WIDTH+2;
  142.             va&=~((1<<ps)-1);
  143.             break;
  144.         }
  145.         case 2: /*cnt 16-63*/
  146.         {
  147.             cnt=(cnt/16)+1;
  148.             ps=PAGE_WIDTH+4;
  149.             va&=~((1<<ps)-1);
  150.             break;
  151.         }
  152.         case 3: /*cnt 64-255*/
  153.         {
  154.             cnt=(cnt/64)+1;
  155.             ps=PAGE_WIDTH+6;
  156.             va&=~((1<<ps)-1);
  157.             break;
  158.         }
  159.         case 4: /*cnt 256-1023*/
  160.         {
  161.             cnt=(cnt/256)+1;
  162.             ps=PAGE_WIDTH+8;
  163.             va&=~((1<<ps)-1);
  164.             break;
  165.         }
  166.         case 5: /*cnt 1024-4095*/
  167.         {
  168.             cnt=(cnt/1024)+1;
  169.             ps=PAGE_WIDTH+10;
  170.             va&=~((1<<ps)-1);
  171.             break;
  172.         }
  173.         case 6: /*cnt 4096-16383*/
  174.         {
  175.             cnt=(cnt/4096)+1;
  176.             ps=PAGE_WIDTH+12;
  177.             va&=~((1<<ps)-1);
  178.             break;
  179.         }
  180.         case 7: /*cnt 16384-65535*/
  181.         case 8: /*cnt 65536-(256K-1)*/
  182.         {
  183.             cnt=(cnt/16384)+1;
  184.             ps=PAGE_WIDTH+14;
  185.             va&=~((1<<ps)-1);
  186.             break;
  187.         }
  188.         default:
  189.         {
  190.             cnt=(cnt/(16384*16))+1;
  191.             ps=PAGE_WIDTH+18;
  192.             va&=~((1<<ps)-1);
  193.             break;
  194.         }
  195.            
  196.     }
  197.     d();
  198.     for(i=0;i<cnt;i++)  {
  199.     __asm__ volatile
  200.     (
  201.         "ptc.l %0,%1;;"
  202.         :
  203.         : "r"(va), "r"(ps<<2)
  204.     );
  205.     va+=(1<<ps);
  206.     }
  207.     srlz_d();
  208.     srlz_i();
  209.    
  210.    
  211.     if (restore_rr) {
  212.         rr_write(VA2VRN(va), rr.word);
  213.         srlz_d();
  214.         srlz_i();
  215.     }
  216.  
  217.  
  218. }
  219.  
  220.  
/** Insert data into data translation cache.
 *
 * Thin wrapper around tc_mapping_insert() with dtc == true.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}
  231.  
/** Insert data into instruction translation cache.
 *
 * Thin wrapper around tc_mapping_insert() with dtc == false.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}
  242.  
/** Insert data into instruction or data translation cache.
 *
 * If the region register for va does not already hold the RID derived
 * from asid, it is temporarily rewritten and restored afterwards.
 * The insertion itself runs with PSR.ic cleared (interruption
 * collection disabled), as the itc instructions require; the previous
 * PSR is kept in r8 and restored at the end.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    /* Disable PSR.ic, set up cr.ifa/cr.itir, then itc.i or itc.d. */
    __asm__ volatile (
        "mov r8=psr;;\n"            /* save PSR */
        "rsm %0;;\n"            /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"       /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"          /* restore PSR */
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
  293.  
/** Insert data into instruction translation register.
 *
 * Thin wrapper around tr_mapping_insert() with dtr == false.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}
  305.  
/** Insert data into data translation register.
 *
 * Thin wrapper around tr_mapping_insert() with dtr == true.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}
  317.  
/** Insert data into instruction or data translation register.
 *
 * Same RID juggling and PSR.ic handling as tc_mapping_insert(), but
 * targets a pinned translation register slot instead of the cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    /* Disable PSR.ic, set up cr.ifa/cr.itir, then itr.i or itr.d into slot tr. */
    __asm__ volatile (
        "mov r8=psr;;\n"            /* save PSR */
        "rsm %0;;\n"            /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"          /* restore PSR */
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
  369.  
/** Insert kernel mapping into DTLB.
 *
 * Builds a present, accessed, dirty, writeback, kernel read/write
 * TLB entry for frame and inserts it either into a data translation
 * register or into the data translation cache.
 *
 * @param page Virtual page address to map.
 * @param frame Physical frame address to map the page to.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true;         /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;         /* already accessed */
    entry.d = true;         /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}
  399.  
  400. /** Copy content of PTE into data translation cache.
  401.  *
  402.  * @param t PTE.
  403.  */
  404. void dtc_pte_copy(pte_t *t)
  405. {
  406.     tlb_entry_t entry;
  407.  
  408.     entry.word[0] = 0;
  409.     entry.word[1] = 0;
  410.    
  411.     entry.p = t->p;
  412.     entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
  413.     entry.a = t->a;
  414.     entry.d = t->d;
  415.     entry.pl = t->k ? PL_KERNEL : PL_USER;
  416.     entry.ar = t->w ? AR_WRITE : AR_READ;
  417.     entry.ppn = t->frame >> PPN_SHIFT;
  418.     entry.ps = PAGE_WIDTH;
  419.    
  420.     dtc_mapping_insert(t->page, t->as->asid, entry);
  421. }
  422.  
  423. /** Copy content of PTE into instruction translation cache.
  424.  *
  425.  * @param t PTE.
  426.  */
  427. void itc_pte_copy(pte_t *t)
  428. {
  429.     tlb_entry_t entry;
  430.  
  431.     entry.word[0] = 0;
  432.     entry.word[1] = 0;
  433.    
  434.     ASSERT(t->x);
  435.    
  436.     entry.p = t->p;
  437.     entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
  438.     entry.a = t->a;
  439.     entry.pl = t->k ? PL_KERNEL : PL_USER;
  440.     entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
  441.     entry.ppn = t->frame >> PPN_SHIFT;
  442.     entry.ps = PAGE_WIDTH;
  443.    
  444.     itc_mapping_insert(t->page, t->as->asid, entry);
  445. }
  446.  
  447. /** Instruction TLB fault handler for faults with VHPT turned off.
  448.  *
  449.  * @param vector Interruption vector.
  450.  * @param pstate Structure with saved interruption state.
  451.  */
  452. void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
  453. {
  454.     region_register rr;
  455.     __address va;
  456.     pte_t *t;
  457.    
  458.     va = pstate->cr_ifa;    /* faulting address */
  459.     t = page_mapping_find(AS, va);
  460.     if (t) {
  461.         /*
  462.          * The mapping was found in software page hash table.
  463.          * Insert it into data translation cache.
  464.          */
  465.         itc_pte_copy(t);
  466.     } else {
  467.         /*
  468.          * Forward the page fault to address space page fault handler.
  469.          */
  470.         if (!as_page_fault(va)) {
  471.             panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
  472.         }
  473.     }
  474. }
  475.  
  476. /** Data TLB fault handler for faults with VHPT turned off.
  477.  *
  478.  * @param vector Interruption vector.
  479.  * @param pstate Structure with saved interruption state.
  480.  */
  481. void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
  482. {
  483.     region_register rr;
  484.     rid_t rid;
  485.     __address va;
  486.     pte_t *t;
  487.    
  488.     va = pstate->cr_ifa;    /* faulting address */
  489.     rr.word = rr_read(VA2VRN(va));
  490.     rid = rr.map.rid;
  491.     if (RID2ASID(rid) == ASID_KERNEL) {
  492.         if (VA2VRN(va) == VRN_KERNEL) {
  493.             /*
  494.              * Provide KA2PA(identity) mapping for faulting piece of
  495.              * kernel address space.
  496.              */
  497.             dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
  498.             return;
  499.         }
  500.     }
  501.  
  502.     t = page_mapping_find(AS, va);
  503.     if (t) {
  504.         /*
  505.          * The mapping was found in software page hash table.
  506.          * Insert it into data translation cache.
  507.          */
  508.         dtc_pte_copy(t);
  509.     } else {
  510.         /*
  511.          * Forward the page fault to address space page fault handler.
  512.          */
  513.         if (!as_page_fault(va)) {
  514.             panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
  515.         }
  516.     }
  517. }
  518.  
/** Data nested TLB fault handler.
 *
 * This fault should not occur; if it does, it indicates a TLB miss
 * taken while servicing another TLB fault, so we simply panic.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    panic("%s\n", __FUNCTION__);
}
  530.  
  531. /** Data Dirty bit fault handler.
  532.  *
  533.  * @param vector Interruption vector.
  534.  * @param pstate Structure with saved interruption state.
  535.  */
  536. void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
  537. {
  538.     pte_t *t;
  539.  
  540.     t = page_mapping_find(AS, pstate->cr_ifa);
  541.     ASSERT(t && t->p);
  542.     if (t && t->p) {
  543.         /*
  544.          * Update the Dirty bit in page tables and reinsert
  545.          * the mapping into DTC.
  546.          */
  547.         t->d = true;
  548.         dtc_pte_copy(t);
  549.     }
  550. }
  551.  
  552. /** Instruction access bit fault handler.
  553.  *
  554.  * @param vector Interruption vector.
  555.  * @param pstate Structure with saved interruption state.
  556.  */
  557. void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
  558. {
  559.     pte_t *t;
  560.  
  561.     t = page_mapping_find(AS, pstate->cr_ifa);
  562.     ASSERT(t && t->p);
  563.     if (t && t->p) {
  564.         /*
  565.          * Update the Accessed bit in page tables and reinsert
  566.          * the mapping into ITC.
  567.          */
  568.         t->a = true;
  569.         itc_pte_copy(t);
  570.     }
  571. }
  572.  
  573. /** Data access bit fault handler.
  574.  *
  575.  * @param vector Interruption vector.
  576.  * @param pstate Structure with saved interruption state.
  577.  */
  578. void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
  579. {
  580.     pte_t *t;
  581.  
  582.     t = page_mapping_find(AS, pstate->cr_ifa);
  583.     ASSERT(t && t->p);
  584.     if (t && t->p) {
  585.         /*
  586.          * Update the Accessed bit in page tables and reinsert
  587.          * the mapping into DTC.
  588.          */
  589.         t->a = true;
  590.         dtc_pte_copy(t);
  591.     }
  592. }
  593.  
  594. /** Page not present fault handler.
  595.  *
  596.  * @param vector Interruption vector.
  597.  * @param pstate Structure with saved interruption state.
  598.  */
  599. void page_not_present(__u64 vector, struct exception_regdump *pstate)
  600. {
  601.     region_register rr;
  602.     __address va;
  603.     pte_t *t;
  604.    
  605.     va = pstate->cr_ifa;    /* faulting address */
  606.     t = page_mapping_find(AS, va);
  607.     ASSERT(t);
  608.    
  609.     if (t->p) {
  610.         /*
  611.          * If the Present bit is set in page hash table, just copy it
  612.          * and update ITC/DTC.
  613.          */
  614.         if (t->x)
  615.             itc_pte_copy(t);
  616.         else
  617.             dtc_pte_copy(t);
  618.     } else {
  619.         if (!as_page_fault(va)) {
  620.             panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
  621.         }
  622.     }
  623. }
  624.