Subversion Repositories HelenOS

Rev

Rev 1080 | Rev 1210 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (C) 2006 Jakub Jermar
  3.  * All rights reserved.
  4.  *
  5.  * Redistribution and use in source and binary forms, with or without
  6.  * modification, are permitted provided that the following conditions
  7.  * are met:
  8.  *
  9.  * - Redistributions of source code must retain the above copyright
  10.  *   notice, this list of conditions and the following disclaimer.
  11.  * - Redistributions in binary form must reproduce the above copyright
  12.  *   notice, this list of conditions and the following disclaimer in the
  13.  *   documentation and/or other materials provided with the distribution.
  14.  * - The name of the author may not be used to endorse or promote products
  15.  *   derived from this software without specific prior written permission.
  16.  *
  17.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27.  */
  28.  
  29. /*
  30.  * TLB management.
  31.  */
  32.  
  33. #include <mm/tlb.h>
  34. #include <mm/asid.h>
  35. #include <mm/page.h>
  36. #include <mm/as.h>
  37. #include <arch/mm/tlb.h>
  38. #include <arch/mm/page.h>
  39. #include <arch/barrier.h>
  40. #include <arch/interrupt.h>
  41. #include <arch/pal/pal.h>
  42. #include <arch/asm.h>
  43. #include <typedefs.h>
  44. #include <panic.h>
  45. #include <print.h>
  46. #include <arch.h>
  47.  
/** Invalidate all TLB entries.
 *
 * Issues ptc.e over the address grid described by the processor's
 * PAL_PTCE_INFO data (base address, two iteration counts and two
 * strides), which by the ia64 architecture purges the entire TLB.
 * Interrupts are disabled for the duration of the purge loop.
 */
void tlb_invalidate_all(void)
{
        ipl_t ipl;
        __address adr;
        __u32 count1, count2, stride1, stride2;
        
        int i,j;
        
        /* Query PAL for the ptc.e purge-loop parameters. */
        adr = PAL_PTCE_INFO_BASE();
        count1 = PAL_PTCE_INFO_COUNT1();
        count2 = PAL_PTCE_INFO_COUNT2();
        stride1 = PAL_PTCE_INFO_STRIDE1();
        stride2 = PAL_PTCE_INFO_STRIDE2();
        
        ipl = interrupts_disable();

        /* Outer loop advances by stride1, inner loop by stride2,
           as prescribed by the PAL PTCE information. */
        for(i = 0; i < count1; i++) {
            for(j = 0; j < count2; j++) {
                __asm__ volatile (
                    "ptc.e %0 ;;"
                    :
                    : "r" (adr)
                );
                adr += stride2;
            }
            adr += stride1;
        }

        interrupts_restore(ipl);

        /* Serialize data and instruction streams after the purge. */
        srlz_d();
        srlz_i();
}
  82.  
/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
    /*
     * The asid parameter is currently ignored: there is no per-RID
     * global purge used here, so the whole TLB is invalidated instead.
     * Correct, but coarser (and slower) than strictly necessary.
     */
    tlb_invalidate_all();
}
  91.  
  92.  
  93. void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
  94. {
  95.     region_register rr;
  96.     bool restore_rr = false;
  97.     int b = 0;
  98.     int c = cnt;
  99.  
  100.     __address va;
  101.     va = page;
  102.  
  103.     rr.word = rr_read(VA2VRN(va));
  104.     if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
  105.         /*
  106.          * The selected region register does not contain required RID.
  107.          * Save the old content of the register and replace the RID.
  108.          */
  109.         region_register rr0;
  110.  
  111.         rr0 = rr;
  112.         rr0.map.rid = ASID2RID(asid, VA2VRN(va));
  113.         rr_write(VA2VRN(va), rr0.word);
  114.         srlz_d();
  115.         srlz_i();
  116.     }
  117.    
  118.     while(c >>= 1)
  119.         b++;
  120.     b >>= 1;
  121.     __u64 ps;
  122.    
  123.     switch (b) {
  124.         case 0: /*cnt 1-3*/
  125.             ps = PAGE_WIDTH;
  126.             break;
  127.         case 1: /*cnt 4-15*/
  128.             /*cnt=((cnt-1)/4)+1;*/
  129.             ps = PAGE_WIDTH+2;
  130.             va &= ~((1<<ps)-1);
  131.             break;
  132.         case 2: /*cnt 16-63*/
  133.             /*cnt=((cnt-1)/16)+1;*/
  134.             ps = PAGE_WIDTH+4;
  135.             va &= ~((1<<ps)-1);
  136.             break;
  137.         case 3: /*cnt 64-255*/
  138.             /*cnt=((cnt-1)/64)+1;*/
  139.             ps = PAGE_WIDTH+6;
  140.             va &= ~((1<<ps)-1);
  141.             break;
  142.         case 4: /*cnt 256-1023*/
  143.             /*cnt=((cnt-1)/256)+1;*/
  144.             ps = PAGE_WIDTH+8;
  145.             va &= ~((1<<ps)-1);
  146.             break;
  147.         case 5: /*cnt 1024-4095*/
  148.             /*cnt=((cnt-1)/1024)+1;*/
  149.             ps = PAGE_WIDTH+10;
  150.             va &= ~((1<<ps)-1);
  151.             break;
  152.         case 6: /*cnt 4096-16383*/
  153.             /*cnt=((cnt-1)/4096)+1;*/
  154.             ps = PAGE_WIDTH+12;
  155.             va &= ~((1<<ps)-1);
  156.             break;
  157.         case 7: /*cnt 16384-65535*/
  158.         case 8: /*cnt 65536-(256K-1)*/
  159.             /*cnt=((cnt-1)/16384)+1;*/
  160.             ps = PAGE_WIDTH+14;
  161.             va &= ~((1<<ps)-1);
  162.             break;
  163.         default:
  164.             /*cnt=((cnt-1)/(16384*16))+1;*/
  165.             ps=PAGE_WIDTH+18;
  166.             va&=~((1<<ps)-1);
  167.             break;
  168.     }
  169.     /*cnt+=(page!=va);*/
  170.     for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) {
  171.         __asm__ volatile (
  172.             "ptc.l %0,%1;;"
  173.             :
  174.             : "r" (va), "r" (ps<<2)
  175.         );
  176.     }
  177.     srlz_d();
  178.     srlz_i();
  179.    
  180.     if (restore_rr) {
  181.         rr_write(VA2VRN(va), rr.word);
  182.         srlz_d();
  183.         srlz_i();
  184.     }
  185. }
  186.  
  187.  
/** Insert data into data translation cache.
 *
 * Thin wrapper around tc_mapping_insert() with dtc == true.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}
  198.  
/** Insert data into instruction translation cache.
 *
 * Thin wrapper around tc_mapping_insert() with dtc == false.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}
  209.  
/** Insert data into instruction or data translation cache.
 *
 * If the region register covering va does not already hold the RID
 * derived from asid, it is temporarily replaced for the insertion and
 * restored afterwards.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }
    
    /*
     * Insert with PSR.ic (interruption collection) disabled, as required
     * for writes to cr.ifa/cr.itir; the previous PSR is saved in r8 and
     * restored afterwards. p6/p7 select itc.i vs. itc.d based on dtc.
     */
    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"            /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"       /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );
    
    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
  260.  
/** Insert data into instruction translation register.
 *
 * Thin wrapper around tr_mapping_insert() with dtr == false.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}
  272.  
/** Insert data into data translation register.
 *
 * Thin wrapper around tr_mapping_insert() with dtr == true.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}
  284.  
/** Insert data into instruction or data translation register.
 *
 * Like tc_mapping_insert(), but inserts into a pinned translation
 * register (itr/dtr) identified by tr instead of the translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    /*
     * Insert with PSR.ic disabled (saved in r8, restored at the end).
     * p6/p7 select itr.i vs. itr.d based on dtr; tr indexes the
     * translation register file.
     */
    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"            /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );
    
    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
  336.  
/** Insert kernel mapping into DTLB.
 *
 * Builds a present, accessed, dirty, writeback, kernel-only read/write
 * TLB entry for one page and inserts it under ASID_KERNEL.
 *
 * @param page Virtual page address of the mapping.
 * @param frame Physical frame address backing the page.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
    tlb_entry_t entry;
    
    entry.word[0] = 0;
    entry.word[1] = 0;
    
    entry.p = true;         /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;         /* already accessed */
    entry.d = true;         /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}
  366.  
/** Copy content of PTE into data translation cache.
 *
 * Translates the software PTE bits (present, cacheable, accessed,
 * dirty, kernel, writable) into hardware TLB entry fields and inserts
 * the entry into the DTC under the PTE's address space ASID.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;
    
    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.d = t->d;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->w ? AR_WRITE : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    dtc_mapping_insert(t->page, t->as->asid, entry);
}
  389.  
/** Copy content of PTE into instruction translation cache.
 *
 * Same translation as dtc_pte_copy(), but requires the executable bit
 * and inserts into the ITC. The dirty bit is not propagated (it has no
 * meaning for instruction translations).
 *
 * @param t PTE. Must be executable (t->x).
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;
    
    ASSERT(t->x);
    
    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    itc_mapping_insert(t->page, t->as->asid, entry);
}
  413.  
  414. /** Instruction TLB fault handler for faults with VHPT turned off.
  415.  *
  416.  * @param vector Interruption vector.
  417.  * @param istate Structure with saved interruption state.
  418.  */
  419. void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
  420. {
  421.     region_register rr;
  422.     __address va;
  423.     pte_t *t;
  424.    
  425.     va = istate->cr_ifa;    /* faulting address */
  426.     page_table_lock(AS, true);
  427.     t = page_mapping_find(AS, va);
  428.     if (t) {
  429.         /*
  430.          * The mapping was found in software page hash table.
  431.          * Insert it into data translation cache.
  432.          */
  433.         itc_pte_copy(t);
  434.         page_table_unlock(AS, true);
  435.     } else {
  436.         /*
  437.          * Forward the page fault to address space page fault handler.
  438.          */
  439.         page_table_unlock(AS, true);
  440.         if (!as_page_fault(va)) {
  441.             panic("%s: va=%P, rid=%d, iip=%P\n", __FUNCTION__, istate->cr_ifa, rr.map.rid, istate->cr_iip);
  442.         }
  443.     }
  444. }
  445.  
/** Data TLB fault handler for faults with VHPT turned off.
 *
 * Kernel-region faults get an on-the-fly identity (KA2PA) DTC mapping.
 * Other faults are looked up in the software page hash table and either
 * inserted into the DTC or forwarded to the address space page fault
 * handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    __address va;
    pte_t *t;
    
    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;
    if (RID2ASID(rid) == ASID_KERNEL) {
        if (VA2VRN(va) == VRN_KERNEL) {
            /*
             * Provide KA2PA(identity) mapping for faulting piece of
             * kernel address space.
             */
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
            return;
        }
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in software page hash table.
         * Insert it into data translation cache.
         */
        dtc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d, iip=%P\n", __FUNCTION__, va, rid, istate->cr_iip);
        }
    }
}
  491.  
/** Data nested TLB fault handler.
 *
 * This fault should not occur; a nested fault means the low-level
 * handlers themselves faulted, so the kernel panics unconditionally.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
{
    panic("%s\n", __FUNCTION__);
}
  503.  
/** Data Dirty bit fault handler.
 *
 * Sets the Dirty bit in the software PTE for the faulting address and
 * reinserts the mapping into the DTC. The PTE is expected to exist and
 * be present (asserted); the if-guard keeps non-debug builds defensive.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
{
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, istate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Dirty bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->d = true;
        dtc_pte_copy(t);
    }
    page_table_unlock(AS, true);
}
  526.  
/** Instruction access bit fault handler.
 *
 * Sets the Accessed bit in the software PTE for the faulting address
 * and reinserts the mapping into the ITC. The PTE is expected to exist
 * and be present (asserted); the if-guard keeps non-debug builds
 * defensive.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
{
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, istate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into ITC.
         */
        t->a = true;
        itc_pte_copy(t);
    }
    page_table_unlock(AS, true);
}
  549.  
/** Data access bit fault handler.
 *
 * Sets the Accessed bit in the software PTE for the faulting address
 * and reinserts the mapping into the DTC. The PTE is expected to exist
 * and be present (asserted); the if-guard keeps non-debug builds
 * defensive.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, istate_t *istate)
{
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, istate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->a = true;
        dtc_pte_copy(t);
    }
    page_table_unlock(AS, true);
}
  572.  
  573. /** Page not present fault handler.
  574.  *
  575.  * @param vector Interruption vector.
  576.  * @param istate Structure with saved interruption state.
  577.  */
  578. void page_not_present(__u64 vector, istate_t *istate)
  579. {
  580.     region_register rr;
  581.     __address va;
  582.     pte_t *t;
  583.    
  584.     va = istate->cr_ifa;    /* faulting address */
  585.     page_table_lock(AS, true);
  586.     t = page_mapping_find(AS, va);
  587.     ASSERT(t);
  588.    
  589.     if (t->p) {
  590.         /*
  591.          * If the Present bit is set in page hash table, just copy it
  592.          * and update ITC/DTC.
  593.          */
  594.         if (t->x)
  595.             itc_pte_copy(t);
  596.         else
  597.             dtc_pte_copy(t);
  598.         page_table_unlock(AS, true);
  599.     } else {
  600.         page_table_unlock(AS, true);
  601.         if (!as_page_fault(va)) {
  602.             panic("%s: va=%P, rid=%d\n", __FUNCTION__, va, rr.map.rid);
  603.         }
  604.     }
  605. }
  606.