/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <typedefs.h>
#include <panic.h>
#include <arch.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
    /* TODO */
}
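
/*
 * A minimal sketch of what the TODO above could expand to, assuming the
 * ptc.e purge parameters (base address, loop counts and strides) are
 * obtained from firmware; the PAL_PTCE_INFO_*() helpers used here are
 * hypothetical and not part of this file:
 *
 *     adr = PAL_PTCE_INFO_BASE();
 *     for (i = 0; i < PAL_PTCE_INFO_COUNT1(); i++) {
 *         for (j = 0; j < PAL_PTCE_INFO_COUNT2(); j++) {
 *             __asm__ volatile ("ptc.e %0;;" :: "r" (adr));
 *             adr += PAL_PTCE_INFO_STRIDE2();
 *         }
 *         adr += PAL_PTCE_INFO_STRIDE1();
 *     }
 *     srlz_d();
 *     srlz_i();
 */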

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
    /* TODO */
}
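
/*
 * Note: purging by ASID would typically mean temporarily installing the
 * corresponding RID into a region register and issuing ptc.l (or ptc.g on
 * multiprocessors) over the mapped range; this is only an outline, not an
 * implementation.
 */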

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

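    /*
     * Note: the sequence below must run with PSR.ic (interruption
     * collection) disabled, because cr.ifa and cr.itir, which supply the
     * virtual address and page size for the insertion, could otherwise be
     * overwritten by an intervening interruption. The PSR is saved to r8,
     * PSR.ic is cleared, cr.ifa/cr.itir are programmed, and either itc.i
     * or itc.d is issued depending on the dtc argument (predicates p6/p7);
     * psr.l is then restored and the change serialized.
     */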
    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%4,r0;;\n"    /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain the required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

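    /*
     * Same protocol as in tc_mapping_insert() above, except that the entry
     * is installed into a translation register, itr[tr] or dtr[tr],
     * selected by the dtr argument.
     */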
    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert data into DTLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true;         /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;         /* already accessed */
    entry.d = true;         /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}
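
/*
 * Usage note: the data TLB fault handler below calls
 * dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0) to install a
 * transient identity mapping through the DTC. Passing true together with a
 * free translation register index would instead pin the mapping, e.g.
 * dtlb_kernel_mapping_insert(va, KA2PA(va), true, 0) for DTR[0]; the index
 * 0 here is arbitrary and only for illustration.
 *
 * The dtc_pte_copy() and itc_pte_copy() helpers that follow perform the
 * analogous conversion for software page table entries: p maps to the
 * present bit, c selects the memory attribute (MA_WRITEBACK vs.
 * MA_UNCACHEABLE), a/d are the accessed/dirty bits, k selects the privilege
 * level, w/x select the access rights, and frame/PAGE_WIDTH fill in ppn/ps.
 */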

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.d = t->d;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->w ? AR_WRITE : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    dtc_mapping_insert(t->page, t->as->asid, entry);
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    ASSERT(t->x);

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    itc_mapping_insert(t->page, t->as->asid, entry);
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    __address va;
    pte_t *t;

    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));  /* needed for the RID reported on panic */
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the instruction translation cache.
         */
        itc_pte_copy(t);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    rid_t rid;
    __address va;
    pte_t *t;

    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;
    if (RID2ASID(rid) == ASID_KERNEL) {
        if (VA2VRN(va) == VRN_KERNEL) {
            /*
             * Provide KA2PA (identity) mapping for the faulting piece of
             * kernel address space.
             */
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
            return;
        }
    }

    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the data translation cache.
         */
        dtc_pte_copy(t);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Dirty bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->d = true;
        dtc_pte_copy(t);
    }
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into ITC.
         */
        t->a = true;
        itc_pte_copy(t);
    }
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->a = true;
        dtc_pte_copy(t);
    }
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    __address va;
    pte_t *t;

    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));  /* needed for the RID reported on panic */
    t = page_mapping_find(AS, va);
    ASSERT(t);

    if (t->p) {
        /*
         * If the Present bit is set in the page hash table, just copy it
         * and update ITC/DTC.
         */
        if (t->x)
            itc_pte_copy(t);
        else
            dtc_pte_copy(t);
    } else {
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}