/*
 * Copyright (c) 2005 Jakub Jermar
 * Copyright (c) 2008 Pavel Rimsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/sun4v/hypercall.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/tte.h>
#include <arch/mm/tlb.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <arch/cpu.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

#if 0
static void dtlb_pte_copy(pte_t *, index_t, bool);
static void itlb_pte_copy(pte_t *, index_t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    tlb_tag_access_reg_t, const char *);

char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};
#endif

/*
 * Invalidate all non-locked DTLB and ITLB entries.
 */
void tlb_arch_init(void)
{
    tlb_invalidate_all();
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
#if 0
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

    tag.context = ASID_KERNEL;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = true;
    data.w = true;
    data.g = false;

    dtlb_data_in_write(data.value);
#endif
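
    /*
     * A possible sun4v counterpart of the sun4u block above (a minimal
     * sketch, also left compiled out): rather than writing the DTLB
     * registers directly, ask the hypervisor to install the mapping.
     * MMU_MAP_PERM_ADDR, MMU_FLAG_DTLB and __hypercall_fast4() are
     * assumed to come from <arch/sun4v/hypercall.h>; the TTE bit
     * positions follow the sun4v (not sun4u) TTE layout and should be
     * verified against the UltraSPARC Virtual Machine Specification
     * before this code is enabled.
     */
#if 0
    uint64_t tte = 0;

    tte |= ((uint64_t) 1) << 63;                   /* V: valid */
    tte |= frame & 0x00ffffffffffe000ULL;          /* RA: real address */
    if (cacheable)
        tte |= (((uint64_t) 1) << 10) | (((uint64_t) 1) << 9);  /* CP, CV */
    tte |= ((uint64_t) 1) << 8;                    /* P: privileged */
    tte |= ((uint64_t) 1) << 6;                    /* W: writable */
    tte |= pagesize & 0xf;                         /* page size field */

    /* MMU_MAP_PERM_ADDR installs permanent (i.e. locked) mappings only. */
    if (locked)
        (void) __hypercall_fast4(MMU_MAP_PERM_ADDR, page, 0, tte,
            MMU_FLAG_DTLB);
#endif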
}

/** Copy PTE to DTLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param index     Zero if lower 8K-subpage, one if higher 8K-subpage.
 * @param ro        If true, the entry will be created read-only, regardless
 *                  of its w field.
 */
#if 0
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = t->k;      /* p like privileged */
    data.w = ro ? false : t->w;
    data.g = t->g;

    dtlb_data_in_write(data.value);
}
#endif

/** Copy PTE to ITLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param index     Zero if lower 8K-subpage, one if higher 8K-subpage.
 */
#if 0
void itlb_pte_copy(pte_t *t, index_t index)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    itlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.p = t->k;      /* p like privileged */
    data.w = false;
    data.g = t->g;

    itlb_data_in_write(data.value);
}
#endif

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
#if 0
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
        itsb_pte_copy(t, index);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __func__);
        }
    }
#endif
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag       Content of the TLB Tag Access register as it existed
 *                  when the trap happened. This is to prevent confusion
 *                  created by a clobbered Tag Access register during a
 *                  nested DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
//void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
//{
#if 0
    uintptr_t va;
    index_t index;
    pte_t *t;

    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;

    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        }
        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
            "kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
        dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        }
    }
#endif
//}

/** DTLB protection fault handler.
 *
 * @param tag       Content of the TLB Tag Access register as it existed
 *                  when the trap happened. This is to prevent confusion
 *                  created by a clobbered Tag Access register during a
 *                  nested DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
//void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
//{
#if 0
    uintptr_t va;
    index_t index;
    pte_t *t;

    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;   /* 16K-page emulation */

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
            va + index * MMU_PAGE_SIZE);
        dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, tag,
                __func__);
        }
    }
#endif
//}

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more generic
 * (there is no diag field in the US3 architecture).
 *
 * @param i     TLB entry number
 * @param t     TLB entry tag
 * @param d     TLB entry data
 */
#if 0
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
    printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
        "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
        t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
#endif

#if defined (US)

/** Print contents of both TLBs. */
void tlb_print(void)
{
#if 0
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }
#endif
}

#elif defined (US3)

/** Print contents of all TLBs. */
void tlb_print(void)
{
#if 0
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("TLB_ISMALL contents:\n");
    for (i = 0; i < tlb_ismall_size(); i++) {
        d.value = itlb_data_access_read(TLB_ISMALL, i);
        t.value = itlb_tag_read_read(TLB_ISMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_IBIG contents:\n");
    for (i = 0; i < tlb_ibig_size(); i++) {
        d.value = itlb_data_access_read(TLB_IBIG, i);
        t.value = itlb_tag_read_read(TLB_IBIG, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DSMALL contents:\n");
    for (i = 0; i < tlb_dsmall_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DSMALL, i);
        t.value = dtlb_tag_read_read(TLB_DSMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_0 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_0, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_1 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_1, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
        print_tlb_entry(i, t, d);
    }
#endif
}

#endif

#if 0
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}
#endif

#if 0
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;
    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
            tag.context);
    }
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    panic("%s\n", str);
}
#endif

#if 0
void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;

    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
            tag.context);
    }
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    dump_istate(istate);
    panic("%s\n", str);
}
#endif

void describe_mmu_fault(void)
{
}

#if defined (US3)
/** Invalidate the given TLB entry if and only if it is non-locked or global.
 *
 * @param tlb       TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1,
 *                  TLB_ISMALL, TLB_IBIG).
 * @param entry     Entry index within the given TLB.
 */
#if 0
static void tlb_invalidate_entry(int tlb, index_t entry)
{
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) {
        d.value = dtlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(tlb, entry);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(tlb, entry, d.value);
        }
    } else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) {
        d.value = itlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(tlb, entry);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(tlb, entry, d.value);
        }
    }
}
#endif
#endif

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
        MMU_FLAG_DTLB | MMU_FLAG_ITLB);
    if (errno != EOK) {
        panic("Error code = %llu.\n", (unsigned long long) errno);
    }
}

/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
#if 0
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
#endif
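
    /*
     * A possible sun4v implementation (a minimal sketch, left compiled
     * out), analogous to the MMU_DEMAP_ALL hypercall used in
     * tlb_invalidate_all() above: MMU_DEMAP_CTX and __hypercall_fast4()
     * are assumed to be declared in <arch/sun4v/hypercall.h>; check the
     * argument order against the UltraSPARC Virtual Machine
     * Specification before enabling this code.
     */
#if 0
    uint64_t errno = __hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
        MMU_FLAG_DTLB | MMU_FLAG_ITLB);
    if (errno != EOK) {
        panic("Error code = %llu.\n", (unsigned long long) errno);
    }
#endif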
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid      Address Space ID.
 * @param page      First page to sweep out of the ITLB and DTLB.
 * @param cnt       Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
#if 0
    unsigned int i;
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
#endif
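
    /*
     * A possible sun4v implementation (a minimal sketch, left compiled
     * out): demap each MMU page through the hypervisor instead of the
     * sun4u demap registers. MMU_DEMAP_PAGE and __hypercall_fast5() are
     * assumed to be declared in <arch/sun4v/hypercall.h>; check the
     * argument order against the UltraSPARC Virtual Machine
     * Specification before enabling this code.
     */
#if 0
    unsigned int i;

    for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
        uint64_t errno = __hypercall_fast5(MMU_DEMAP_PAGE, 0, 0,
            page + i * MMU_PAGE_SIZE, asid,
            MMU_FLAG_DTLB | MMU_FLAG_ITLB);
        if (errno != EOK) {
            panic("Error code = %llu.\n", (unsigned long long) errno);
        }
    }
#endif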
}

/** @}
 */