/*
 * Copyright (c) 2005 Jakub Jermar
 * Copyright (c) 2008 Pavel Rimsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/sun4v/hypercall.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/tte.h>
#include <arch/mm/tlb.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <arch/cpu.h>
#include <arch/mm/pagesize.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void itlb_pte_copy(pte_t *);
static void dtlb_pte_copy(pte_t *, bool);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    uint64_t, const char *);

/*
 * The assembly language routine passes a 64-bit parameter to the Data Access
 * MMU Miss and Data Access Protection handlers. The parameter encapsulates
 * both the virtual address of the faulting page and the faulting context:
 * the most significant 51 bits represent the VA of the faulting page and the
 * least significant 13 bits represent the faulting context. The following
 * macros extract the page and the context out of the 64-bit parameter:
 */

/* extracts the VA of the faulting page */
#define DMISS_ADDRESS(page_and_ctx) (((page_and_ctx) >> 13) << 13)

/* extracts the faulting context */
#define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff)
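
/*
 * Worked example (illustrative only): for page_and_ctx == 0x4000002005,
 * DMISS_ADDRESS() clears the low 13 bits and yields 0x4000002000 (the
 * faulting page), while DMISS_CONTEXT() masks in only the low 13 bits and
 * yields 0x5 (the faulting context number).
 */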

/**
 * Descriptions of fault types from the MMU fault status area.
 *
 * fault_types[i] contains the description of the error for which the IFT or
 * DFT field of the MMU fault status area is i.
 */
char *fault_types[] = {
    "unknown",
    "fast miss",
    "fast protection",
    "MMU miss",
    "invalid RA",
    "privileged violation",
    "protection violation",
    "NFO access",
    "so page/NFO side effect",
    "invalid VA",
    "invalid ASI",
    "nc atomic",
    "privileged action",
    "unknown",
    "unaligned access",
    "invalid page size"
};

/** Array of MMU fault status areas. */
extern mmu_fault_status_area_t mmu_fsas[MAX_NUM_STRANDS];

/*
 * Invalidate all non-locked DTLB and ITLB entries.
 */
void tlb_arch_init(void)
{
    tlb_invalidate_all();
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
    tte_data_t data;

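    /*
     * Build the sun4v TTE: v = valid, nfo = non-fault-only, ra = real
     * address, ie = invert endianness, e = side effect, cp/cv = cacheable
     * in the physically/virtually indexed cache, p = privileged,
     * x = executable, w = writable, size = page size.
     */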
    data.value = 0;
    data.v = true;
    data.nfo = false;
    data.ra = frame >> FRAME_WIDTH;
    data.ie = false;
    data.e = false;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif
    data.p = true;
    data.x = false;
    data.w = false;
    data.size = pagesize;

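    /*
     * Permanent mappings are installed via the MMU_MAP_PERM_ADDR hypercall,
     * which asks the hypervisor to keep the entry resident; ordinary
     * mappings are installed with MMU_MAP_ADDR and may be evicted at any
     * time.
     */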
    if (locked) {
        __hypercall_fast4(
            MMU_MAP_PERM_ADDR, page, 0, data.value, MMU_FLAG_DTLB);
    } else {
        __hypercall_hyperfast(
            page, ASID_KERNEL, data.value, MMU_FLAG_DTLB, 0,
            MMU_MAP_ADDR);
    }
}
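
/*
 * Illustrative usage (not taken from this file): create a locked, cacheable
 * 8K kernel mapping of physical frame `frame' at virtual address `page':
 *
 *     dtlb_insert_mapping(page, frame, PAGESIZE_8K, true, true);
 */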

/** Copy PTE to DTLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param ro        If true, the entry will be created read-only, regardless
 *                  of its w field.
 */
void dtlb_pte_copy(pte_t *t, bool ro)
{
    tte_data_t data;

    data.value = 0;
    data.v = true;
    data.nfo = false;
    data.ra = (t->frame) >> FRAME_WIDTH;
    data.ie = false;
    data.e = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif
    data.p = t->k;
    data.x = false;
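    /*
     * With ro == true the entry is inserted read-only even for writable
     * pages; the first write then traps to fast_data_access_protection(),
     * which sets the dirty bit and re-inserts a writable entry.
     */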
    data.w = ro ? false : t->w;
    data.size = PAGESIZE_8K;

    __hypercall_hyperfast(
        t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR);
}

/** Copy PTE to ITLB.
 *
 * @param t     Page Table Entry to be copied.
 */
void itlb_pte_copy(pte_t *t)
{
    tte_data_t data;

    data.value = 0;
    data.v = true;
    data.nfo = false;
    data.ra = (t->frame) >> FRAME_WIDTH;
    data.ie = false;
    data.e = false;
    data.cp = t->c;
    data.cv = false;
    data.p = t->k;
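    /* Instruction mappings are executable and never writable. */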
    data.x = true;
    data.w = false;
    data.size = PAGESIZE_8K;

    __hypercall_hyperfast(
        t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
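    /* The faulting address is the trap PC, rounded down to a page boundary. */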
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);

    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t);
#ifdef CONFIG_TSB
        itsb_pte_copy(t);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __func__);
        }
    }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param page_and_ctx  A 64-bit value describing the fault. The most
 *                      significant 51 bits of the value contain the virtual
 *                      address which caused the fault truncated to the page
 *                      boundary. The least significant 13 bits of the value
 *                      contain the number of the context in which the fault
 *                      occurred.
 * @param istate        Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate)
{
    pte_t *t;
    uintptr_t va = DMISS_ADDRESS(page_and_ctx);
    uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

    if (ctx == ASID_KERNEL) {
        if (va == 0) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
                __func__);
        }
        do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
            "Unexpected kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
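        /*
         * Insert the mapping read-only (ro == true) so that the first
         * write to the page traps to fast_data_access_protection(),
         * which marks the page dirty and re-inserts a writable entry.
         */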
        dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
                __func__);
        }
    }
}

/** DTLB protection fault handler.
 *
 * @param page_and_ctx  A 64-bit value describing the fault. The most
 *                      significant 51 bits of the value contain the virtual
 *                      address which caused the fault truncated to the page
 *                      boundary. The least significant 13 bits of the value
 *                      contain the number of the context in which the fault
 *                      occurred.
 * @param istate        Interrupted state saved on the stack.
 */
void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate)
{
    pte_t *t;
    uintptr_t va = DMISS_ADDRESS(page_and_ctx);
    uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
        mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
        dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, page_and_ctx,
                __func__);
        }
    }
}

/*
 * On Niagara this function does not work, as supervisor software is isolated
 * from the TLB by the hypervisor and has no chance to investigate the TLB
 * entries.
 */
void tlb_print(void)
{
    printf("Operation not possible on Niagara.\n");
}

void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    uint64_t page_and_ctx, const char *str)
{
    if (DMISS_CONTEXT(page_and_ctx)) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str,
            DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
    }
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx),
        DMISS_CONTEXT(page_and_ctx));
    panic("%s\n", str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    uint64_t page_and_ctx, const char *str)
{
    if (DMISS_CONTEXT(page_and_ctx)) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str,
            DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
    }
    printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx),
        DMISS_CONTEXT(page_and_ctx));
    dump_istate(istate);
    panic("%s\n", str);
}

/**
 * Describes the exact condition which caused the last DMMU fault.
 */
void describe_dmmu_fault(void)
{
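    /* Ask the hypervisor for the ID of the current strand (virtual CPU). */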
    uint64_t myid;
    __hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);

    ASSERT(mmu_fsas[myid].dft < 16);

    printf("condition which caused the fault: %s\n",
        fault_types[mmu_fsas[myid].dft]);
}

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
        MMU_FLAG_DTLB | MMU_FLAG_ITLB);
    if (errno != EOK) {
        panic("Error code = %d.\n", (int) errno);
    }
}

/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    __hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
        MMU_FLAG_ITLB | MMU_FLAG_DTLB);

    nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid      Address Space ID.
 * @param page      First page to sweep out from ITLB and DTLB.
 * @param cnt       Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    unsigned int i;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

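    /* Demap the pages one by one; each hypercall invalidates a single page. */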
    for (i = 0; i < cnt; i++) {
        __hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page + i * PAGE_SIZE,
            asid, MMU_FLAG_DTLB | MMU_FLAG_ITLB);
    }

    nucleus_leave();
}

/** @}
 */