/*
 * Copyright (c) 2005 Jakub Jermar
 * Copyright (c) 2008 Pavel Rimsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/sun4v/hypercall.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/sun4v/tte.h>
#include <arch/mm/sun4v/tlb.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <arch/sun4v/cpu.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

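/*
 * The declarations and definitions guarded by #if 0 throughout this file
 * appear to come from the sun4u version of this code and have not been
 * ported to the sun4v hypervisor interface yet.
 */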
#if 0
static void dtlb_pte_copy(pte_t *, index_t, bool);
static void itlb_pte_copy(pte_t *, index_t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    tlb_tag_access_reg_t, const char *);

char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};
#endif

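/*
 * Per-strand MMU fault status areas. The hypervisor stores information
 * about MMU faults here; the area must be 64-byte aligned (hence the
 * attribute below).
 */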
mmu_fault_status_area_t mmu_fault_status_areas[MAX_NUM_STRANDS]
    __attribute__ ((aligned (64)));

void tlb_arch_init(void)
{
    uint64_t errno;
    /*
     * Invalidate all non-locked DTLB and ITLB entries.
     */

    tlb_invalidate_all();

    /*
     * Set the MMU fault status area for the current CPU.
     */
    uint64_t myid;
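    /* Ask the hypervisor for the ID of the current virtual CPU (strand). */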
    __hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);
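    /*
     * MMU_FAULT_AREA_CONF takes the real (physical) address of the fault
     * status area, hence the KA2PA conversion.
     */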
    errno = __hypercall_fast1(MMU_FAULT_AREA_CONF,
        KA2PA(&(mmu_fault_status_areas[myid])));
    if (errno != EOK) {
        panic("Could not set MMU fault area for CPU %d, errno = %d.\n",
            (int) myid, (int) errno);
    }
    printf("Setting MMU fault area for CPU %d at %p.\n", (int) myid,
        (void *) KA2PA(&(mmu_fault_status_areas[myid])));
#if 0
    /*
     * Clear both SFSRs.
     */
    dtlb_sfsr_write(0);
    itlb_sfsr_write(0);
#endif
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
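    /*
     * Not yet implemented for sun4v; the sun4u code below writes the MMU
     * registers directly and is therefore disabled.
     */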
#if 0
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

    tag.context = ASID_KERNEL;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = true;
    data.w = true;
    data.g = false;

    dtlb_data_in_write(data.value);
#endif
}

/** Copy PTE to DTLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param index     Zero if lower 8K-subpage, one if higher 8K-subpage.
 * @param ro        If true, the entry will be created read-only, regardless
 *                  of its w field.
 */
#if 0
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = t->k;      /* p like privileged */
    data.w = ro ? false : t->w;
    data.g = t->g;

    dtlb_data_in_write(data.value);
}
#endif

/** Copy PTE to ITLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param index     Zero if lower 8K-subpage, one if higher 8K-subpage.
 */
#if 0
void itlb_pte_copy(pte_t *t, index_t index)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    itlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.p = t->k;      /* p like privileged */
    data.w = false;
    data.g = t->g;

    itlb_data_in_write(data.value);
}
#endif

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
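    /* Not yet implemented for sun4v; the sun4u handler below is disabled. */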
#if 0
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
        itsb_pte_copy(t, index);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __func__);
        }
    }
#endif
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag       Content of the TLB Tag Access register as it existed
 *                  when the trap happened. This is to prevent confusion
 *                  created by clobbered Tag Access register during a nested
 *                  DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
//void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
//{
#if 0
    uintptr_t va;
    index_t index;
    pte_t *t;

    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;

    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        }
        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
            "kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
        dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        }
    }
#endif
//}

/** DTLB protection fault handler.
 *
 * @param tag       Content of the TLB Tag Access register as it existed
 *                  when the trap happened. This is to prevent confusion
 *                  created by clobbered Tag Access register during a nested
 *                  DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
//void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
//{
#if 0
    uintptr_t va;
    index_t index;
    pte_t *t;

    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;   /* 16K-page emulation */

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
            va + index * MMU_PAGE_SIZE);
        dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, tag,
                __func__);
        }
    }
#endif
//}

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more generic
 * (there is no diag field in the US3 architecture).
 *
 * @param i     TLB entry number
 * @param t     TLB entry tag
 * @param d     TLB entry data
 */
#if 0
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
    printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
        "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
        t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
#endif

#if defined (US)

/** Print contents of both TLBs. */
void tlb_print(void)
{
#if 0
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }
#endif
}

#elif defined (US3)

/** Print contents of all TLBs. */
void tlb_print(void)
{
#if 0
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("TLB_ISMALL contents:\n");
    for (i = 0; i < tlb_ismall_size(); i++) {
        d.value = dtlb_data_access_read(TLB_ISMALL, i);
        t.value = dtlb_tag_read_read(TLB_ISMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_IBIG contents:\n");
    for (i = 0; i < tlb_ibig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_IBIG, i);
        t.value = dtlb_tag_read_read(TLB_IBIG, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DSMALL contents:\n");
    for (i = 0; i < tlb_dsmall_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DSMALL, i);
        t.value = dtlb_tag_read_read(TLB_DSMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_0 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_0, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_1 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_1, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
        print_tlb_entry(i, t, d);
    }
#endif
}

#endif

#if 0
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}
#endif

#if 0
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;
    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
            tag.context);
    }
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    panic("%s\n", str);
}
#endif

#if 0
void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;

    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
            tag.context);
    }
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    dump_istate(istate);
    panic("%s\n", str);
}
#endif

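/** Stub; description of MMU faults is not implemented for sun4v yet. */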
void describe_mmu_fault(void)
{
}

#if defined (US3)
/** Invalidates given TLB entry if and only if it is non-locked or global.
 *
 * @param tlb       TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1,
 *                  TLB_ISMALL, TLB_IBIG).
 * @param entry     Entry index within the given TLB.
 */
#if 0
static void tlb_invalidate_entry(int tlb, index_t entry)
{
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) {
        d.value = dtlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(tlb, entry);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(tlb, entry, d.value);
        }
    } else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) {
        d.value = itlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(tlb, entry);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(tlb, entry, d.value);
        }
    }
}
#endif
#endif


/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
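    /*
     * Ask the hypervisor to demap all non-permanent mappings in both
     * the data and instruction TLBs of the current virtual CPU.
     */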
    uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
        MMU_FLAG_DTLB | MMU_FLAG_ITLB);
    if (errno != EOK) {
        panic("Error code = %d.\n", (int) errno);
    }
}

/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
#if 0
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
#endif
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid      Address Space ID.
 * @param page      First page to sweep out from ITLB and DTLB.
 * @param cnt       Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
#if 0
    unsigned int i;
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
#endif
}

/** @}
 */