/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/sun4u/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/pagesize.h>
#include <arch/mm/sun4u/mmu.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *, index_t, bool);
static void itlb_pte_copy(pte_t *, index_t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    tlb_tag_access_reg_t, const char *);

char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};

void tlb_arch_init(void)
{
    /*
     * Invalidate all non-locked DTLB and ITLB entries.
     */
    tlb_invalidate_all();

    /*
     * Clear both SFSRs.
     */
    dtlb_sfsr_write(0);
    itlb_sfsr_write(0);
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

    tag.context = ASID_KERNEL;
    tag.vpn = pg.vpn;

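    /*
     * TLB insertion is a two-step protocol: first set up the virtual
     * page number and context in the Tag Access register, then store
     * the matching data word to the Data In register, which commits
     * the whole entry into the TLB.
     */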
    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = true;
    data.w = true;
    data.g = false;

    dtlb_data_in_write(data.value);
}

/** Copy PTE to DTLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param index     Zero if the lower 8K subpage is to be mapped, one for
 *          the upper 8K subpage.
 * @param ro        If true, the entry will be created read-only, regardless
 *          of its w field.
 */
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

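    /*
     * A kernel page consists of MMU_PAGES_PER_PAGE consecutive 8K MMU
     * pages; the index selects which of these subpages is being mapped.
     */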
    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = t->k;      /* p like privileged */
    data.w = ro ? false : t->w;
    data.g = t->g;

    dtlb_data_in_write(data.value);
}

/** Copy PTE to ITLB.
 *
 * @param t     Page Table Entry to be copied.
 * @param index     Zero if the lower 8K subpage is to be mapped, one for
 *          the upper 8K subpage.
 */
void itlb_pte_copy(pte_t *t, index_t index)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    itlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.p = t->k;      /* p like privileged */
    data.w = false;
    data.g = t->g;

    itlb_data_in_write(data.value);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
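    /* Select the 8K MMU subpage within the (possibly larger) kernel page. */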
    index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
        itsb_pte_copy(t, index);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __func__);
        }
    }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag       Content of the TLB Tag Access register as it existed
 *          when the trap happened. This is to prevent confusion
 *          created by clobbered Tag Access register during a nested
 *          DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
    uintptr_t va;
    index_t index;
    pte_t *t;

    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
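    /* 16K-page emulation: select the 8K MMU subpage within the kernel page. */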
    index = tag.vpn % MMU_PAGES_PER_PAGE;

    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        }
        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
            "kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
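        /*
         * Insert the mapping read-only at first; the first write
         * access will raise a fast_data_access_protection trap, in
         * which the dirty bit gets set and a writable entry is
         * installed.
         */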
        dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        }
    }
}

/** DTLB protection fault handler.
 *
 * @param tag       Content of the TLB Tag Access register as it existed
 *          when the trap happened. This is to prevent confusion
 *          created by clobbered Tag Access register during a nested
 *          DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
    uintptr_t va;
    index_t index;
    pte_t *t;

    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;   /* 16K-page emulation */

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
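        /*
         * The DTLB may still hold the stale read-only entry for this
         * subpage; demap it before inserting the writable mapping.
         */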
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
            va + index * MMU_PAGE_SIZE);
        dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, tag,
                __func__);
        }
    }
}

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more generic
 * (there is no diag field in the US3 architecture).
 *
 * @param i     TLB entry number
 * @param t     TLB entry tag
 * @param d     TLB entry data
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
    printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
        "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
        t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}

#if defined (US)

/** Print contents of both TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }
}

#elif defined (US3)

/** Print contents of all TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("TLB_ISMALL contents:\n");
    for (i = 0; i < tlb_ismall_size(); i++) {
        d.value = itlb_data_access_read(TLB_ISMALL, i);
        t.value = itlb_tag_read_read(TLB_ISMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_IBIG contents:\n");
    for (i = 0; i < tlb_ibig_size(); i++) {
        d.value = itlb_data_access_read(TLB_IBIG, i);
        t.value = itlb_tag_read_read(TLB_IBIG, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DSMALL contents:\n");
    for (i = 0; i < tlb_dsmall_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DSMALL, i);
        t.value = dtlb_tag_read_read(TLB_DSMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_0 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_0, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_1 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_1, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
        print_tlb_entry(i, t, d);
    }
}

#endif

void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;
    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
            tag.context);
    }
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    panic("%s\n", str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;

    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
            tag.context);
    }
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    dump_istate(istate);
    panic("%s\n", str);
}

void describe_mmu_fault(void)
{
    tlb_sfsr_reg_t sfsr;
    uintptr_t sfar;

    sfsr.value = dtlb_sfsr_read();
    sfar = dtlb_sfar_read();

#if defined (US)
    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
        "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
        sfsr.ow, sfsr.fv);
#elif defined (US3)
    printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
        "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
        sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

    printf("DTLB SFAR: address=%p\n", sfar);

    dtlb_sfsr_write(0);
}

#if defined (US3)
/** Invalidate the given TLB entry, provided it is unlocked or global.
 *
 * @param tlb       TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1,
 *          TLB_ISMALL, TLB_IBIG).
 * @param entry     Entry index within the given TLB.
 */
static void tlb_invalidate_entry(int tlb, index_t entry)
{
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) {
        d.value = dtlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(tlb, entry);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(tlb, entry, d.value);
        }
    } else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) {
        d.value = itlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(tlb, entry);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(tlb, entry, d.value);
        }
    }
}
#endif

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    int i;

    /*
     * Walk all ITLB and DTLB entries and remove all unlocked mappings.
     *
     * The kernel does not use global mappings, so any locked global
     * mappings found here must have been created by someone else. Their
     * only effect now is to collide with legitimate mappings, so it is
     * safe to invalidate them immediately, even this late.
     */

#if defined (US)
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(i);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(i, d.value);
        }
    }

    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(i);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(i, d.value);
        }
    }

#elif defined (US3)

    for (i = 0; i < tlb_ismall_size(); i++)
        tlb_invalidate_entry(TLB_ISMALL, i);
    for (i = 0; i < tlb_ibig_size(); i++)
        tlb_invalidate_entry(TLB_IBIG, i);
    for (i = 0; i < tlb_dsmall_size(); i++)
        tlb_invalidate_entry(TLB_DSMALL, i);
    for (i = 0; i < tlb_dbig_size(); i++)
        tlb_invalidate_entry(TLB_DBIG_0, i);
    for (i = 0; i < tlb_dbig_size(); i++)
        tlb_invalidate_entry(TLB_DBIG_1, i);
#endif
}

/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

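    /*
     * Demap all entries belonging to the primary context, which now
     * temporarily holds the target ASID.
     */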
    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid      Address Space ID.
 * @param page      First page to sweep out of the ITLB and DTLB.
 * @param cnt       Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    unsigned int i;
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

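    /*
     * Each kernel page spans MMU_PAGES_PER_PAGE consecutive 8K MMU
     * pages; demap every one of them in both TLBs.
     */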
    for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}

/** @}
 */