/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
  28.  
  29. /** @addtogroup sparc64mm  
  30.  * @{
  31.  */
  32. /** @file
  33.  */
  34.  
  35. #include <arch/mm/tlb.h>
  36. #include <mm/tlb.h>
  37. #include <mm/as.h>
  38. #include <mm/asid.h>
  39. #include <arch/mm/frame.h>
  40. #include <arch/mm/page.h>
  41. #include <arch/mm/pagesize.h>
  42. #include <arch/mm/mmu.h>
  43. #include <arch/interrupt.h>
  44. #include <interrupt.h>
  45. #include <arch.h>
  46. #include <print.h>
  47. #include <arch/types.h>
  48. #include <config.h>
  49. #include <arch/trap/trap.h>
  50. #include <arch/trap/exception.h>
  51. #include <panic.h>
  52. #include <arch/asm.h>
  53.  
  54. #ifdef CONFIG_TSB
  55. #include <arch/mm/tsb.h>
  56. #endif

static void dtlb_pte_copy(pte_t *, index_t, bool);
static void itlb_pte_copy(pte_t *, index_t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    tlb_tag_access_reg_t, const char *);

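/** Verbal descriptions of the four possible MMU context encodings. */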
char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};

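/** Initialize the TLBs.
 *
 * Invalidate all non-locked entries and clear both Synchronous Fault Status
 * registers.
 */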
void tlb_arch_init(void)
{
    /*
     * Invalidate all non-locked DTLB and ITLB entries.
     */
    tlb_invalidate_all();

    /*
     * Clear both SFSRs.
     */
    dtlb_sfsr_write(0);
    itlb_sfsr_write(0);
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

    tag.context = ASID_KERNEL;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = true;
    data.w = true;
    data.g = false;

    dtlb_data_in_write(data.value);
}
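
/*
 * Illustrative sketch (not a call taken from this file; va and pa are
 * hypothetical addresses): a locked, non-cacheable 8K kernel mapping,
 * e.g. for a memory-mapped I/O region, could be created with:
 *
 *     dtlb_insert_mapping(va, pa, PAGESIZE_8K, true, false);
 */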

/** Copy PTE to DTLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param index     Zero if lower 8K-subpage, one if higher 8K-subpage.
 * @param ro        If true, the entry will be created read-only, regardless
 *                  of its w field.
 */
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

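    /* Pick the respective 8K subpage of the emulated larger page. */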
    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = t->k;      /* p like privileged */
    data.w = ro ? false : t->w;
    data.g = t->g;

    dtlb_data_in_write(data.value);
}

/** Copy PTE to ITLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param index     Zero if lower 8K-subpage, one if higher 8K-subpage.
 */
void itlb_pte_copy(pte_t *t, index_t index)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    itlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.p = t->k;      /* p like privileged */
    data.w = false;
    data.g = t->g;

    itlb_data_in_write(data.value);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
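    /*
     * PAGE_SIZE-sized pages are emulated by MMU_PAGES_PER_PAGE adjacent
     * MMU_PAGE_SIZE-sized MMU pages; index selects the MMU subpage of the
     * faulting address within the emulated larger page.
     */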
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
        itsb_pte_copy(t, index);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __func__);
        }
    }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag       Contents of the TLB Tag Access register as it existed
 *                  when the trap happened. This is to prevent confusion
 *                  created by a clobbered Tag Access register during a
 *                  nested DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
    uintptr_t va;
    index_t index;
    pte_t *t;

    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
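    /* 16K-page emulation */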
    index = tag.vpn % MMU_PAGES_PER_PAGE;

    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        }
        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
            "kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
        dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        }
    }
}

/** DTLB protection fault handler.
 *
 * @param tag       Contents of the TLB Tag Access register as it existed
 *                  when the trap happened. This is to prevent confusion
 *                  created by a clobbered Tag Access register during a
 *                  nested DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
    uintptr_t va;
    index_t index;
    pte_t *t;

    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;   /* 16K-page emulation */

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
            va + index * MMU_PAGE_SIZE);
        dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, tag,
                __func__);
        }
    }
}

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more generic
 * (there is no diag field in the US3 architecture).
 *
 * @param i     TLB entry number
 * @param t     TLB entry tag
 * @param d     TLB entry data
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
    printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
        "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
        t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}

#if defined (US)

/** Print contents of both TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }
}
#elif defined (US3)

/** Print contents of all TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("TLB_ISMALL contents:\n");
    for (i = 0; i < tlb_ismall_size(); i++) {
        d.value = itlb_data_access_read(TLB_ISMALL, i);
        t.value = itlb_tag_read_read(TLB_ISMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_IBIG contents:\n");
    for (i = 0; i < tlb_ibig_size(); i++) {
        d.value = itlb_data_access_read(TLB_IBIG, i);
        t.value = itlb_tag_read_read(TLB_IBIG, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DSMALL contents:\n");
    for (i = 0; i < tlb_dsmall_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DSMALL, i);
        t.value = dtlb_tag_read_read(TLB_DSMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_0 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_0, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_1 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_1, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
        print_tlb_entry(i, t, d);
    }
}

#endif

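/** Handle an unserviceable ITLB miss: dump the interrupted state and panic. */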
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}

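/** Handle an unserviceable DTLB miss: report the faulting page, dump the
 * interrupted state and panic.
 */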
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;
    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
            tag.context);
    }
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    panic("%s\n", str);
}

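/** Handle an unserviceable DTLB protection fault: report the faulting page,
 * dump the interrupted state and panic.
 */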
void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;

    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
            tag.context);
    }
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    dump_istate(istate);
    panic("%s\n", str);
}

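/** Print the contents of the DTLB Synchronous Fault Status and Fault Address
 * registers and clear the SFSR.
 */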
void describe_mmu_fault(void)
{
    tlb_sfsr_reg_t sfsr;
    uintptr_t sfar;

    sfsr.value = dtlb_sfsr_read();
    sfar = dtlb_sfar_read();

#if defined (US)
    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
        "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
        sfsr.ow, sfsr.fv);
#elif defined (US3)
    printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
        "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
        sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

    printf("DTLB SFAR: address=%p\n", sfar);

    dtlb_sfsr_write(0);
}

#if defined (US3)
/** Invalidate the given TLB entry if and only if it is non-locked or global.
 *
 * @param tlb       TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1,
 *                  TLB_ISMALL, TLB_IBIG).
 * @param entry     Entry index within the given TLB.
 */
static void tlb_invalidate_entry(int tlb, index_t entry)
{
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) {
        d.value = dtlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(tlb, entry);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(tlb, entry, d.value);
        }
    } else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) {
        d.value = itlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(tlb, entry);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(tlb, entry, d.value);
        }
    }
}
#endif

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    int i;

    /*
     * Walk all ITLB and DTLB entries and remove all unlocked mappings.
     *
     * The kernel doesn't use global mappings, so any locked global
     * mappings found must have been created by someone else. They serve
     * no purpose now other than to collide with proper mappings, so they
     * are invalidated immediately. It is safe to invalidate them even
     * this late.
     */

#if defined (US)
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(i);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(i, d.value);
        }
    }

    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(i);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(i, d.value);
        }
    }

#elif defined (US3)

    for (i = 0; i < tlb_ismall_size(); i++)
        tlb_invalidate_entry(TLB_ISMALL, i);
    for (i = 0; i < tlb_ibig_size(); i++)
        tlb_invalidate_entry(TLB_IBIG, i);
    for (i = 0; i < tlb_dsmall_size(); i++)
        tlb_invalidate_entry(TLB_DSMALL, i);
    for (i = 0; i < tlb_dbig_size(); i++)
        tlb_invalidate_entry(TLB_DBIG_0, i);
    for (i = 0; i < tlb_dbig_size(); i++)
        tlb_invalidate_entry(TLB_DBIG_1, i);
#endif

}

/** Invalidate all ITLB and DTLB entries that belong to the specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for the specified page range in the
 * specified address space.
 *
 * @param asid      Address Space ID.
 * @param page      First page to sweep out of the ITLB and DTLB.
 * @param cnt       Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    unsigned int i;
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}
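
/*
 * Illustrative sketch (not a call taken from this file; as and va are
 * hypothetical): after unmapping one PAGE_SIZE-sized page at virtual
 * address va from address space as, the stale TLB entries could be shot
 * down with:
 *
 *     tlb_invalidate_pages(as->asid, va, 1);
 */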

/** @}
 */