/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *, index_t, bool);
static void itlb_pte_copy(pte_t *, index_t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    tlb_tag_access_reg_t, const char *);

char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};

void tlb_arch_init(void)
{
    /*
     * Invalidate all non-locked DTLB and ITLB entries.
     */
    tlb_invalidate_all();

    /*
     * Clear both SFSRs.
     */
    dtlb_sfsr_write(0);
    itlb_sfsr_write(0);
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

    tag.context = ASID_KERNEL;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = true;
    data.w = true;
    data.g = false;

    dtlb_data_in_write(data.value);
}
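
/*
 * Usage sketch (hypothetical addresses): during early initialization the
 * kernel can pin a large mapping so that it survives tlb_invalidate_all(),
 * e.g.:
 *
 *     dtlb_insert_mapping(0x400000, 0x800000, PAGESIZE_4M, true, true);
 *
 * The entry is inserted under ASID_KERNEL and, because locked is true, the
 * lock bit prevents its eviction.
 */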

/** Copy PTE to DTLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param index     Zero for the lower 8K subpage, one for the higher 8K
 *          subpage.
 * @param ro        If true, the entry will be created read-only, regardless
 *          of its w field.
 */
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = t->k;      /* p like privileged */
    data.w = ro ? false : t->w;
    data.g = t->g;

    dtlb_data_in_write(data.value);
}
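
/*
 * Note on the index argument: the generic kernel page size (PAGE_SIZE) may
 * span several 8K MMU pages (MMU_PAGES_PER_PAGE of them), so a single PTE
 * covers a group of consecutive 8K subpages. The index selects which
 * subpage of the PTE is inserted, hence the (index << MMU_PAGE_WIDTH)
 * offsets applied to both the page and the frame address above.
 */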

/** Copy PTE to ITLB.
 *
 * @param t     Page Table Entry to be copied.
 * @param index     Zero for the lower 8K subpage, one for the higher 8K
 *          subpage.
 */
void itlb_pte_copy(pte_t *t, index_t index)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    itlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.p = t->k;      /* p like privileged */
    data.w = false;
    data.g = t->g;

    itlb_data_in_write(data.value);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
        itsb_pte_copy(t, index);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __func__);
        }
    }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag       Content of the TLB Tag Access register as it existed
 *          when the trap happened. This is to prevent confusion
 *          created by a clobbered Tag Access register during a nested
 *          DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
    uintptr_t va;
    index_t index;
    pte_t *t;

    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;

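    /*
     * Kernel-context misses that make it into this C handler are fatal:
     * legitimate kernel mappings are resolved by the low-level assembly
     * part of the handler, as noted above.
     */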
    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        }
        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
            "kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
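        /*
         * The entry is inserted read-only: the first write to the page
         * will trap into fast_data_access_protection(), which sets the
         * dirty bit before reinserting the mapping as writable.
         */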
        dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        }
    }
}

/** DTLB protection fault handler.
 *
 * @param tag       Content of the TLB Tag Access register as it existed
 *          when the trap happened. This is to prevent confusion
 *          created by a clobbered Tag Access register during a nested
 *          DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
    uintptr_t va;
    index_t index;
    pte_t *t;

    va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;   /* 16K-page emulation */

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
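        /*
         * The stale read-only entry must be demapped before the writable
         * replacement goes in; otherwise the TLB could hold both.
         */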
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
            va + index * MMU_PAGE_SIZE);
        dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, tag,
                __func__);
        }
    }
}

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more
 * generic (there is no diag field in the US3 architecture).
 *
 * @param i     TLB entry number
 * @param t     TLB entry tag
 * @param d     TLB entry data
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
    printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
        "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
        t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}

#if defined (US)

/** Print contents of both TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }
}

#elif defined (US3)

/** Print contents of all TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("TLB_ISMALL contents:\n");
    for (i = 0; i < tlb_ismall_size(); i++) {
        d.value = itlb_data_access_read(TLB_ISMALL, i);
        t.value = itlb_tag_read_read(TLB_ISMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_IBIG contents:\n");
    for (i = 0; i < tlb_ibig_size(); i++) {
        d.value = itlb_data_access_read(TLB_IBIG, i);
        t.value = itlb_tag_read_read(TLB_IBIG, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DSMALL contents:\n");
    for (i = 0; i < tlb_dsmall_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DSMALL, i);
        t.value = dtlb_tag_read_read(TLB_DSMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_0 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_0, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_1 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_1, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
        print_tlb_entry(i, t, d);
    }
}

#endif

void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;
    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
            tag.context);
    }
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    panic("%s\n", str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;

    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
            tag.context);
    }
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    dump_istate(istate);
    panic("%s\n", str);
}

void dump_sfsr_and_sfar(void)
{
    tlb_sfsr_reg_t sfsr;
    uintptr_t sfar;

    sfsr.value = dtlb_sfsr_read();
    sfar = dtlb_sfar_read();

#if defined (US)
    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
        "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
        sfsr.ow, sfsr.fv);
#elif defined (US3)
    printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
        "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
        sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

    printf("DTLB SFAR: address=%p\n", sfar);

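    /*
     * Clearing the SFSR drops the fault valid (fv) bit so that the next
     * fault is not reported as an overwrite (ow).
     */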
    dtlb_sfsr_write(0);
}

#if defined (US3)
/** Invalidate the given TLB entry if and only if it is non-locked or global.
 *
 * @param tlb       TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1,
 *          TLB_ISMALL, TLB_IBIG).
 * @param entry     Entry index within the given TLB.
 */
static void tlb_invalidate_entry(int tlb, index_t entry)
{
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) {
        d.value = dtlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(tlb, entry);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(tlb, entry, d.value);
        }
    } else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) {
        d.value = itlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(tlb, entry);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(tlb, entry, d.value);
        }
    }
}
#endif

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    int i;

    /*
     * Walk all ITLB and DTLB entries and remove all unlocked mappings.
     *
     * The kernel doesn't use global mappings, so any locked global mappings
     * found here must have been created by someone else. Their only purpose
     * now is to collide with proper mappings, so they are invalidated as
     * well; it is safe to do this even this late.
     */

#if defined (US)
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(i);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(i, d.value);
        }
    }

    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(i);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(i, d.value);
        }
    }

#elif defined (US3)

    for (i = 0; i < tlb_ismall_size(); i++)
        tlb_invalidate_entry(TLB_ISMALL, i);
    for (i = 0; i < tlb_ibig_size(); i++)
        tlb_invalidate_entry(TLB_IBIG, i);
    for (i = 0; i < tlb_dsmall_size(); i++)
        tlb_invalidate_entry(TLB_DSMALL, i);
    for (i = 0; i < tlb_dbig_size(); i++)
        tlb_invalidate_entry(TLB_DBIG_0, i);
    for (i = 0; i < tlb_dbig_size(); i++)
        tlb_invalidate_entry(TLB_DBIG_1, i);
#endif
}

/** Invalidate all ITLB and DTLB entries that belong to the specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

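    /*
     * The demap-by-context operations below flush all non-locked TLB
     * entries whose context matches the primary context register, which
     * now holds the target ASID.
     */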
    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for the specified page range in the
 * specified address space.
 *
 * @param asid      Address Space ID.
 * @param page      First page to sweep out of the ITLB and DTLB.
 * @param cnt       Number of pages to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    unsigned int i;
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}

/** @}
 */