/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *, index_t, bool);
static void itlb_pte_copy(pte_t *, index_t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    tlb_tag_access_reg_t, const char *);

char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};

void tlb_arch_init(void)
{
    /*
     * Invalidate all non-locked DTLB and ITLB entries.
     */
    tlb_invalidate_all();

    /*
     * Clear both SFSRs.
     */
    dtlb_sfsr_write(0);
    itlb_sfsr_write(0);
}
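
/*
 * Note that locked, non-global entries (such as the ones installed for the
 * kernel by the bootstrap code) survive tlb_arch_init():
 * tlb_invalidate_all() removes only unlocked entries and stray locked
 * global ones.
 */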

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

    tag.context = ASID_KERNEL;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = true;
    data.w = true;
    data.g = false;

    dtlb_data_in_write(data.value);
}
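
/*
 * Example call site: fast_data_access_mmu_miss() below uses this routine
 * to create an unlocked, uncacheable 8K identity mapping for kernel
 * accesses to I/O space:
 *
 *     dtlb_insert_mapping(page_8k, KA2PA(page_8k), PAGESIZE_8K, false, false);
 */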

/** Copy PTE to DTLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param index     Zero if lower 8K-subpage, one if higher 8K-subpage.
 * @param ro        If true, the entry will be created read-only, regardless
 *                  of its w field.
 */
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = t->k;      /* p like privileged */
    data.w = ro ? false : t->w;
    data.g = t->g;

    dtlb_data_in_write(data.value);
}

/** Copy PTE to ITLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param index     Zero if lower 8K-subpage, one if higher 8K-subpage.
 */
void itlb_pte_copy(pte_t *t, index_t index)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page + (index << MMU_PAGE_WIDTH);
    fr.address = t->frame + (index << MMU_PAGE_WIDTH);

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    itlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.p = t->k;      /* p like privileged */
    data.w = false;
    data.g = t->g;

    itlb_data_in_write(data.value);
}
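
/*
 * Note that unlike dtlb_pte_copy(), the write bit is cleared here
 * unconditionally: ITLB entries only ever translate instruction fetches,
 * so write permission would be meaningless for them.
 */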

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
    uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, page_16k);
    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
        itsb_pte_copy(t, index);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
            AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __func__);
        }
    }
}
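
/*
 * Worked example of the 16K-page emulation above, assuming 16K base pages
 * composed of two 8K MMU pages (MMU_PAGE_WIDTH == 13, MMU_PAGES_PER_PAGE
 * == 2): a miss at istate->tpc == 0x6123 yields page_16k == 0x4000 and
 * index == (0x6123 >> 13) % 2 == 1, so the upper 8K subpage of the 16K
 * base page is the one copied into the ITLB.
 */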

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param tag       Content of the TLB Tag Access register as it existed
 *                  when the trap happened. This is to prevent confusion
 *                  created by clobbered Tag Access register during a nested
 *                  DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
    uintptr_t page_8k;
    uintptr_t page_16k;
    index_t index;
    pte_t *t;

    page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
    page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;

    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        } else if (page_8k >= end_of_identity) {
            /*
             * The kernel is accessing the I/O space.
             * We still do identity mapping for I/O,
             * but without caching.
             */
            dtlb_insert_mapping(page_8k, KA2PA(page_8k),
                PAGESIZE_8K, false, false);
            return;
        }
        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
            "kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, page_16k);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
        dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
            AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __func__);
        }
    }
}
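
/*
 * Note that the mapping above is inserted read-only (dtlb_pte_copy() is
 * called with ro == true) even when the underlying PTE is writable. The
 * first write through it then traps to fast_data_access_protection(),
 * which marks the PTE dirty and reinserts the mapping as writable.
 */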

/** DTLB protection fault handler.
 *
 * @param tag       Content of the TLB Tag Access register as it existed
 *                  when the trap happened. This is to prevent confusion
 *                  created by clobbered Tag Access register during a nested
 *                  DTLB miss.
 * @param istate    Interrupted state saved on the stack.
 */
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
    uintptr_t page_16k;
    index_t index;
    pte_t *t;

    page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    index = tag.vpn % MMU_PAGES_PER_PAGE;   /* 16K-page emulation */

    page_table_lock(AS, true);
    t = page_mapping_find(AS, page_16k);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
            page_16k + index * MMU_PAGE_SIZE);
        dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, index, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
            AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, tag,
                __func__);
        }
    }
}

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more
 * generic (there is no diag field in the US3 architecture).
 *
 * @param i     TLB entry number
 * @param t     TLB entry tag
 * @param d     TLB entry data
 */
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
    printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
        "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
        t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}

#if defined (US)

/** Print contents of both TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }
}

#elif defined (US3)

/** Print contents of all TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("TLB_ISMALL contents:\n");
    for (i = 0; i < tlb_ismall_size(); i++) {
        d.value = itlb_data_access_read(TLB_ISMALL, i);
        t.value = itlb_tag_read_read(TLB_ISMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_IBIG contents:\n");
    for (i = 0; i < tlb_ibig_size(); i++) {
        d.value = itlb_data_access_read(TLB_IBIG, i);
        t.value = itlb_tag_read_read(TLB_IBIG, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DSMALL contents:\n");
    for (i = 0; i < tlb_dsmall_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DSMALL, i);
        t.value = dtlb_tag_read_read(TLB_DSMALL, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_0 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_0, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
        print_tlb_entry(i, t, d);
    }

    printf("TLB_DBIG_1 contents:\n");
    for (i = 0; i < tlb_dbig_size(); i++) {
        d.value = dtlb_data_access_read(TLB_DBIG_1, i);
        t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
        print_tlb_entry(i, t, d);
    }
}

#endif

void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s.", str);
    dump_istate(istate);
    panic("%s.", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;
    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
            tag.context);
    }
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d.\n", va, tag.context);
    panic("%s.", str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << MMU_PAGE_WIDTH;

    if (tag.context) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
            tag.context);
    }
    printf("Faulting page: %p, ASID=%d.\n", va, tag.context);
    dump_istate(istate);
    panic("%s.", str);
}

void dump_sfsr_and_sfar(void)
{
    tlb_sfsr_reg_t sfsr;
    uintptr_t sfar;

    sfsr.value = dtlb_sfsr_read();
    sfar = dtlb_sfar_read();

#if defined (US)
    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
        "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
        sfsr.ow, sfsr.fv);
#elif defined (US3)
    printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
        "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
        sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
#endif

    printf("DTLB SFAR: address=%p\n", sfar);

    dtlb_sfsr_write(0);
}
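
/*
 * Writing zero to the SFSR above clears the recorded fault status
 * (including the fault-valid fv bit), presumably so that the next MMU
 * fault can be captured cleanly; tlb_arch_init() does the same at boot.
 */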

#if defined (US3)
/** Invalidate the given TLB entry if and only if it is unlocked or global.
 *
 * @param tlb       TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1,
 *                  TLB_ISMALL, TLB_IBIG).
 * @param entry     Entry index within the given TLB.
 */
static void tlb_invalidate_entry(int tlb, index_t entry)
{
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) {
        d.value = dtlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(tlb, entry);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(tlb, entry, d.value);
        }
    } else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) {
        d.value = itlb_data_access_read(tlb, entry);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(tlb, entry);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(tlb, entry, d.value);
        }
    }
}
#endif
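
/*
 * The sequence above uses the diagnostic access registers: the entry's tag
 * is written back through the tag access register and its data word is
 * rewritten with the valid bit cleared, which takes the entry out of
 * service while leaving locked, non-global mappings untouched.
 */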

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    int i;

    /*
     * Walk all ITLB and DTLB entries and remove all unlocked mappings.
     *
     * The kernel doesn't use global mappings, so any locked global
     * mappings found must have been created by someone else. Their only
     * remaining purpose is to collide with proper mappings, so it is safe
     * to invalidate them even this late.
     */

#if defined (US)
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = itlb_tag_read_read(i);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(i, d.value);
        }
    }

    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        if (!d.l || d.g) {
            t.value = dtlb_tag_read_read(i);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(i, d.value);
        }
    }

#elif defined (US3)

    for (i = 0; i < tlb_ismall_size(); i++)
        tlb_invalidate_entry(TLB_ISMALL, i);
    for (i = 0; i < tlb_ibig_size(); i++)
        tlb_invalidate_entry(TLB_IBIG, i);
    for (i = 0; i < tlb_dsmall_size(); i++)
        tlb_invalidate_entry(TLB_DSMALL, i);
    for (i = 0; i < tlb_dbig_size(); i++)
        tlb_invalidate_entry(TLB_DBIG_0, i);
    for (i = 0; i < tlb_dbig_size(); i++)
        tlb_invalidate_entry(TLB_DBIG_1, i);
#endif
}

/** Invalidate all ITLB and DTLB entries that belong to the specified ASID
 * (context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}
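
/*
 * Illustrative use (the caller shown here is an assumption, not a call
 * site in this file): when an address space's ASID is reclaimed, all of
 * its stale translations can be dropped with a single pair of context
 * demaps:
 *
 *     tlb_invalidate_asid(as->asid);
 */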

/** Invalidate all ITLB and DTLB entries for the specified page range in the
 * specified address space.
 *
 * @param asid      Address Space ID.
 * @param page      First page to sweep out of the ITLB and DTLB.
 * @param cnt       Number of PAGE_SIZE pages to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    unsigned int i;
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}
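
/*
 * Since each PAGE_SIZE page is emulated by MMU_PAGES_PER_PAGE consecutive
 * MMU pages, the loop above issues cnt * MMU_PAGES_PER_PAGE demaps per
 * TLB. For example, with 16K pages built from two 8K MMU pages,
 * tlb_invalidate_pages(asid, page, 4) demaps eight 8K translations from
 * the ITLB and eight from the DTLB.
 */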

/** @}
 */