/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/page.h>
#include <arch/mm/tlb.h>
#include <arch/mm/sun4u/tlb.h>
#include <genarch/mm/page_ht.h>
#include <mm/frame.h>
#include <arch/mm/frame.h>
#include <bitops.h>
#include <debug.h>
#include <align.h>
#include <config.h>

#ifdef CONFIG_SMP
/** Entries locked in DTLB of BSP.
 *
 * Application processors need to have the same locked entries in their DTLBs
 * as the bootstrap processor.
 */
static struct {
    uintptr_t virt_page;
    uintptr_t phys_page;
    int pagesize_code;
} bsp_locked_dtlb_entry[DTLB_MAX_LOCKED_ENTRIES];

/** Number of entries in bsp_locked_dtlb_entry array. */
static count_t bsp_locked_dtlb_entries = 0;
#endif /* CONFIG_SMP */

/** Perform sparc64 specific initialization of paging. */
void page_arch_init(void)
{
    if (config.cpu_active == 1) {
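        /*
         * The bootstrap processor selects the global page hash table as
         * the implementation of the page mapping interface.
         */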
        page_mapping_operations = &ht_mapping_operations;
    } else {

#ifdef CONFIG_SMP
        unsigned int i;

        /*
         * Copy locked DTLB entries from the BSP.
         */
        for (i = 0; i < bsp_locked_dtlb_entries; i++) {
            dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
                bsp_locked_dtlb_entry[i].phys_page,
                bsp_locked_dtlb_entry[i].pagesize_code, true,
                false);
        }
#endif

    }
}

/** Map memory-mapped device into virtual memory.
 *
 * So far, only DTLB is used to map devices into memory. Chances are that there
 * will be only a limited number of devices that the kernel itself needs to
 * lock in DTLB.
 *
 * @param physaddr Physical address of the page where the device is located.
 *  Must be at least page-aligned.
 * @param size Size of the device's registers. Must not exceed 8M and must
 *  include extra space caused by the alignment.
 *
 * @return Virtual address of the page where the device is mapped.
 */
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
    unsigned int order;
    unsigned int i;

    ASSERT(config.cpu_active == 1);

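    /*
     * For each allocation order, the table below gives the hardware page
     * size to use, the increment between successive mappings and the
     * number of locked DTLB entries needed to cover the whole area.
     */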
    struct {
        int pagesize_code;
        size_t increment;
        count_t count;
    } sizemap[] = {
        { PAGESIZE_8K, 0, 1 },                      /* 8K */
        { PAGESIZE_8K, MMU_PAGE_SIZE, 2 },          /* 16K */
        { PAGESIZE_8K, MMU_PAGE_SIZE, 4 },          /* 32K */
        { PAGESIZE_64K, 0, 1 },                     /* 64K */
        { PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 },     /* 128K */
        { PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 },     /* 256K */
        { PAGESIZE_512K, 0, 1 },                    /* 512K */
        { PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 },   /* 1M */
        { PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 },   /* 2M */
        { PAGESIZE_4M, 0, 1 },                      /* 4M */
        { PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 }     /* 8M */
    };

    ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr);
    ASSERT(size <= 8 * 1024 * 1024);

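    /*
     * Compute the order of the area: the binary logarithm, rounded up,
     * of its size expressed in MMU frames.
     */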
    if (size <= MMU_FRAME_SIZE)
        order = 0;
    else
        order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;

    /*
     * Use virtual addresses that are beyond the limit of physical memory.
     * Thus, the physical address space will not be wasted by holes created
     * by frame_alloc().
     */
    ASSERT(PA2KA(last_frame));
    uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame),
        1 << (order + FRAME_WIDTH));
    last_frame = ALIGN_UP(KA2PA(virtaddr) + size,
        1 << (order + FRAME_WIDTH));

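    /*
     * The area may need to be covered by several locked DTLB entries,
     * since only the 8K, 64K, 512K and 4M page sizes are available.
     */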
    for (i = 0; i < sizemap[order].count; i++) {
        /*
         * First, insert the mapping into DTLB.
         */
        dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
            physaddr + i * sizemap[order].increment,
            sizemap[order].pagesize_code, true, false);

#ifdef CONFIG_SMP
        /*
         * Second, save the information about the mapping for APs.
         */
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
            virtaddr + i * sizemap[order].increment;
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
            physaddr + i * sizemap[order].increment;
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
            sizemap[order].pagesize_code;
        bsp_locked_dtlb_entries++;
#endif
    }

    return virtaddr;
}

/** @}
 */