/*
 * Copyright (c) 2005 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64mm
 * @{
 */
/** @file
 */

/** Paging on AMD64
 *
 * The address space is split into two halves: the 'positive' (lower) half
 * is userspace and the 'negative' (upper) half is kernel space. The
 * negative range starting at 0xffff800000000000 and ending at
 * 0xffffffff80000000 (-2GB) linearly maps physical memory. The area from
 * 0xffffffff80000000 to 0xffffffffffffffff again maps the first 2GB of
 * physical memory.
 *
 * ATTENTION: PA2KA(KA2PA(x)) != x if 'x' is a kernel address in the
 * -2GB mapping.
 */
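
/*
 * Illustrative example of the asymmetry noted above (the address is
 * arbitrary): for a kernel address in the -2GB mapping such as
 * x = 0xffffffff80100000, KA2PA(x) subtracts 0xffffffff80000000 and yields
 * the physical address 0x100000, while PA2KA(0x100000) adds
 * 0xffff800000000000 and yields 0xffff800000100000, which differs from x.
 */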

#ifndef KERN_amd64_PAGE_H_
#define KERN_amd64_PAGE_H_

#include <arch/mm/frame.h>

#define PAGE_WIDTH  FRAME_WIDTH
#define PAGE_SIZE   FRAME_SIZE

#define PAGE_COLOR_BITS 0           /* dummy */

#ifdef KERNEL

#ifndef __ASM__
#   include <mm/mm.h>
#   include <arch/types.h>
#   include <arch/interrupt.h>

static inline uintptr_t ka2pa(uintptr_t x)
{
    if (x > 0xffffffff80000000)
        return x - 0xffffffff80000000;
    else
        return x - 0xffff800000000000;
}

#   define KA2PA(x)     ka2pa((uintptr_t) x)
#   define PA2KA_CODE(x)    (((uintptr_t) (x)) + 0xffffffff80000000)
#   define PA2KA(x)     (((uintptr_t) (x)) + 0xffff800000000000)
#else
#   define KA2PA(x)     ((x) - 0xffffffff80000000)
#   define PA2KA(x)     ((x) + 0xffffffff80000000)
#endif

/* Number of entries in each level. */
#define PTL0_ENTRIES_ARCH   512
#define PTL1_ENTRIES_ARCH   512
#define PTL2_ENTRIES_ARCH   512
#define PTL3_ENTRIES_ARCH   512

/* Page table sizes for each level. */
#define PTL0_SIZE_ARCH      ONE_FRAME
#define PTL1_SIZE_ARCH      ONE_FRAME
#define PTL2_SIZE_ARCH      ONE_FRAME
#define PTL3_SIZE_ARCH      ONE_FRAME

/* Macros calculating indices into page tables in each level. */
#define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 39) & 0x1ff)
#define PTL1_INDEX_ARCH(vaddr)  (((vaddr) >> 30) & 0x1ff)
#define PTL2_INDEX_ARCH(vaddr)  (((vaddr) >> 21) & 0x1ff)
#define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x1ff)
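
/*
 * Illustrative example (derived from the shifts above): for
 * vaddr = 0xffffffff80000000, the start of the -2GB kernel mapping,
 * PTL0_INDEX_ARCH yields 511, PTL1_INDEX_ARCH yields 510, and both
 * PTL2_INDEX_ARCH and PTL3_INDEX_ARCH yield 0.
 */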

/* Get PTE address accessors for each level. */
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) \
    ((pte_t *) ((((uint64_t) ((pte_t *) (ptl0))[(i)].addr_12_31) << 12) | \
        (((uint64_t) ((pte_t *) (ptl0))[(i)].addr_32_51) << 32)))
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) \
    ((pte_t *) ((((uint64_t) ((pte_t *) (ptl1))[(i)].addr_12_31) << 12) | \
        (((uint64_t) ((pte_t *) (ptl1))[(i)].addr_32_51) << 32)))
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) \
    ((pte_t *) ((((uint64_t) ((pte_t *) (ptl2))[(i)].addr_12_31) << 12) | \
        (((uint64_t) ((pte_t *) (ptl2))[(i)].addr_32_51) << 32)))
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) \
    ((uintptr_t *) \
        ((((uint64_t) ((pte_t *) (ptl3))[(i)].addr_12_31) << 12) | \
        (((uint64_t) ((pte_t *) (ptl3))[(i)].addr_32_51) << 32)))
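
/*
 * Illustrative example (hypothetical field values): a PTE with
 * addr_12_31 == 0x12345 and addr_32_51 == 0x6 refers to the physical
 * address (0x12345 << 12) | ((uint64_t) 0x6 << 32) == 0x612345000.
 */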

/* Set PTE address accessors for each level. */
#define SET_PTL0_ADDRESS_ARCH(ptl0) \
    (write_cr3((uintptr_t) (ptl0)))
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
    set_pt_addr((pte_t *) (ptl0), (index_t) (i), a)
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a) \
    set_pt_addr((pte_t *) (ptl1), (index_t) (i), a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a) \
    set_pt_addr((pte_t *) (ptl2), (index_t) (i), a)
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
    set_pt_addr((pte_t *) (ptl3), (index_t) (i), a)

/* Get PTE flags accessors for each level. */
#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
    get_pt_flags((pte_t *) (ptl0), (index_t) (i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
    get_pt_flags((pte_t *) (ptl1), (index_t) (i))
#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
    get_pt_flags((pte_t *) (ptl2), (index_t) (i))
#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
    get_pt_flags((pte_t *) (ptl3), (index_t) (i))

/* Set PTE flags accessors for each level. */
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
    set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x) \
    set_pt_flags((pte_t *) (ptl1), (index_t) (i), (x))
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) \
    set_pt_flags((pte_t *) (ptl2), (index_t) (i), (x))
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
    set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))

/* Macros for querying the last-level PTE entries. */
#define PTE_VALID_ARCH(p) \
    (*((uint64_t *) (p)) != 0)
#define PTE_PRESENT_ARCH(p) \
    ((p)->present != 0)
#define PTE_GET_FRAME_ARCH(p) \
    ((((uintptr_t) (p)->addr_12_31) << 12) | \
        ((uintptr_t) (p)->addr_32_51 << 32))
#define PTE_WRITABLE_ARCH(p) \
    ((p)->writeable != 0)
#define PTE_EXECUTABLE_ARCH(p) \
    ((p)->no_execute == 0)

#ifndef __ASM__

/* Page fault error codes. */

/** When the bit in this position is 0, the page fault was caused by a
 * not-present page.
 */
#define PFERR_CODE_P        (1 << 0)

/** When the bit in this position is 1, the page fault was caused by a write. */
#define PFERR_CODE_RW       (1 << 1)

/** When the bit in this position is 1, the page fault was caused in user mode. */
#define PFERR_CODE_US       (1 << 2)

/** When the bit in this position is 1, a reserved bit was set in the page
 * directory.
 */
#define PFERR_CODE_RSVD     (1 << 3)

/** When the bit in this position is 1, the page fault was caused during an
 * instruction fetch.
 */
#define PFERR_CODE_ID       (1 << 4)
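
/*
 * Illustrative example (hypothetical error code): a value of 0x6, i.e.
 * PFERR_CODE_RW | PFERR_CODE_US with PFERR_CODE_P clear, denotes a write
 * from user mode to a not-present page.
 */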

static inline int get_pt_flags(pte_t *pt, index_t i)
{
    pte_t *p = &pt[i];

    return ((!p->page_cache_disable) << PAGE_CACHEABLE_SHIFT |
        (!p->present) << PAGE_PRESENT_SHIFT |
        p->uaccessible << PAGE_USER_SHIFT |
        1 << PAGE_READ_SHIFT |
        p->writeable << PAGE_WRITE_SHIFT |
        (!p->no_execute) << PAGE_EXEC_SHIFT |
        p->global << PAGE_GLOBAL_SHIFT);
}

static inline void set_pt_addr(pte_t *pt, index_t i, uintptr_t a)
{
    pte_t *p = &pt[i];

    p->addr_12_31 = (a >> 12) & 0xfffff;
    p->addr_32_51 = a >> 32;
}

static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
{
    pte_t *p = &pt[i];

    p->page_cache_disable = !(flags & PAGE_CACHEABLE);
    p->present = !(flags & PAGE_NOT_PRESENT);
    p->uaccessible = (flags & PAGE_USER) != 0;
    p->writeable = (flags & PAGE_WRITE) != 0;
    p->no_execute = (flags & PAGE_EXEC) == 0;
    p->global = (flags & PAGE_GLOBAL) != 0;

    /*
     * Ensure that there is at least one bit set even if the present bit is
     * cleared.
     */
    p->soft_valid = 1;
}
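
/*
 * Note on the above: because soft_valid is always set, an entry written by
 * set_pt_flags() is never all-zero, which is presumably what lets
 * PTE_VALID_ARCH() distinguish it from an entry that was never initialized.
 */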

extern void page_arch_init(void);
extern void page_fault(int n, istate_t *istate);

#endif /* __ASM__ */

#endif /* KERNEL */

#endif /* KERN_amd64_PAGE_H_ */

/** @}
 */