/*
 * Copyright (c) 2006 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia32xen_mm
 * @{
 */
/** @file
 */

#ifndef KERN_ia32xen_PAGE_H_
#define KERN_ia32xen_PAGE_H_

#include <arch/mm/frame.h>

#define PAGE_WIDTH  FRAME_WIDTH
#define PAGE_SIZE   FRAME_SIZE

#define PAGE_COLOR_BITS 0           /* dummy */

#ifdef KERNEL

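/*
 * Conversion between kernel virtual addresses and physical addresses.
 * The kernel is mapped at the 2 GiB (0x80000000) virtual offset, so the
 * conversion is a simple constant subtraction/addition.
 */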
#ifndef __ASM__
#   define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
#   define PA2KA(x) (((uintptr_t) (x)) + 0x80000000)
#else
#   define KA2PA(x) ((x) - 0x80000000)
#   define PA2KA(x) ((x) + 0x80000000)
#endif

/*
 * Implementation of generic 4-level page table interface.
 * IA-32 has 2-level page tables, so PTL1 and PTL2 are left out.
 */
#define PTL0_ENTRIES_ARCH   1024
#define PTL1_ENTRIES_ARCH   0
#define PTL2_ENTRIES_ARCH   0
#define PTL3_ENTRIES_ARCH   1024

#define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 22) & 0x3ff)
#define PTL1_INDEX_ARCH(vaddr)  0
#define PTL2_INDEX_ARCH(vaddr)  0
#define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x3ff)

#define GET_PTL1_ADDRESS_ARCH(ptl0, i)      ((pte_t *) MA2PA((((pte_t *) (ptl0))[(i)].frame_address) << 12))
#define GET_PTL2_ADDRESS_ARCH(ptl1, i)      (ptl1)
#define GET_PTL3_ADDRESS_ARCH(ptl2, i)      (ptl2)
#define GET_FRAME_ADDRESS_ARCH(ptl3, i)     ((uintptr_t) MA2PA((((pte_t *) (ptl3))[(i)].frame_address) << 12))

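/*
 * The page table base is switched via the MMUEXT_NEW_BASEPTR operation,
 * since a Xen guest cannot load the base register directly.
 */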
#define SET_PTL0_ADDRESS_ARCH(ptl0) { \
    mmuext_op_t mmu_ext; \
    \
    mmu_ext.cmd = MMUEXT_NEW_BASEPTR; \
    mmu_ext.mfn = ADDR2PFN(PA2MA(ptl0)); \
    ASSERT(xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) == 0); \
}

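/*
 * Installing a new L1 table: the frame is first pinned as an L1 page table
 * and the corresponding PTL0 entry is then written through the MMU update
 * hypercall.
 */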
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) { \
    mmuext_op_t mmu_ext; \
    \
    mmu_ext.cmd = MMUEXT_PIN_L1_TABLE; \
    mmu_ext.mfn = ADDR2PFN(PA2MA(a)); \
    ASSERT(xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) == 0); \
    \
    mmu_update_t update; \
    \
    update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl0))[(i)])); \
    update.val = PA2MA(a); \
    ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0); \
}

#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) { \
    mmu_update_t update; \
    \
    update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl3))[(i)])); \
    update.val = PA2MA(a); \
    ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0); \
}

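/*
 * Flag accessors for the two real page-table levels; the middle levels do
 * not exist and always report PAGE_PRESENT.
 */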
#define GET_PTL1_FLAGS_ARCH(ptl0, i)        get_pt_flags((pte_t *) (ptl0), (index_t)(i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i)        PAGE_PRESENT
#define GET_PTL3_FLAGS_ARCH(ptl2, i)        PAGE_PRESENT
#define GET_FRAME_FLAGS_ARCH(ptl3, i)       get_pt_flags((pte_t *) (ptl3), (index_t)(i))

#define SET_PTL1_FLAGS_ARCH(ptl0, i, x)     set_pt_flags((pte_t *) (ptl0), (index_t)(i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x)    set_pt_flags((pte_t *) (ptl3), (index_t)(i), (x))

#define PTE_VALID_ARCH(p)           (*((uint32_t *) (p)) != 0)
#define PTE_PRESENT_ARCH(p)         ((p)->present != 0)
#define PTE_GET_FRAME_ARCH(p)       ((p)->frame_address << FRAME_WIDTH)
#define PTE_WRITABLE_ARCH(p)        ((p)->writeable != 0)
#define PTE_EXECUTABLE_ARCH(p)      1

#ifndef __ASM__

#include <mm/page.h>
#include <arch/types.h>
#include <arch/mm/frame.h>
#include <typedefs.h>
#include <arch/hypercall.h>

/* Page fault error codes. */

/** When this bit is 0, the page fault was caused by a not-present page. */
#define PFERR_CODE_P        (1 << 0)

/** When this bit is 1, the page fault was caused by a write. */
#define PFERR_CODE_RW       (1 << 1)

/** When this bit is 1, the page fault occurred in user mode. */
#define PFERR_CODE_US       (1 << 2)

/** When this bit is 1, a reserved bit was set in the page directory. */
#define PFERR_CODE_RSVD     (1 << 3)

/** Page Table Entry. */
struct page_specifier {
    unsigned present : 1;
    unsigned writeable : 1;
    unsigned uaccessible : 1;
    unsigned page_write_through : 1;
    unsigned page_cache_disable : 1;
    unsigned accessed : 1;
    unsigned dirty : 1;
    unsigned pat : 1;
    unsigned global : 1;
    unsigned soft_valid : 1;    /**< Valid content even if the present bit is not set. */
    unsigned avl : 2;
    unsigned frame_address : 20;
} __attribute__ ((packed));

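/** Request for the Xen MMU update hypercall (xen_mmu_update()). */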
typedef struct {
    uint64_t ptr;      /**< Machine address of PTE */
    union {            /**< New contents of PTE */
        uint64_t val;
        pte_t pte;
    };
} mmu_update_t;

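/** Request for the Xen extended MMU operation hypercall (xen_mmuext_op()). */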
typedef struct {
    unsigned int cmd;
    union {
        unsigned long mfn;
        unsigned long linear_addr;
    };
    union {
        unsigned int nr_ents;
        void *vcpumask;
    };
} mmuext_op_t;

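/* Thin wrappers around the Xen MMU hypercalls. */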
static inline int xen_update_va_mapping(const void *va, const pte_t pte, const unsigned int flags)
{
    return hypercall4(XEN_UPDATE_VA_MAPPING, va, pte, 0, flags);
}

static inline int xen_mmu_update(const mmu_update_t *req, const unsigned int count, unsigned int *success_count, domid_t domid)
{
    return hypercall4(XEN_MMU_UPDATE, req, count, success_count, domid);
}

static inline int xen_mmuext_op(const mmuext_op_t *op, const unsigned int count, unsigned int *success_count, domid_t domid)
{
    return hypercall4(XEN_MMUEXT_OP, op, count, success_count, domid);
}

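/** Translate the hardware bits of the i-th entry into generic page flags. */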
static inline int get_pt_flags(pte_t *pt, index_t i)
{
    pte_t *p = &pt[i];

    return (
        (!p->page_cache_disable) << PAGE_CACHEABLE_SHIFT |
        (!p->present) << PAGE_PRESENT_SHIFT |
        p->uaccessible << PAGE_USER_SHIFT |
        1 << PAGE_READ_SHIFT |
        p->writeable << PAGE_WRITE_SHIFT |
        1 << PAGE_EXEC_SHIFT |
        p->global << PAGE_GLOBAL_SHIFT
    );
}

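/**
 * Set generic page flags on the i-th entry. The page table is not written
 * directly; the modified entry is propagated via the MMU update hypercall.
 */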
static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
{
    pte_t p = pt[i];

    p.page_cache_disable = !(flags & PAGE_CACHEABLE);
    p.present = !(flags & PAGE_NOT_PRESENT);
    p.uaccessible = (flags & PAGE_USER) != 0;
    p.writeable = (flags & PAGE_WRITE) != 0;
    p.global = (flags & PAGE_GLOBAL) != 0;

    /*
     * Ensure that there is at least one bit set even if the present bit is cleared.
     */
    p.soft_valid = true;

    mmu_update_t update;

    update.ptr = PA2MA(KA2PA(&(pt[i])));
    update.pte = p;
    xen_mmu_update(&update, 1, NULL, DOMID_SELF);
}

extern void page_arch_init(void);
extern void page_fault(int n, istate_t *istate);

#endif /* __ASM__ */

#endif /* KERNEL */

#endif

/** @}
 */