Subversion Repositories HelenOS


Rev 2465 → Rev 2467

The change between the two revisions is purely cosmetic: Rev 2467 adds section comments, normalizes spacing, and rewraps long macro and function definitions onto continuation lines. The listing below shows the Rev 2467 side of each diff hunk.

Line 54 (Rev 2465) ... Line 54 (Rev 2467)
/*
 * Implementation of generic 4-level page table interface.
 * IA-32 has 2-level page tables, so PTL1 and PTL2 are left out.
 */

/* Number of entries in each level. */
#define PTL0_ENTRIES_ARCH   1024
#define PTL1_ENTRIES_ARCH   0
#define PTL2_ENTRIES_ARCH   0
#define PTL3_ENTRIES_ARCH   1024

/* Page table size for each level. */
#define PTL0_SIZE_ARCH      ONE_FRAME
#define PTL1_SIZE_ARCH      0
#define PTL2_SIZE_ARCH      0
#define PTL3_SIZE_ARCH      ONE_FRAME

/* Macros calculating indices into page tables in each level. */
#define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 22) & 0x3ff)
#define PTL1_INDEX_ARCH(vaddr)  0
#define PTL2_INDEX_ARCH(vaddr)  0
#define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x3ff)
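As an illustration (not from the repository), the two non-trivial index macros above split a 32-bit virtual address into a 10-bit PTL0 index, a 10-bit PTL3 index and a 12-bit page offset. A minimal, self-contained sketch of that decomposition, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

/* Mirrors PTL0_INDEX_ARCH and PTL3_INDEX_ARCH, for illustration only. */
int main(void)
{
    uint32_t vaddr = 0x80123abc;

    uint32_t ptl0_idx = (vaddr >> 22) & 0x3ff;  /* top 10 bits: PTL0 index */
    uint32_t ptl3_idx = (vaddr >> 12) & 0x3ff;  /* next 10 bits: PTL3 index */
    uint32_t offset = vaddr & 0xfff;            /* low 12 bits: page offset */

    printf("PTL0 %u, PTL3 %u, offset 0x%03x\n", (unsigned) ptl0_idx,
        (unsigned) ptl3_idx, (unsigned) offset);
    return 0;
}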

/* Get PTE address accessors for each level. */
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) \
    ((pte_t *) MA2PA((((pte_t *) (ptl0))[(i)].frame_address) << 12))
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) \
    (ptl1)
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) \
    (ptl2)
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) \
    ((uintptr_t) MA2PA((((pte_t *) (ptl3))[(i)].frame_address) << 12))

/* Set PTE address accessors for each level. */
#define SET_PTL0_ADDRESS_ARCH(ptl0) \
{ \
    mmuext_op_t mmu_ext; \
    \
    mmu_ext.cmd = MMUEXT_NEW_BASEPTR; \
    mmu_ext.mfn = ADDR2PFN(PA2MA(ptl0)); \
    ASSERT(xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) == 0); \
}

#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
{ \
    mmuext_op_t mmu_ext; \
    \
    mmu_ext.cmd = MMUEXT_PIN_L1_TABLE; \
    mmu_ext.mfn = ADDR2PFN(PA2MA(a)); \
    ASSERT(xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) == 0); \
Line 98 (Rev 2465) ... Line 110 (Rev 2467): unchanged lines omitted
    ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0); \
}

#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
{ \
    mmu_update_t update; \
    \
    update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl3))[(i)])); \
    update.val = PA2MA(a); \
    ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0); \
}

/* Get PTE flags accessors for each level. */
#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
    get_pt_flags((pte_t *) (ptl0), (index_t) (i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
    PAGE_PRESENT
#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
    PAGE_PRESENT
#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
    get_pt_flags((pte_t *) (ptl3), (index_t) (i))

/* Set PTE flags accessors for each level. */
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
    set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
    set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))

/* Query macros for the last level. */
#define PTE_VALID_ARCH(p) \
    (*((uint32_t *) (p)) != 0)
#define PTE_PRESENT_ARCH(p) \
    ((p)->present != 0)
#define PTE_GET_FRAME_ARCH(p) \
    ((p)->frame_address << FRAME_WIDTH)
#define PTE_WRITABLE_ARCH(p) \
    ((p)->writeable != 0)
#define PTE_EXECUTABLE_ARCH(p) \
    1
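As an aside (a sketch, not code from the repository), the last-level query macros above would typically be consulted by the generic paging code along these lines; the helper name is hypothetical, while pte_t and index_t come from the surrounding HelenOS headers:

/* Hypothetical helper, illustration only: does the entry at index i of a PTL3
 * table map a present, writable frame? */
static inline int pte_maps_writable_frame(pte_t *ptl3, index_t i)
{
    pte_t *p = &ptl3[i];

    if (!PTE_VALID_ARCH(p) || !PTE_PRESENT_ARCH(p))
        return 0;

    return PTE_WRITABLE_ARCH(p);
}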

#ifndef __ASM__

#include <mm/mm.h>
#include <arch/hypercall.h>
#include <arch/interrupt.h>

/* Page fault error codes. */

/** When bit on this position is 0, the page fault was caused by a not-present
 * page.
 */
#define PFERR_CODE_P        (1 << 0)

/** When bit on this position is 1, the page fault was caused by a write. */
#define PFERR_CODE_RW       (1 << 1)
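As an aside (a minimal sketch; the function name and the uint32_t type of the error code are assumptions, not code from the repository), a fault handler could decode these bits like this:

/* Illustration only: classify a page fault from its error code. */
static inline int fault_is_write_to_unmapped_page(uint32_t error_code)
{
    int present = (error_code & PFERR_CODE_P) != 0;  /* bit set: page was present */
    int write = (error_code & PFERR_CODE_RW) != 0;   /* bit set: access was a write */

    return !present && write;
}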

Line 162 (Rev 2465) ... Line 191 (Rev 2467): unchanged lines omitted

        unsigned int nr_ents;
        void *vcpumask;
    };
} mmuext_op_t;

static inline int xen_update_va_mapping(const void *va, const pte_t pte,
    const unsigned int flags)
{
    return hypercall4(XEN_UPDATE_VA_MAPPING, va, pte, 0, flags);
}

static inline int xen_mmu_update(const mmu_update_t *req,
    const unsigned int count, unsigned int *success_count, domid_t domid)
{
    return hypercall4(XEN_MMU_UPDATE, req, count, success_count, domid);
}

static inline int xen_mmuext_op(const mmuext_op_t *op, const unsigned int count,
    unsigned int *success_count, domid_t domid)
{
    return hypercall4(XEN_MMUEXT_OP, op, count, success_count, domid);
}
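As an aside (a hedged sketch; the helper name is hypothetical, while PA2MA, KA2PA, ASSERT and the types come from the surrounding headers), installing a single PTE through xen_mmu_update() looks essentially like the body of SET_FRAME_ADDRESS_ARCH above:

/* Hypothetical helper, illustration only: point the PTE at index i of a PTL3
 * table at the frame whose physical address is 'frame'. */
static inline void example_set_frame(pte_t *ptl3, index_t i, uintptr_t frame)
{
    mmu_update_t update;

    update.ptr = PA2MA(KA2PA(&ptl3[i]));  /* machine address of the PTE itself */
    update.val = PA2MA(frame);            /* machine address of the new frame */
    ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0);
}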

static inline int get_pt_flags(pte_t *pt, index_t i)
{
    pte_t *p = &pt[i];

    return ((!p->page_cache_disable) << PAGE_CACHEABLE_SHIFT |
        (!p->present) << PAGE_PRESENT_SHIFT |
        p->uaccessible << PAGE_USER_SHIFT |
        1 << PAGE_READ_SHIFT |
        p->writeable << PAGE_WRITE_SHIFT |
        1 << PAGE_EXEC_SHIFT |
        p->global << PAGE_GLOBAL_SHIFT);
}
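As an aside, get_pt_flags() converts the hardware PTE bit-fields into the generic PAGE_* flag word from <mm/mm.h>; it negates p->present and p->page_cache_disable, which matches a convention where a set PAGE_PRESENT_SHIFT bit marks a not-present entry and a set PAGE_CACHEABLE_SHIFT bit marks a cacheable one. A tiny sketch of consuming that flag word (the helper is hypothetical):

/* Illustration only: test the generic flag word produced by get_pt_flags(),
 * assuming the <mm/mm.h> convention described above. */
static inline int pt_entry_absent(pte_t *pt, index_t i)
{
    return (get_pt_flags(pt, i) >> PAGE_PRESENT_SHIFT) & 1;
}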

static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
{
    pte_t p = pt[i];