/*
 * Copyright (c) 2006 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia32xen_mm
 * @{
 */
/** @file
 */

#ifndef KERN_ia32xen_PAGE_H_
#define KERN_ia32xen_PAGE_H_

#include <arch/mm/frame.h>

#define PAGE_WIDTH  FRAME_WIDTH
#define PAGE_SIZE   FRAME_SIZE

#define PAGE_COLOR_BITS 0           /* dummy */

#ifdef KERNEL

#ifndef __ASM__
#   define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
#   define PA2KA(x) (((uintptr_t) (x)) + 0x80000000)
#else
#   define KA2PA(x) ((x) - 0x80000000)
#   define PA2KA(x) ((x) + 0x80000000)
#endif
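
/*
 * The kernel maps physical memory at the 2 GiB boundary, so the conversion
 * is a constant offset. For example (illustrative values, not taken from
 * this file): KA2PA(0x80100000) == 0x00100000 and
 * PA2KA(0x00100000) == 0x80100000.
 */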

/*
 * Implementation of generic 4-level page table interface.
 * IA-32 has 2-level page tables, so PTL1 and PTL2 are left out.
 */

/* Number of entries in each level. */
#define PTL0_ENTRIES_ARCH   1024
#define PTL1_ENTRIES_ARCH   0
#define PTL2_ENTRIES_ARCH   0
#define PTL3_ENTRIES_ARCH   1024

/* Page table size for each level. */
#define PTL0_SIZE_ARCH      ONE_FRAME
#define PTL1_SIZE_ARCH      0
#define PTL2_SIZE_ARCH      0
#define PTL3_SIZE_ARCH      ONE_FRAME

/* Macros calculating indices into page tables in each level. */
#define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 22) & 0x3ff)
#define PTL1_INDEX_ARCH(vaddr)  0
#define PTL2_INDEX_ARCH(vaddr)  0
#define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x3ff)
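
/*
 * A 32-bit virtual address therefore splits 10/10/12: bits 31..22 index
 * the page directory (PTL0), bits 21..12 index the page table (PTL3) and
 * bits 11..0 are the offset inside the 4 KiB page. For example
 * (illustrative), vaddr 0x80100000 yields PTL0_INDEX_ARCH == 0x200 and
 * PTL3_INDEX_ARCH == 0x100.
 */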

/* Get PTE address accessors for each level. */
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) \
    ((pte_t *) MA2PA((((pte_t *) (ptl0))[(i)].frame_address) << 12))
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) \
    (ptl1)
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) \
    (ptl2)
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) \
    ((uintptr_t) MA2PA((((pte_t *) (ptl3))[(i)].frame_address) << 12))
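
/*
 * Under Xen the PTEs hold machine frame numbers, whereas the kernel
 * computes with pseudophysical addresses; MA2PA converts the machine
 * address found in the PTE back. The PTL2/PTL3 accessors just pass the
 * pointer through because those levels do not exist on IA-32.
 */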

/* Set PTE address accessors for each level. */
#define SET_PTL0_ADDRESS_ARCH(ptl0) \
{ \
    mmuext_op_t mmu_ext; \
    \
    mmu_ext.cmd = MMUEXT_NEW_BASEPTR; \
    mmu_ext.mfn = ADDR2PFN(PA2MA(ptl0)); \
    ASSERT(xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) == 0); \
}
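
/*
 * A paravirtualized guest may not load CR3 itself; switching to a new
 * page directory is requested from the hypervisor via MMUEXT_NEW_BASEPTR,
 * with the directory identified by its machine frame number.
 */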

#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
{ \
    mmuext_op_t mmu_ext; \
    \
    mmu_ext.cmd = MMUEXT_PIN_L1_TABLE; \
    mmu_ext.mfn = ADDR2PFN(PA2MA(a)); \
    ASSERT(xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) == 0); \
    \
    mmu_update_t update; \
    \
    update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl0))[(i)])); \
    update.val = PA2MA(a); \
    ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0); \
}
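
/*
 * Installing a page table is a two-step hypercall sequence: the frame is
 * first pinned as an L1 table so that Xen can validate and write-protect
 * it, and only then is the directory entry rewritten via xen_mmu_update(),
 * since the guest must never write live page tables directly.
 */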

#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
{ \
    mmu_update_t update; \
    \
    update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl3))[(i)])); \
    update.val = PA2MA(a); \
    ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0); \
}

/* Get PTE flags accessors for each level. */
#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
    get_pt_flags((pte_t *) (ptl0), (index_t) (i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
    PAGE_PRESENT
#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
    PAGE_PRESENT
#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
    get_pt_flags((pte_t *) (ptl3), (index_t) (i))

/* Set PTE flags accessors for each level. */
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
    set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
    set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))

/* Query macros for the last level. */
#define PTE_VALID_ARCH(p) \
    (*((uint32_t *) (p)) != 0)
#define PTE_PRESENT_ARCH(p) \
    ((p)->present != 0)
#define PTE_GET_FRAME_ARCH(p) \
    ((p)->frame_address << FRAME_WIDTH)
#define PTE_WRITABLE_ARCH(p) \
    ((p)->writeable != 0)
#define PTE_EXECUTABLE_ARCH(p) \
    1

#ifndef __ASM__

#include <mm/mm.h>
#include <arch/hypercall.h>
#include <arch/interrupt.h>

/* Page fault error codes. */

/** When the bit at this position is 0, the page fault was caused by a
 * not-present page.
 */
#define PFERR_CODE_P        (1 << 0)

/** When the bit at this position is 1, the page fault was caused by a write. */
#define PFERR_CODE_RW       (1 << 1)

/** When the bit at this position is 1, the page fault originated in user mode. */
#define PFERR_CODE_US       (1 << 2)

/** When the bit at this position is 1, a reserved bit was set in the page
 * directory.
 */
#define PFERR_CODE_RSVD     (1 << 3)
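
/*
 * Example (illustrative): an error code of 0x6 has PFERR_CODE_P clear and
 * both PFERR_CODE_RW and PFERR_CODE_US set, i.e. a user-mode write to a
 * not-present page.
 */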

typedef struct {
    uint64_t ptr;      /**< Machine address of PTE */
    union {            /**< New contents of PTE */
        uint64_t val;
        pte_t pte;
    };
} mmu_update_t;

typedef struct {
    unsigned int cmd;
    union {
        unsigned long mfn;
        unsigned long linear_addr;
    };
    union {
        unsigned int nr_ents;
        void *vcpumask;
    };
} mmuext_op_t;
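
/*
 * These structures mirror the Xen hypercall interface: mmu_update_t names
 * a PTE by machine address and supplies its new value, while mmuext_op_t
 * carries a command code plus command-specific arguments in the anonymous
 * unions (an MFN or a linear address, an entry count or a VCPU mask).
 */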

static inline int xen_update_va_mapping(const void *va, const pte_t pte,
    const unsigned int flags)
{
    return hypercall4(XEN_UPDATE_VA_MAPPING, va, pte, 0, flags);
}

static inline int xen_mmu_update(const mmu_update_t *req,
    const unsigned int count, unsigned int *success_count, domid_t domid)
{
    return hypercall4(XEN_MMU_UPDATE, req, count, success_count, domid);
}

static inline int xen_mmuext_op(const mmuext_op_t *op, const unsigned int count,
    unsigned int *success_count, domid_t domid)
{
    return hypercall4(XEN_MMUEXT_OP, op, count, success_count, domid);
}
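
/*
 * Typical usage (a minimal sketch; ptl0, i and frame are hypothetical
 * names here, the macros above show the real call sites):
 *
 *     mmu_update_t update;
 *
 *     update.ptr = PA2MA(KA2PA(&ptl0[i]));
 *     update.val = PA2MA(frame);
 *     ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0);
 */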

static inline int get_pt_flags(pte_t *pt, index_t i)
{
    pte_t *p = &pt[i];

    return ((!p->page_cache_disable) << PAGE_CACHEABLE_SHIFT |
        (!p->present) << PAGE_PRESENT_SHIFT |
        p->uaccessible << PAGE_USER_SHIFT |
        1 << PAGE_READ_SHIFT |
        p->writeable << PAGE_WRITE_SHIFT |
        1 << PAGE_EXEC_SHIFT |
        p->global << PAGE_GLOBAL_SHIFT);
}
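
/*
 * The negations bridge opposite polarities: the hardware bit is
 * cache-disable while the generic flag means cacheable, and the generic
 * encoding sets a bit for "not present" while the hardware bit means
 * present. Read and execute rights are always reported because plain
 * IA-32 paging cannot deny them separately.
 */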

static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
{
    pte_t p = pt[i];

    p.page_cache_disable = !(flags & PAGE_CACHEABLE);
    p.present = !(flags & PAGE_NOT_PRESENT);
    p.uaccessible = (flags & PAGE_USER) != 0;
    p.writeable = (flags & PAGE_WRITE) != 0;
    p.global = (flags & PAGE_GLOBAL) != 0;

    /*
     * Ensure that there is at least one bit set even if the present bit is
     * cleared.
     */
    p.soft_valid = true;

    mmu_update_t update;

    update.ptr = PA2MA(KA2PA(&(pt[i])));
    update.pte = p;
    xen_mmu_update(&update, 1, NULL, DOMID_SELF);
}
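
/*
 * The entry is modified in a local copy and written back through a single
 * xen_mmu_update() hypercall; soft_valid keeps PTE_VALID_ARCH() true even
 * for mappings that are valid but not present.
 */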

extern void page_arch_init(void);
extern void page_fault(int n, istate_t *istate);

#endif /* __ASM__ */

#endif /* KERNEL */

#endif

/** @}
 */