/*
 * Copyright (c) 2006 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
/** @addtogroup ia32xen_mm
 * @{
 */
/** @file
 */

#ifndef KERN_ia32xen_PAGE_H_
#define KERN_ia32xen_PAGE_H_

#include <arch/mm/frame.h>

#define PAGE_WIDTH  FRAME_WIDTH
#define PAGE_SIZE   FRAME_SIZE
 
#ifdef KERNEL

#ifndef __ASM__
#   define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
#   define PA2KA(x) (((uintptr_t) (x)) + 0x80000000)
#else
#   define KA2PA(x) ((x) - 0x80000000)
#   define PA2KA(x) ((x) + 0x80000000)
#endif
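
/*
 * Worked example (illustrative only): the kernel lives at a 2 GiB virtual
 * offset, so KA2PA(0x80100000) evaluates to 0x00100000 and
 * PA2KA(0x00100000) evaluates back to 0x80100000.
 */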
 
/*
 * Implementation of generic 4-level page table interface.
 * IA-32 has 2-level page tables, so PTL1 and PTL2 are left out.
 */

/* Number of entries in each level. */
#define PTL0_ENTRIES_ARCH   1024
#define PTL1_ENTRIES_ARCH   0
#define PTL2_ENTRIES_ARCH   0
#define PTL3_ENTRIES_ARCH   1024

/* Page table size for each level. */
#define PTL0_SIZE_ARCH      ONE_FRAME
#define PTL1_SIZE_ARCH      0
#define PTL2_SIZE_ARCH      0
#define PTL3_SIZE_ARCH      ONE_FRAME

/* Macros calculating indices into page tables in each level. */
#define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 22) & 0x3ff)
#define PTL1_INDEX_ARCH(vaddr)  0
#define PTL2_INDEX_ARCH(vaddr)  0
#define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x3ff)
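
/*
 * Worked example (illustrative only): for the virtual address 0x80123456,
 * PTL0_INDEX_ARCH() yields 0x200 (bits 31..22), PTL3_INDEX_ARCH() yields
 * 0x123 (bits 21..12) and the low 12 bits (0x456) form the offset within
 * the 4 KiB page.
 */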
 
/* Get PTE address accessors for each level. */
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) \
    ((pte_t *) MA2PA((((pte_t *) (ptl0))[(i)].frame_address) << 12))
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) \
    (ptl1)
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) \
    (ptl2)
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) \
    ((uintptr_t) MA2PA((((pte_t *) (ptl3))[(i)].frame_address) << 12))
 
/* Set PTE address accessors for each level. */
#define SET_PTL0_ADDRESS_ARCH(ptl0) \
{ \
    mmuext_op_t mmu_ext; \
    \
    mmu_ext.cmd = MMUEXT_NEW_BASEPTR; \
    mmu_ext.mfn = ADDR2PFN(PA2MA(ptl0)); \
    ASSERT(xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) == 0); \
}

#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
{ \
    mmuext_op_t mmu_ext; \
    \
    mmu_ext.cmd = MMUEXT_PIN_L1_TABLE; \
    mmu_ext.mfn = ADDR2PFN(PA2MA(a)); \
    ASSERT(xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) == 0); \
    \
    mmu_update_t update; \
    \
    update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl0))[(i)])); \
    update.val = PA2MA(a); \
    ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0); \
}

#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
{ \
    mmu_update_t update; \
    \
    update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl3))[(i)])); \
    update.val = PA2MA(a); \
    ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0); \
}
 
/* Get PTE flags accessors for each level. */
#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
    get_pt_flags((pte_t *) (ptl0), (index_t) (i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
    PAGE_PRESENT
#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
    PAGE_PRESENT
#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
    get_pt_flags((pte_t *) (ptl3), (index_t) (i))
 
/* Set PTE flags accessors for each level. */
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
    set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
    set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))
 
/* Query macros for the last level. */
#define PTE_VALID_ARCH(p) \
    (*((uint32_t *) (p)) != 0)
#define PTE_PRESENT_ARCH(p) \
    ((p)->present != 0)
#define PTE_GET_FRAME_ARCH(p) \
    ((p)->frame_address << FRAME_WIDTH)
#define PTE_WRITABLE_ARCH(p) \
    ((p)->writeable != 0)
#define PTE_EXECUTABLE_ARCH(p) \
    1
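
/*
 * Worked example (illustrative only, assuming FRAME_WIDTH is 12 as on
 * ia32): a PTE whose frame_address field holds 0x123 makes
 * PTE_GET_FRAME_ARCH() return 0x123000, the base address of the mapped
 * frame.
 */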
 
#ifndef __ASM__

#include <mm/mm.h>
#include <arch/hypercall.h>
#include <arch/interrupt.h>

/* Page fault error codes. */
 
/** When the bit at this position is 0, the page fault was caused by a
 * not-present page.
 */
#define PFERR_CODE_P        (1 << 0)

/** When the bit at this position is 1, the page fault was caused by a write. */
#define PFERR_CODE_RW       (1 << 1)

/** When the bit at this position is 1, the page fault was caused in user mode. */
#define PFERR_CODE_US       (1 << 2)

/** When the bit at this position is 1, a reserved bit was set in the page
 * directory.
 */
#define PFERR_CODE_RSVD     (1 << 3)
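
/*
 * Example (illustrative only): a user-mode write to a page that is not
 * present reaches the page fault handler with PFERR_CODE_P clear and both
 * PFERR_CODE_RW and PFERR_CODE_US set in the error code.
 */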
 
typedef struct {
    uint64_t ptr;      /**< Machine address of PTE */
    union {            /**< New contents of PTE */
        uint64_t val;
        pte_t pte;
    };
} mmu_update_t;

typedef struct {
    unsigned int cmd;
    union {
        unsigned long mfn;
        unsigned long linear_addr;
    };
    union {
        unsigned int nr_ents;
        void *vcpumask;
    };
} mmuext_op_t;
 
static inline int xen_update_va_mapping(const void *va, const pte_t pte,
    const unsigned int flags)
{
    return hypercall4(XEN_UPDATE_VA_MAPPING, va, pte, 0, flags);
}

static inline int xen_mmu_update(const mmu_update_t *req,
    const unsigned int count, unsigned int *success_count, domid_t domid)
{
    return hypercall4(XEN_MMU_UPDATE, req, count, success_count, domid);
}

static inline int xen_mmuext_op(const mmuext_op_t *op, const unsigned int count,
    unsigned int *success_count, domid_t domid)
{
    return hypercall4(XEN_MMUEXT_OP, op, count, success_count, domid);
}
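
/*
 * Usage sketch (illustrative only; the SET_*_ADDRESS_ARCH macros above and
 * set_pt_flags() below follow the same pattern): under Xen a PTE is not
 * written directly; instead the new value is handed to the hypervisor:
 *
 *     mmu_update_t update;
 *
 *     update.ptr = PA2MA(KA2PA(&pt[i]));
 *     update.val = PA2MA(a);
 *     xen_mmu_update(&update, 1, NULL, DOMID_SELF);
 */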
 
static inline int get_pt_flags(pte_t *pt, index_t i)
{
    pte_t *p = &pt[i];

    return ((!p->page_cache_disable) << PAGE_CACHEABLE_SHIFT |
        (!p->present) << PAGE_PRESENT_SHIFT |
        p->uaccessible << PAGE_USER_SHIFT |
        1 << PAGE_READ_SHIFT |
        p->writeable << PAGE_WRITE_SHIFT |
        1 << PAGE_EXEC_SHIFT |
        p->global << PAGE_GLOBAL_SHIFT);
}
 
static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
{
    pte_t p = pt[i];

    p.page_cache_disable = !(flags & PAGE_CACHEABLE);
    p.present = !(flags & PAGE_NOT_PRESENT);
    p.uaccessible = (flags & PAGE_USER) != 0;
    p.writeable = (flags & PAGE_WRITE) != 0;
    p.global = (flags & PAGE_GLOBAL) != 0;

    /*
     * Ensure that there is at least one bit set even if the present bit is
     * cleared.
     */
    p.soft_valid = true;

    mmu_update_t update;

    update.ptr = PA2MA(KA2PA(&(pt[i])));
    update.pte = p;
    xen_mmu_update(&update, 1, NULL, DOMID_SELF);
}
 
extern void page_arch_init(void);
extern void page_fault(int n, istate_t *istate);

#endif /* __ASM__ */

#endif /* KERNEL */

#endif

/** @}
 */