Subversion Repositories HelenOS

Rev

Rev 2465 | Rev 3233 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2465 Rev 2467
1
/*
1
/*
2
 * Copyright (c) 2005 Ondrej Palkovsky
2
 * Copyright (c) 2005 Ondrej Palkovsky
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup amd64mm
29
/** @addtogroup amd64mm
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
/** Paging on AMD64
35
/** Paging on AMD64
36
 *
36
 *
37
 * The space is divided in positive numbers - userspace and
37
 * The space is divided in positive numbers - userspace and
38
 * negative numbers - kernel space. The 'negative' space starting
38
 * negative numbers - kernel space. The 'negative' space starting
39
 * with 0xffff800000000000 and ending with 0xffffffff80000000
39
 * with 0xffff800000000000 and ending with 0xffffffff80000000
40
 * (-2GB) is identically mapped physical memory. The area
40
 * (-2GB) is identically mapped physical memory. The area
41
 * (0xffffffff80000000 ... 0xffffffffffffffff is again identically
41
 * (0xffffffff80000000 ... 0xffffffffffffffff is again identically
42
 * mapped first 2GB.
42
 * mapped first 2GB.
43
 *
43
 *
44
 * ATTENTION - PA2KA(KA2PA(x)) != x if 'x' is in kernel
44
 * ATTENTION - PA2KA(KA2PA(x)) != x if 'x' is in kernel
45
 */
45
 */
46
 
46
 
47
#ifndef KERN_amd64_PAGE_H_
47
#ifndef KERN_amd64_PAGE_H_
48
#define KERN_amd64_PAGE_H_
48
#define KERN_amd64_PAGE_H_
49
 
49
 
50
#include <arch/mm/frame.h>
50
#include <arch/mm/frame.h>
51
 
51
 
52
#define PAGE_WIDTH  FRAME_WIDTH
52
#define PAGE_WIDTH  FRAME_WIDTH
53
#define PAGE_SIZE   FRAME_SIZE
53
#define PAGE_SIZE   FRAME_SIZE
54
 
54
 
55
#define PAGE_COLOR_BITS 0           /* dummy */
55
#define PAGE_COLOR_BITS 0           /* dummy */
56
 
56
 
57
#ifdef KERNEL
57
#ifdef KERNEL
58
 
58
 
59
#ifndef __ASM__
59
#ifndef __ASM__
60
#   include <mm/mm.h>
60
#   include <mm/mm.h>
61
#   include <arch/types.h>
61
#   include <arch/types.h>
62
#   include <arch/interrupt.h>
62
#   include <arch/interrupt.h>
63
 
63
 
64
/** Convert a kernel virtual address to a physical address.
 *
 * Two kernel mappings exist (see the header comment of this file):
 * the last 2 GiB (0xffffffff80000000 and up) identically map the
 * first 2 GiB of physical memory, while the rest of the kernel half
 * (from 0xffff800000000000) identically maps all physical memory.
 *
 * Note the boundary: 0xffffffff80000000 itself is PA2KA_CODE(0),
 * so it must translate to physical address 0 — hence '>=', not '>'.
 *
 * @param x Kernel virtual address.
 *
 * @return Physical address corresponding to x.
 */
static inline uintptr_t ka2pa(uintptr_t x)
{
    if (x >= 0xffffffff80000000)
        return x - 0xffffffff80000000;
    else
        return x - 0xffff800000000000;
}
71
 
71
 
72
#   define KA2PA(x)      ka2pa((uintptr_t)x)
72
#   define KA2PA(x)     ka2pa((uintptr_t) x)
73
#   define PA2KA_CODE(x)      (((uintptr_t) (x)) + 0xffffffff80000000)
73
#   define PA2KA_CODE(x)    (((uintptr_t) (x)) + 0xffffffff80000000)
74
#   define PA2KA(x)      (((uintptr_t) (x)) + 0xffff800000000000)
74
#   define PA2KA(x)     (((uintptr_t) (x)) + 0xffff800000000000)
75
#else
75
#else
76
#   define KA2PA(x)      ((x) - 0xffffffff80000000)
76
#   define KA2PA(x)     ((x) - 0xffffffff80000000)
77
#   define PA2KA(x)      ((x) + 0xffffffff80000000)
77
#   define PA2KA(x)     ((x) + 0xffffffff80000000)
78
#endif
78
#endif
79
 
79
 
-
 
80
/* Number of entries in each level. */
80
#define PTL0_ENTRIES_ARCH   512
81
#define PTL0_ENTRIES_ARCH   512
81
#define PTL1_ENTRIES_ARCH   512
82
#define PTL1_ENTRIES_ARCH   512
82
#define PTL2_ENTRIES_ARCH   512
83
#define PTL2_ENTRIES_ARCH   512
83
#define PTL3_ENTRIES_ARCH   512
84
#define PTL3_ENTRIES_ARCH   512
84
 
85
 
-
 
86
/* Page table sizes for each level. */
85
#define PTL0_SIZE_ARCH       ONE_FRAME
87
#define PTL0_SIZE_ARCH      ONE_FRAME
86
#define PTL1_SIZE_ARCH       ONE_FRAME
88
#define PTL1_SIZE_ARCH      ONE_FRAME
87
#define PTL2_SIZE_ARCH       ONE_FRAME
89
#define PTL2_SIZE_ARCH      ONE_FRAME
88
#define PTL3_SIZE_ARCH       ONE_FRAME
90
#define PTL3_SIZE_ARCH      ONE_FRAME
89
 
91
 
-
 
92
/* Macros calculating indices into page tables in each level. */
90
#define PTL0_INDEX_ARCH(vaddr)  (((vaddr)>>39)&0x1ff)
93
#define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 39) & 0x1ff)
91
#define PTL1_INDEX_ARCH(vaddr)  (((vaddr)>>30)&0x1ff)
94
#define PTL1_INDEX_ARCH(vaddr)  (((vaddr) >> 30) & 0x1ff)
92
#define PTL2_INDEX_ARCH(vaddr)  (((vaddr)>>21)&0x1ff)
95
#define PTL2_INDEX_ARCH(vaddr)  (((vaddr) >> 21) & 0x1ff)
93
#define PTL3_INDEX_ARCH(vaddr)  (((vaddr)>>12)&0x1ff)
96
#define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x1ff)
94
 
97
 
-
 
98
/* Get PTE address accessors for each level. */
-
 
99
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) \
95
#define GET_PTL1_ADDRESS_ARCH(ptl0, i)      ((pte_t *) ((((uint64_t) ((pte_t *)(ptl0))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl0))[(i)].addr_32_51)<<32 )))
100
    ((pte_t *) ((((uint64_t) ((pte_t *) (ptl0))[(i)].addr_12_31) << 12) | \
-
 
101
        (((uint64_t) ((pte_t *) (ptl0))[(i)].addr_32_51) << 32)))
-
 
102
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) \
96
#define GET_PTL2_ADDRESS_ARCH(ptl1, i)      ((pte_t *) ((((uint64_t) ((pte_t *)(ptl1))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl1))[(i)].addr_32_51)<<32 )))
103
    ((pte_t *) ((((uint64_t) ((pte_t *) (ptl1))[(i)].addr_12_31) << 12) | \
-
 
104
        (((uint64_t) ((pte_t *) (ptl1))[(i)].addr_32_51) << 32)))
-
 
105
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) \
97
#define GET_PTL3_ADDRESS_ARCH(ptl2, i)      ((pte_t *) ((((uint64_t) ((pte_t *)(ptl2))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl2))[(i)].addr_32_51)<<32 )))
106
    ((pte_t *) ((((uint64_t) ((pte_t *) (ptl2))[(i)].addr_12_31) << 12) | \
-
 
107
        (((uint64_t) ((pte_t *) (ptl2))[(i)].addr_32_51) << 32)))
-
 
108
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) \
-
 
109
    ((uintptr_t *) \
98
#define GET_FRAME_ADDRESS_ARCH(ptl3, i)     ((uintptr_t *) ((((uint64_t) ((pte_t *)(ptl3))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl3))[(i)].addr_32_51)<<32 )))
110
        ((((uint64_t) ((pte_t *) (ptl3))[(i)].addr_12_31) << 12) | \
-
 
111
        (((uint64_t) ((pte_t *) (ptl3))[(i)].addr_32_51) << 32)))
99
 
112
 
-
 
113
/* Set PTE address accessors for each level. */
-
 
114
#define SET_PTL0_ADDRESS_ARCH(ptl0) \
-
 
115
    (write_cr3((uintptr_t) (ptl0)))
100
#define SET_PTL0_ADDRESS_ARCH(ptl0)     (write_cr3((uintptr_t) (ptl0)))
116
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
101
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a)   set_pt_addr((pte_t *)(ptl0), (index_t)(i), a)
117
    set_pt_addr((pte_t *) (ptl0), (index_t) (i), a)
-
 
118
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a) \
102
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)       set_pt_addr((pte_t *)(ptl1), (index_t)(i), a)
119
    set_pt_addr((pte_t *) (ptl1), (index_t) (i), a)
-
 
120
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a) \
103
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)       set_pt_addr((pte_t *)(ptl2), (index_t)(i), a)
121
    set_pt_addr((pte_t *) (ptl2), (index_t) (i), a)
-
 
122
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
104
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a)  set_pt_addr((pte_t *)(ptl3), (index_t)(i), a)
123
    set_pt_addr((pte_t *) (ptl3), (index_t) (i), a)
105
 
124
 
-
 
125
/* Get PTE flags accessors for each level. */
-
 
126
#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
106
#define GET_PTL1_FLAGS_ARCH(ptl0, i)        get_pt_flags((pte_t *)(ptl0), (index_t)(i))
127
    get_pt_flags((pte_t *) (ptl0), (index_t) (i))
-
 
128
#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
107
#define GET_PTL2_FLAGS_ARCH(ptl1, i)        get_pt_flags((pte_t *)(ptl1), (index_t)(i))
129
    get_pt_flags((pte_t *) (ptl1), (index_t) (i))
-
 
130
#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
108
#define GET_PTL3_FLAGS_ARCH(ptl2, i)        get_pt_flags((pte_t *)(ptl2), (index_t)(i))
131
    get_pt_flags((pte_t *) (ptl2), (index_t) (i))
-
 
132
#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
109
#define GET_FRAME_FLAGS_ARCH(ptl3, i)       get_pt_flags((pte_t *)(ptl3), (index_t)(i))
133
    get_pt_flags((pte_t *) (ptl3), (index_t) (i))
110
 
134
 
-
 
135
/* Set PTE flags accessors for each level. */
-
 
136
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
111
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x)     set_pt_flags((pte_t *)(ptl0), (index_t)(i), (x))
137
    set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x))
-
 
138
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x) \
112
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)         set_pt_flags((pte_t *)(ptl1), (index_t)(i), (x))
139
    set_pt_flags((pte_t *) (ptl1), (index_t) (i), (x))
-
 
140
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) \
113
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)         set_pt_flags((pte_t *)(ptl2), (index_t)(i), (x))
141
    set_pt_flags((pte_t *) (ptl2), (index_t) (i), (x))
-
 
142
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
114
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x)    set_pt_flags((pte_t *)(ptl3), (index_t)(i), (x))
143
    set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))
115
 
144
 
-
 
145
/* Macros for querying the last-level PTE entries. */
-
 
146
#define PTE_VALID_ARCH(p) \
116
#define PTE_VALID_ARCH(p)           (*((uint64_t *) (p)) != 0)
147
    (*((uint64_t *) (p)) != 0)
117
#define PTE_PRESENT_ARCH(p)         ((p)->present != 0)
148
#define PTE_PRESENT_ARCH(p) \
-
 
149
    ((p)->present != 0)
-
 
150
#define PTE_GET_FRAME_ARCH(p) \
118
#define PTE_GET_FRAME_ARCH(p)           ((((uintptr_t)(p)->addr_12_31)<<12) | ((uintptr_t)(p)->addr_32_51<<32))
151
    ((((uintptr_t) (p)->addr_12_31) << 12) | \
-
 
152
        ((uintptr_t) (p)->addr_32_51 << 32))
119
#define PTE_WRITABLE_ARCH(p)            ((p)->writeable != 0)
153
#define PTE_WRITABLE_ARCH(p) \
-
 
154
    ((p)->writeable != 0)
120
#define PTE_EXECUTABLE_ARCH(p)          ((p)->no_execute == 0)
155
#define PTE_EXECUTABLE_ARCH(p) \
-
 
156
    ((p)->no_execute == 0)
121
 
157
 
122
#ifndef __ASM__
158
#ifndef __ASM__
123
 
159
 
124
/* Page fault error codes. */
160
/* Page fault error codes. */
125
 
161
 
126
/** When bit on this position is 0, the page fault was caused by a not-present page. */
162
/** When bit on this position is 0, the page fault was caused by a not-present
-
 
163
 * page.
-
 
164
 */
127
#define PFERR_CODE_P            (1<<0)  
165
#define PFERR_CODE_P            (1 << 0)  
128
 
166
 
129
/** When bit on this position is 1, the page fault was caused by a write. */
167
/** When bit on this position is 1, the page fault was caused by a write. */
130
#define PFERR_CODE_RW           (1<<1)
168
#define PFERR_CODE_RW           (1 << 1)
131
 
169
 
132
/** When bit on this position is 1, the page fault was caused in user mode. */
170
/** When bit on this position is 1, the page fault was caused in user mode. */
133
#define PFERR_CODE_US           (1<<2)
171
#define PFERR_CODE_US           (1 << 2)
134
 
172
 
135
/** When bit on this position is 1, a reserved bit was set in page directory. */
173
/** When bit on this position is 1, a reserved bit was set in page directory. */
136
#define PFERR_CODE_RSVD         (1<<3)
174
#define PFERR_CODE_RSVD         (1 << 3)
137
 
175
 
138
/** When bit on this position is 1, the page fault was caused during instruction fetch. */
176
/** When bit on this position is 1, the page fault was caused during instruction
 * fetch.
 */
139
#define PFERR_CODE_ID       (1<<4)
179
#define PFERR_CODE_ID       (1 << 4)
140
 
180
 
141
/** Translate the hardware bits of a page table entry into generic flags.
 *
 * @param pt Page table.
 * @param i  Index of the entry to query.
 *
 * @return Flags encoded with the generic PAGE_* shifts. Note that the
 *         cacheable, present and executable bits are active-low in the
 *         hardware entry and are therefore negated here.
 */
static inline int get_pt_flags(pte_t *pt, index_t i)
{
    pte_t *entry = &pt[i];
    int flags = 1 << PAGE_READ_SHIFT;  /* pages are always readable */

    flags |= (!entry->page_cache_disable) << PAGE_CACHEABLE_SHIFT;
    flags |= (!entry->present) << PAGE_PRESENT_SHIFT;
    flags |= entry->uaccessible << PAGE_USER_SHIFT;
    flags |= entry->writeable << PAGE_WRITE_SHIFT;
    flags |= (!entry->no_execute) << PAGE_EXEC_SHIFT;
    flags |= entry->global << PAGE_GLOBAL_SHIFT;

    return flags;
}
155
 
193
 
156
/** Store a frame address into a page table entry.
 *
 * The 40-bit frame number is split across the addr_12_31 and
 * addr_32_51 bit fields of the entry.
 *
 * @param pt Page table.
 * @param i  Index of the entry to modify.
 * @param a  Physical address to store; assumed page-aligned
 *           (bits 0-11 are discarded).
 */
static inline void set_pt_addr(pte_t *pt, index_t i, uintptr_t a)
{
    pte_t *entry = &pt[i];

    entry->addr_12_31 = (a >> 12) & 0xfffff;
    entry->addr_32_51 = a >> 32;
}
163
 
201
 
164
/** Program the hardware bits of a page table entry from generic flags.
 *
 * @param pt    Page table.
 * @param i     Index of the entry to modify.
 * @param flags Flags encoded with the generic PAGE_* bits. Cacheable,
 *              present and executable are active-low in hardware, so
 *              they are negated on the way in.
 */
static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
{
    pte_t *entry = &pt[i];

    entry->page_cache_disable = !(flags & PAGE_CACHEABLE);
    entry->present = !(flags & PAGE_NOT_PRESENT);
    entry->uaccessible = (flags & PAGE_USER) != 0;
    entry->writeable = (flags & PAGE_WRITE) != 0;
    entry->no_execute = (flags & PAGE_EXEC) == 0;
    entry->global = (flags & PAGE_GLOBAL) != 0;

    /*
     * Keep the entry non-zero even when the present bit is cleared,
     * so that PTE_VALID_ARCH() can tell it apart from a never-used
     * (all-zero) entry.
     */
    entry->soft_valid = 1;
}
180
 
218
 
181
extern void page_arch_init(void);
219
extern void page_arch_init(void);
182
extern void page_fault(int n, istate_t *istate);
220
extern void page_fault(int n, istate_t *istate);
183
 
221
 
184
#endif /* __ASM__ */
222
#endif /* __ASM__ */
185
 
223
 
186
#endif /* KERNEL */
224
#endif /* KERNEL */
187
 
225
 
188
#endif
226
#endif
189
 
227
 
190
/** @}
228
/** @}
191
 */
229
 */
192
 
230