Subversion Repositories HelenOS-historic

Rev 1702 (the only change against Rev 1595 is the addition of the Doxygen @addtogroup/@file comment blocks)
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64mm amd64
 * @ingroup mm
 * @{
 */
/** @file
 * @ingroup amd64
 */

#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <config.h>
#include <memstr.h>
#include <interrupt.h>
#include <print.h>
#include <panic.h>
#include <align.h>

/* Definitions for identity page mapper */
pte_t helper_ptl1[512] __attribute__((aligned (PAGE_SIZE)));
pte_t helper_ptl2[512] __attribute__((aligned (PAGE_SIZE)));
pte_t helper_ptl3[512] __attribute__((aligned (PAGE_SIZE)));
extern pte_t ptl_0; /* From boot.S */
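/*
 * helper_ptl1..helper_ptl3 are statically allocated so that ident_page_fault()
 * can supply a missing page table level without allocating any frames;
 * ptl_0 is the root table prepared in boot.S.
 */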

#define PTL1_PRESENT(ptl0, page) (!(GET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
#define PTL2_PRESENT(ptl1, page) (!(GET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
#define PTL3_PRESENT(ptl2, page) (!(GET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))

#define PTL1_ADDR(ptl0, page) ((pte_t *)PA2KA(GET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page))))
#define PTL2_ADDR(ptl1, page) ((pte_t *)PA2KA(GET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page))))
#define PTL3_ADDR(ptl2, page) ((pte_t *)PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page))))

#define SETUP_PTL1(ptl0, page, tgt)  {  \
    SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
    SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    }
#define SETUP_PTL2(ptl1, page, tgt)  {  \
    SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
    SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    }
#define SETUP_PTL3(ptl2, page, tgt)  {  \
    SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
    SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    }
#define SETUP_FRAME(ptl3, page, tgt)  { \
    SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
    SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    }

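/*
 * On amd64 the generic 4-level page table interface maps PTL0 onto the PML4,
 * PTL1 onto the PDPT, PTL2 onto the page directory and PTL3 onto the page
 * table proper.
 */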
void page_arch_init(void)
{
    __address cur;
    int i;
    int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL;

    if (config.cpu_active == 1) {
        page_mapping_operations = &pt_mapping_operations;

        /*
         * PA2KA(identity) mapping for all frames.
         */
        for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
            /* Standard identity mapping */
            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);
        }
        /*
         * Upper kernel mapping - from zero up to the top of the kernel
         * (bottom addresses are included because some of them are needed
         * for init).
         */
        for (cur = PA2KA_CODE(0); cur < config.base + config.kernel_size; cur += FRAME_SIZE) {
            page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);
        }
        for (i = 0; i < init.cnt; i++) {
            for (cur = init.tasks[i].addr; cur < init.tasks[i].addr + init.tasks[i].size; cur += FRAME_SIZE) {
                page_mapping_insert(AS_KERNEL, PA2KA_CODE(KA2PA(cur)), KA2PA(cur), identity_flags);
            }
        }

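        /*
         * Install the page fault handler and activate the kernel page
         * tables by loading their physical address into CR3.
         */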
        exc_register(14, "page_fault", (iroutine)page_fault);
        write_cr3((__address) AS_KERNEL->page_table);
    } else {
        write_cr3((__address) AS_KERNEL->page_table);
    }
}

/** Identity page mapper
 *
 * We need to map the whole physical memory identically before the page
 * subsystem is initialized. This handler clears the page table entries
 * used for the previously faulting page and fills in the entries for the
 * current one.
 */
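/*
 * Each fault maps exactly one page on demand; the mapping created for the
 * previous fault is torn down first, so at most one such temporary mapping
 * exists at any time.
 */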
void ident_page_fault(int n, istate_t *istate)
{
    __address page;
    static __address oldpage = 0;
    pte_t *aptl_1, *aptl_2, *aptl_3;

    page = read_cr2();
    if (oldpage) {
        /* Unmap old address */
        aptl_1 = PTL1_ADDR(&ptl_0, oldpage);
        aptl_2 = PTL2_ADDR(aptl_1, oldpage);
        aptl_3 = PTL3_ADDR(aptl_2, oldpage);

        SET_FRAME_FLAGS_ARCH(aptl_3, PTL3_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
        if (KA2PA(aptl_3) == KA2PA(helper_ptl3))
            SET_PTL3_FLAGS_ARCH(aptl_2, PTL2_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
        if (KA2PA(aptl_2) == KA2PA(helper_ptl2))
            SET_PTL2_FLAGS_ARCH(aptl_1, PTL1_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
        if (KA2PA(aptl_1) == KA2PA(helper_ptl1))
            SET_PTL1_FLAGS_ARCH(&ptl_0, PTL0_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
    }
    if (PTL1_PRESENT(&ptl_0, page))
        aptl_1 = PTL1_ADDR(&ptl_0, page);
    else {
        SETUP_PTL1(&ptl_0, page, helper_ptl1);
        aptl_1 = helper_ptl1;
    }

    if (PTL2_PRESENT(aptl_1, page))
        aptl_2 = PTL2_ADDR(aptl_1, page);
    else {
        SETUP_PTL2(aptl_1, page, helper_ptl2);
        aptl_2 = helper_ptl2;
    }

    if (PTL3_PRESENT(aptl_2, page))
        aptl_3 = PTL3_ADDR(aptl_2, page);
    else {
        SETUP_PTL3(aptl_2, page, helper_ptl3);
        aptl_3 = helper_ptl3;
    }

    SETUP_FRAME(aptl_3, page, page);

    oldpage = page;
}

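/*
 * The PFERR_CODE_* bits come from the amd64 page fault error code pushed by
 * the CPU: RSVD means a reserved bit was set in a page table entry, RW means
 * the access was a write and ID means it was an instruction fetch.
 */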
void page_fault(int n, istate_t *istate)
{
    __address page;
    pf_access_t access;

    page = read_cr2();

    if (istate->error_word & PFERR_CODE_RSVD)
        panic("Reserved bit set in page table entry.\n");

    if (istate->error_word & PFERR_CODE_RW)
        access = PF_ACCESS_WRITE;
    else if (istate->error_word & PFERR_CODE_ID)
        access = PF_ACCESS_EXEC;
    else
        access = PF_ACCESS_READ;

    if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
        fault_if_from_uspace(istate, "Page fault: %#x", page);

        print_info_errcode(n, istate);
        printf("Page fault address: %llX\n", page);
        panic("page fault\n");
    }
}

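/*
 * Map a physical memory range (typically a memory mapped device) into the
 * kernel address space just above the last physical frame; the mapping is
 * created with caching disabled.
 */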
__address hw_map(__address physaddr, size_t size)
{
    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
        panic("Unable to map physical memory %p (%d bytes)", physaddr, size);

    __address virtaddr = PA2KA(last_frame);
    pfn_t i;
    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);

    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);

    return virtaddr;
}

/** @}
 */