Subversion Repositories HelenOS

Rev 3940 → Rev 3973

Rev 3973 removes the <genarch/drivers/ega/ega.h>, <genarch/drivers/legacy/ia32/io.h> and <ddi/ddi.h> includes, the dev_area and ega_area parea_t declarations, and the hw_area() function; the rest of the file is unchanged. The listing below is the rev 3940 version.
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64mm
 * @{
 */
/** @file
 */

#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/drivers/ega/ega.h>
#include <genarch/drivers/legacy/ia32/io.h>
#include <arch/mm/frame.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <config.h>
#include <memstr.h>
#include <interrupt.h>
#include <print.h>
#include <panic.h>
#include <align.h>
#include <ddi/ddi.h>

/** Physical memory area for devices. */
static parea_t dev_area;
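/** Physical memory area for EGA video RAM. */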
static parea_t ega_area;

/* Definitions for identity page mapper */
pte_t helper_ptl1[512] __attribute__((aligned (PAGE_SIZE)));
pte_t helper_ptl2[512] __attribute__((aligned (PAGE_SIZE)));
pte_t helper_ptl3[512] __attribute__((aligned (PAGE_SIZE)));
extern pte_t ptl_0; /* From boot.S */

#define PTL1_PRESENT(ptl0, page) (!(GET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
#define PTL2_PRESENT(ptl1, page) (!(GET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
#define PTL3_PRESENT(ptl2, page) (!(GET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))

#define PTL1_ADDR(ptl0, page) ((pte_t *)PA2KA(GET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page))))
#define PTL2_ADDR(ptl1, page) ((pte_t *)PA2KA(GET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page))))
#define PTL3_ADDR(ptl2, page) ((pte_t *)PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page))))

#define SETUP_PTL1(ptl0, page, tgt)  {  \
    SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    }
#define SETUP_PTL2(ptl1, page, tgt)  {  \
    SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    }
#define SETUP_PTL3(ptl2, page, tgt)  {  \
    SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    }
#define SETUP_FRAME(ptl3, page, tgt)  { \
    SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    }
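
/*
 * PTL0 through PTL3 are the generic names used here for the four levels of
 * the amd64 paging hierarchy (PML4, PDPT, page directory and page table).
 * The SETUP_* helpers above install an entry at the given level and mark
 * it writable and executable.
 */

/** Initialize the page subsystem.
 *
 * On the bootstrap processor this selects the generic page table mapping
 * operations, builds the PA2KA identity mapping for all physical frames,
 * maps the kernel image, the boot stack and the preloaded init tasks,
 * registers the page fault handler and finally loads the kernel PTL0 into
 * CR3. Application processors only load CR3.
 */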
void page_arch_init(void)
{
    uintptr_t cur;
    unsigned int i;
    int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;

    if (config.cpu_active == 1) {
        page_mapping_operations = &pt_mapping_operations;

        /*
         * PA2KA(identity) mapping for all frames.
         */
        for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
            /* Standard identity mapping */
            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);
        }

        /* Upper kernel mapping
         * - from zero to the top of the kernel (including bottom addresses,
         *   because some of them are needed for init)
         */
        for (cur = PA2KA_CODE(0); cur < config.base + config.kernel_size; cur += FRAME_SIZE)
            page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);
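
        /* Map the boot stack. */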
        for (cur = config.stack_base; cur < config.stack_base + config.stack_size; cur += FRAME_SIZE)
            page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);
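
        /* Map the preloaded init tasks. */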
        for (i = 0; i < init.cnt; i++) {
            for (cur = init.tasks[i].addr; cur < init.tasks[i].addr + init.tasks[i].size; cur += FRAME_SIZE)
                page_mapping_insert(AS_KERNEL, PA2KA_CODE(KA2PA(cur)), KA2PA(cur), identity_flags);
        }
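
        /* Register the page fault handler and switch to the kernel page table. */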
        exc_register(14, "page_fault", (iroutine) page_fault);
        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
    } else
        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}

/** Identity page mapper
 *
 * We need to map the whole physical memory identically before the page
 * subsystem is initialized. This handler unmaps the previously installed
 * temporary mapping and fills in the entries needed to identity-map the
 * faulting page, reusing the statically allocated helper page tables.
 */
void ident_page_fault(int n, istate_t *istate)
{
    uintptr_t page;
    static uintptr_t oldpage = 0;
    pte_t *aptl_1, *aptl_2, *aptl_3;

    page = read_cr2();
    if (oldpage) {
        /* Unmap old address */
        aptl_1 = PTL1_ADDR(&ptl_0, oldpage);
        aptl_2 = PTL2_ADDR(aptl_1, oldpage);
        aptl_3 = PTL3_ADDR(aptl_2, oldpage);

        SET_FRAME_FLAGS_ARCH(aptl_3, PTL3_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
        if (KA2PA(aptl_3) == KA2PA(helper_ptl3))
            SET_PTL3_FLAGS_ARCH(aptl_2, PTL2_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
        if (KA2PA(aptl_2) == KA2PA(helper_ptl2))
            SET_PTL2_FLAGS_ARCH(aptl_1, PTL1_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
        if (KA2PA(aptl_1) == KA2PA(helper_ptl1))
            SET_PTL1_FLAGS_ARCH(&ptl_0, PTL0_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
    }
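
    /*
     * Walk the paging hierarchy for the faulting page and, for every level
     * that is not present, borrow one of the statically allocated helper
     * page tables.
     */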
    if (PTL1_PRESENT(&ptl_0, page))
        aptl_1 = PTL1_ADDR(&ptl_0, page);
    else {
        SETUP_PTL1(&ptl_0, page, helper_ptl1);
        aptl_1 = helper_ptl1;
    }

    if (PTL2_PRESENT(aptl_1, page))
        aptl_2 = PTL2_ADDR(aptl_1, page);
    else {
        SETUP_PTL2(aptl_1, page, helper_ptl2);
        aptl_2 = helper_ptl2;
    }

    if (PTL3_PRESENT(aptl_2, page))
        aptl_3 = PTL3_ADDR(aptl_2, page);
    else {
        SETUP_PTL3(aptl_2, page, helper_ptl3);
        aptl_3 = helper_ptl3;
    }

    SETUP_FRAME(aptl_3, page, page);

    oldpage = page;
}
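
/** Page fault handler.
 *
 * Decodes the error code pushed by the CPU: a reserved bit set in a page
 * table entry is fatal, the RW bit marks a write access and the ID bit an
 * instruction fetch; everything else is treated as a read. The fault is
 * then passed to the address space code and an unresolved fault ends in
 * panic().
 */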
void page_fault(int n, istate_t *istate)
{
    uintptr_t page;
    pf_access_t access;

    page = read_cr2();

    if (istate->error_word & PFERR_CODE_RSVD)
        panic("Reserved bit set in page table entry.");

    if (istate->error_word & PFERR_CODE_RW)
        access = PF_ACCESS_WRITE;
    else if (istate->error_word & PFERR_CODE_ID)
        access = PF_ACCESS_EXEC;
    else
        access = PF_ACCESS_READ;

    if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
        fault_if_from_uspace(istate, "Page fault: %#x.", page);

        decode_istate(n, istate);
        printf("Page fault address: %llx.\n", page);
        panic("Page fault.");
    }
}
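
/** Map a physical memory region for hardware access.
 *
 * Creates a non-cacheable, writable mapping of size bytes starting at
 * physaddr in the kernel address space, just above the last physical
 * frame, and returns the corresponding virtual address.
 */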
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
        panic("Unable to map physical memory %p (%d bytes).", physaddr, size);

    uintptr_t virtaddr = PA2KA(last_frame);
    pfn_t i;
    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);

    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);

    return virtaddr;
}
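
/*
 * Typical use (hypothetical device address, for illustration only):
 * mapping 4 KiB of memory-mapped registers at physical address 0xfee00000
 * would be written as
 *
 *     uintptr_t regs = hw_map(0xfee00000, PAGE_SIZE);
 *
 * and the registers would then be accessible through the returned
 * uncached virtual address.
 */

/** Register physical memory areas with the DDI framework.
 *
 * dev_area covers all physical memory above end_frame and ega_area covers
 * the EGA video RAM; both are registered via ddi_parea_register().
 */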
void hw_area(void)
{
    dev_area.pbase = end_frame;
    dev_area.frames = SIZE2FRAMES(0xfffffffffffff - end_frame);
    ddi_parea_register(&dev_area);

    ega_area.pbase = EGA_VIDEORAM;
    ega_area.frames = SIZE2FRAMES(EGA_VRAM_SIZE);
    ddi_parea_register(&ega_area);
}

/** @}
 */