/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief   Backend for address space areas backed by an ELF image.
 */

#include <lib/elf.h>
#include <debug.h>
#include <arch/types.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif

static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
static void elf_share(as_area_t *area);

mem_backend_t elf_backend = {
    .page_fault = elf_page_fault,
    .frame_free = elf_frame_free,
    .share = elf_share
};
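
/*
 * Illustrative sketch (assumed usage, not part of this file): a kernel
 * ELF loader would attach this backend when creating an address space
 * area for a program segment. The as_area_create() signature and the
 * names used here are assumed from the surrounding HelenOS tree of
 * this era.
 *
 *  mem_backend_data_t backend_data;
 *
 *  backend_data.elf = elf;          // ELF image header
 *  backend_data.segment = entry;    // program segment header
 *  (void) as_area_create(as, flags, entry->p_memsz, entry->p_vaddr,
 *      AS_AREA_ATTR_NONE, &elf_backend, &backend_data);
 */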

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
    elf_header_t *elf = area->backend_data.elf;
    elf_segment_header_t *entry = area->backend_data.segment;
    btree_node_t *leaf;
    uintptr_t base, frame;
    index_t i;

    if (!as_area_check_access(area, access))
        return AS_PF_FAULT;

    ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
    i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
    base = (uintptr_t) (((void *) elf) + entry->p_offset);
    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

    if (area->sh_info) {
        bool found = false;

        /*
         * The address space area is shared.
         */

        mutex_lock(&area->sh_info->lock);
        frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
            ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
        if (!frame) {
            int i;

            /*
             * Workaround for valid NULL address.
             */
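
            /*
             * btree_search() returns the stored frame address or NULL
             * when the key is missing; because physical address zero
             * is a legitimate frame here, the leaf keys are scanned to
             * distinguish "not mapped" from "mapped to frame 0".
             */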

            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE) - area->base) {
                    found = true;
                    break;
                }
            }
        }
        if (frame || found) {
            frame_reference_add(ADDR2PFN(frame));
            page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
            if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
                panic("Could not insert used space.\n");
            mutex_unlock(&area->sh_info->lock);
            return AS_PF_OK;
        }
    }

    /*
     * The area is either not shared or the pagemap does not contain the mapping.
     */

    if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
        /*
         * Initialized portion of the segment. The memory is backed
         * directly by the content of the ELF image. Pages are
         * only copied if the segment is writable so that there
         * can be multiple instances of the same ELF image in
         * memory at a time. Note that this could later be done
         * as COW.
         */
        if (entry->p_flags & PF_W) {
            frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
            memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);

            if (area->sh_info) {
                frame_reference_add(ADDR2PFN(frame));
                btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
                    (void *) frame, leaf);
            }

        } else {
            frame = KA2PA(base + i*FRAME_SIZE);
        }
    } else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
        /*
         * This is the uninitialized portion of the segment.
         * It is not physically present in the ELF image.
         * To resolve the situation, a frame must be allocated
         * and cleared.
         */
        frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
        memsetb(PA2KA(frame), FRAME_SIZE, 0);

        if (area->sh_info) {
            frame_reference_add(ADDR2PFN(frame));
            btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
                (void *) frame, leaf);
        }

    } else {
        size_t size;
        /*
         * The mixed case.
         * The lower part is backed by the ELF image and
         * the upper part is anonymous memory.
         */
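        /*
         * Worked example, assuming 4 KiB pages: with p_filesz == 0x2300
         * and the fault in page i == 2, size == 0x2300 - 0x2000 == 0x300;
         * the first 0x300 bytes of the frame are copied from the image
         * and the remaining 0xd00 bytes are zeroed.
         */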
        size = entry->p_filesz - (i<<PAGE_WIDTH);
        frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
        memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
        memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);

        if (area->sh_info) {
            frame_reference_add(ADDR2PFN(frame));
            btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
                (void *) frame, leaf);
        }

    }

    if (area->sh_info)
        mutex_unlock(&area->sh_info->lock);

    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
        panic("Could not insert used space.\n");

    return AS_PF_OK;
}
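
/*
 * Sketch of how elf_page_fault() is reached (assumed from the generic
 * memory management code; the dispatch itself lives outside this file):
 * as_page_fault() resolves the faulting address to an address space
 * area and hands the fault to the area's backend, roughly:
 *
 *  if (area->backend && area->backend->page_fault)
 *      return area->backend->page_fault(area, page, access);
 */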

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
 * @param frame Frame to be released.
 *
 */
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
    elf_header_t *elf = area->backend_data.elf;
    elf_segment_header_t *entry = area->backend_data.segment;
    uintptr_t base;
    index_t i;

    ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
    i = (page - entry->p_vaddr) >> PAGE_WIDTH;
    base = (uintptr_t) (((void *) elf) + entry->p_offset);
    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

    if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
        if (entry->p_flags & PF_W) {
            /*
             * Free the frame with the copy of writable segment data.
             */
            frame_free(frame);
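            /*
             * On configurations with a virtually indexed D-cache, flush
             * the cache lines belonging to the unmapped page so that no
             * stale aliases of the released frame survive.
             */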
#ifdef CONFIG_VIRT_IDX_DCACHE
            dcache_flush_frame(page, frame);
#endif
        }
    } else {
        /*
         * The frame is either anonymous memory or the mixed case (i.e. lower
         * part is backed by the ELF image and the upper is anonymous).
         * In any case, a frame needs to be freed.
         */
        frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
        dcache_flush_frame(page, frame);
#endif
    }
}

/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the pagemap.
 * Otherwise only portions of the area that are not backed by the ELF image
 * are put into the pagemap.
 *
 * The address space and address space area must be locked prior to the call.
 *
 * @param area Address space area.
 */
void elf_share(as_area_t *area)
{
    elf_segment_header_t *entry = area->backend_data.segment;
    link_t *cur;
    btree_node_t *leaf, *node;
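
    /*
     * The first address past the file-backed data, i.e. where the
     * anonymous (zero-initialized) portion of the segment begins.
     */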
    uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;

    /*
     * Find the node in which to start linear search.
     */
    if (area->flags & AS_AREA_WRITE) {
        node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
    } else {
        (void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
        node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap, leaf);
        if (!node)
            node = leaf;
    }

    /*
     * Copy used anonymous portions of the area to sh_info's page map.
     */
    mutex_lock(&area->sh_info->lock);
    for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; cur = cur->next) {
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);

        for (i = 0; i < node->keys; i++) {
            uintptr_t base = node->key[i];
            count_t count = (count_t) node->value[i];
            int j;

            /*
             * Skip read-only areas of used space that are backed
             * by the ELF image.
             */
            if (!(area->flags & AS_AREA_WRITE))
                if (base + count*PAGE_SIZE <= start_anon)
                    continue;

            for (j = 0; j < count; j++) {
                pte_t *pte;

                /*
                 * Skip read-only pages that are backed by the ELF image.
                 */
                if (!(area->flags & AS_AREA_WRITE))
                    if (base + (j + 1)*PAGE_SIZE <= start_anon)
                        continue;

                page_table_lock(area->as, false);
                pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
                    (void *) PTE_GET_FRAME(pte), NULL);
                page_table_unlock(area->as, false);
                frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
            }
        }
    }
    mutex_unlock(&area->sh_info->lock);
}

/** @}
 */