/*
 * Copyright (C) 2006 Sergey Bondari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file    elf.c
 * @brief   Kernel ELF loader.
 */

#include <elf.h>
#include <debug.h>
#include <arch/types.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>

static char *error_codes[] = {
    "no error",
    "invalid image",
    "address space error",
    "incompatible image",
    "unsupported image type",
    "irrecoverable error"
};

static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as);
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);

static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, __address page, __address frame);

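/*
 * Memory backend for ELF-backed address space areas. It is attached to
 * areas created in load_segment(); elf_page_fault() then maps pages of
 * the segment on demand and elf_frame_free() releases their backing
 * frames when they are no longer needed.
 */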
mem_backend_t elf_backend = {
    .backend_page_fault = elf_page_fault,
    .backend_frame_free = elf_frame_free
};

/** ELF loader
 *
 * @param header Pointer to ELF header in memory
 * @param as Created and properly mapped address space
 * @return EE_OK on success
 */
int elf_load(elf_header_t *header, as_t *as)
{
    int i, rc;

    /* Identify ELF */
    if (header->e_ident[EI_MAG0] != ELFMAG0 || header->e_ident[EI_MAG1] != ELFMAG1 ||
        header->e_ident[EI_MAG2] != ELFMAG2 || header->e_ident[EI_MAG3] != ELFMAG3) {
        return EE_INVALID;
    }

    /* Identify ELF compatibility */
    if (header->e_ident[EI_DATA] != ELF_DATA_ENCODING || header->e_machine != ELF_MACHINE ||
        header->e_ident[EI_VERSION] != EV_CURRENT || header->e_version != EV_CURRENT ||
        header->e_ident[EI_CLASS] != ELF_CLASS) {
        return EE_INCOMPATIBLE;
    }

    if (header->e_phentsize != sizeof(elf_segment_header_t))
        return EE_INCOMPATIBLE;

    if (header->e_shentsize != sizeof(elf_section_header_t))
        return EE_INCOMPATIBLE;

    /* Check if the object type is supported. */
    if (header->e_type != ET_EXEC)
        return EE_UNSUPPORTED;

    /* Walk through all segment headers and process them. */
    for (i = 0; i < header->e_phnum; i++) {
        rc = segment_header(&((elf_segment_header_t *)(((__u8 *) header) + header->e_phoff))[i], header, as);
        if (rc != EE_OK)
            return rc;
    }

    /* Inspect all section headers and process them. */
    for (i = 0; i < header->e_shnum; i++) {
        rc = section_header(&((elf_section_header_t *)(((__u8 *) header) + header->e_shoff))[i], header, as);
        if (rc != EE_OK)
            return rc;
    }

    return EE_OK;
}
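
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * caller with an ELF image already present in memory at `image` and a
 * prepared address space `as` might drive the loader as follows. The
 * function name and the use of printf() are assumptions for illustration.
 */
#if 0
static int example_spawn(void *image, as_t *as)
{
    int rc;

    rc = elf_load((elf_header_t *) image, as);
    if (rc != EE_OK) {
        printf("ELF loader error: %s\n", elf_error(rc));
        return rc;
    }
    return EE_OK;
}
#endif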

/** Print error message according to error code.
 *
 * @param rc Return code returned by elf_load().
 *
 * @return NULL-terminated description of error.
 */
char *elf_error(int rc)
{
    ASSERT(rc < sizeof(error_codes)/sizeof(char *));

    return error_codes[rc];
}

/** Process segment header.
 *
 * @param entry Segment header.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as)
{
    switch (entry->p_type) {
        case PT_NULL:
        case PT_PHDR:
        break;
        case PT_LOAD:
        return load_segment(entry, elf, as);
        break;
        case PT_DYNAMIC:
        case PT_INTERP:
        case PT_SHLIB:
        case PT_NOTE:
        case PT_LOPROC:
        case PT_HIPROC:
        default:
        return EE_UNSUPPORTED;
        break;
    }
    return EE_OK;
}

/** Load segment described by program header entry.
 *
 * @param entry Program header entry describing segment to be loaded.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as)
{
    as_area_t *a;
    int flags = 0;
    void *backend_data[2] = { elf, entry };

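    /*
     * The ELF specification requires p_vaddr and p_offset to be congruent
     * modulo p_align; refuse images that violate this.
     */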
    if (entry->p_align > 1) {
        if ((entry->p_offset % entry->p_align) != (entry->p_vaddr % entry->p_align)) {
            return EE_INVALID;
        }
    }

    if (entry->p_flags & PF_X)
        flags |= AS_AREA_EXEC;
    if (entry->p_flags & PF_W)
        flags |= AS_AREA_WRITE;
    if (entry->p_flags & PF_R)
        flags |= AS_AREA_READ;

    /*
     * Check if the virtual address starts on page boundary.
     */
    if (ALIGN_UP(entry->p_vaddr, PAGE_SIZE) != entry->p_vaddr)
        return EE_UNSUPPORTED;

    a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data);
    if (!a)
        return EE_MEMORY;

    /*
     * The segment will be mapped on demand by elf_page_fault().
     */

    return EE_OK;
}

/** Process section header.
 *
 * @param entry Section header.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as)
{
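    /*
     * No section types are handled specially at the moment;
     * all sections are accepted as they are.
     */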
    switch (entry->sh_type) {
        default:
        break;
    }

    return EE_OK;
}

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must already be locked.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
    elf_header_t *elf = (elf_header_t *) area->backend_data[0];
    elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
    __address base, frame;
    index_t i;

    ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
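    /*
     * Compute the index of the faulting page within the segment and the
     * kernel virtual address of the segment's data in the ELF image.
     */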
    i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
    base = (__address) (((void *) elf) + entry->p_offset);
    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

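    /*
     * The faulting page lies in one of up to three regions of the segment,
     * each handled by one branch below:
     *
     *   p_vaddr ........ p_vaddr + p_filesz ........ p_vaddr + p_memsz
     *   | backed by ELF image | mixed page (if any) | anonymous, zeroed |
     */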
    if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
        /*
         * Initialized portion of the segment. The memory is backed
         * directly by the content of the ELF image. Pages are
         * only copied if the segment is writable so that there
         * can be multiple instantiations of the same ELF image
         * in memory at a time. Note that this could later be done
         * as COW.
         */
        if (entry->p_flags & PF_W) {
            frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
            memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
        } else {
            frame = KA2PA(base + i*FRAME_SIZE);
        }
    } else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
        /*
         * This is the uninitialized portion of the segment.
         * It is not physically present in the ELF image.
         * To resolve the situation, a frame must be allocated
         * and cleared.
         */
        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame), FRAME_SIZE, 0);
    } else {
        size_t size;
        /*
         * The mixed case.
         * The lower part is backed by the ELF image and
         * the upper part is anonymous memory.
         */
        size = entry->p_filesz - (i<<PAGE_WIDTH);
        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
        memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
    }

    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
        panic("Could not insert used space.\n");

    return AS_PF_OK;
}

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must already be locked.
 *
 * @param area Pointer to the address space area.
 * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
 * @param frame Frame to be released.
 *
 */
void elf_frame_free(as_area_t *area, __address page, __address frame)
{
    elf_header_t *elf = (elf_header_t *) area->backend_data[0];
    elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
    __address base;
    index_t i;

    ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
    i = (page - entry->p_vaddr) >> PAGE_WIDTH;
    base = (__address) (((void *) elf) + entry->p_offset);
    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

    if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
        if (entry->p_flags & PF_W) {
            /*
             * Free the frame with the copy of writable segment data.
             */
            frame_free(ADDR2PFN(frame));
        }
    } else {
        /*
         * The frame is either anonymous memory or the mixed case (i.e. lower
         * part is backed by the ELF image and the upper is anonymous).
         * In any case, a frame needs to be freed.
         */
        frame_free(ADDR2PFN(frame));
    }
}