/*
 * Copyright (C) 2006 Sergey Bondari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file    elf.c
 * @brief   Kernel ELF loader.
 */

#include <elf.h>
#include <debug.h>
#include <arch/types.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>

static char *error_codes[] = {
    "no error",
    "invalid image",
    "address space error",
    "incompatible image",
    "unsupported image type",
    "irrecoverable error"
};

static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as);
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);

static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, __address page, __address frame);

mem_backend_t elf_backend = {
    .backend_page_fault = elf_page_fault,
    .backend_frame_free = elf_frame_free
};

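/*
 * Added commentary: this backend is handed to as_area_create() by
 * load_segment() below, so page faults in, and frame releases from, an
 * ELF-backed address space area are routed to elf_page_fault() and
 * elf_frame_free() respectively.
 */
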
/** ELF loader
 *
 * @param header Pointer to ELF header in memory
 * @param as Created and properly mapped address space
 * @return EE_OK on success, error code otherwise
 */
int elf_load(elf_header_t *header, as_t *as)
{
    int i, rc;

    /* Identify ELF */
    if (header->e_ident[EI_MAG0] != ELFMAG0 || header->e_ident[EI_MAG1] != ELFMAG1 ||
        header->e_ident[EI_MAG2] != ELFMAG2 || header->e_ident[EI_MAG3] != ELFMAG3) {
        return EE_INVALID;
    }

    /* Identify ELF compatibility */
    if (header->e_ident[EI_DATA] != ELF_DATA_ENCODING || header->e_machine != ELF_MACHINE ||
        header->e_ident[EI_VERSION] != EV_CURRENT || header->e_version != EV_CURRENT ||
        header->e_ident[EI_CLASS] != ELF_CLASS) {
        return EE_INCOMPATIBLE;
    }

    if (header->e_phentsize != sizeof(elf_segment_header_t))
        return EE_INCOMPATIBLE;

    if (header->e_shentsize != sizeof(elf_section_header_t))
        return EE_INCOMPATIBLE;

    /* Check if the object type is supported. */
    if (header->e_type != ET_EXEC)
        return EE_UNSUPPORTED;

    /* Walk through all segment headers and process them. */
    for (i = 0; i < header->e_phnum; i++) {
        rc = segment_header(&((elf_segment_header_t *)(((__u8 *) header) + header->e_phoff))[i], header, as);
        if (rc != EE_OK)
            return rc;
    }

    /* Inspect all section headers and process them. */
    for (i = 0; i < header->e_shnum; i++) {
        rc = section_header(&((elf_section_header_t *)(((__u8 *) header) + header->e_shoff))[i], header, as);
        if (rc != EE_OK)
            return rc;
    }

    return EE_OK;
}

/** Return error message corresponding to an error code.
 *
 * @param rc Return code returned by elf_load().
 *
 * @return NULL-terminated description of the error.
 */
char *elf_error(int rc)
{
    ASSERT(rc < sizeof(error_codes) / sizeof(char *));

    return error_codes[rc];
}

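/*
 * Usage sketch (added commentary, not part of the original file; the names
 * `image` and `as` are hypothetical): with the ELF image mapped at `image`
 * and a created address space `as`, a caller would typically do:
 *
 *  int rc = elf_load((elf_header_t *) image, as);
 *  if (rc != EE_OK)
 *      printf("ELF load failed: %s\n", elf_error(rc));
 */
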
/** Process segment header.
 *
 * @param entry Segment header.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as)
{
    switch (entry->p_type) {
        case PT_NULL:
        case PT_PHDR:
            break;
        case PT_LOAD:
            return load_segment(entry, elf, as);
        case PT_DYNAMIC:
        case PT_INTERP:
        case PT_SHLIB:
        case PT_NOTE:
        case PT_LOPROC:
        case PT_HIPROC:
        default:
            return EE_UNSUPPORTED;
    }
    return EE_OK;
}

/** Load segment described by program header entry.
 *
 * @param entry Program header entry describing segment to be loaded.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as)
{
    as_area_t *a;
    int flags = 0;
    void *backend_data[2] = { elf, entry };

    if (entry->p_align > 1) {
        if ((entry->p_offset % entry->p_align) != (entry->p_vaddr % entry->p_align)) {
            return EE_INVALID;
        }
    }

    if (entry->p_flags & PF_X)
        flags |= AS_AREA_EXEC;
    if (entry->p_flags & PF_W)
        flags |= AS_AREA_WRITE;
    if (entry->p_flags & PF_R)
        flags |= AS_AREA_READ;

    /*
     * Check if the virtual address starts on a page boundary.
     */
    if (ALIGN_UP(entry->p_vaddr, PAGE_SIZE) != entry->p_vaddr)
        return EE_UNSUPPORTED;

    a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data);
    if (!a)
        return EE_MEMORY;

    /*
     * The segment will be mapped on demand by elf_page_fault().
     */

    return EE_OK;
}

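/*
 * Worked example for the p_align check in load_segment() (added commentary):
 * with p_align == 0x1000, a segment with p_offset == 0x2080 and
 * p_vaddr == 0x10080 passes, since 0x2080 % 0x1000 == 0x10080 % 0x1000 == 0x80,
 * whereas the same offset with p_vaddr == 0x10000 would be rejected with
 * EE_INVALID.
 */
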
/** Process section header.
 *
 * @param entry Section header.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as)
{
    /* No section type is handled specially; sections are ignored. */
    switch (entry->sh_type) {
        default:
            break;
    }

    return EE_OK;
}

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
    elf_header_t *elf = (elf_header_t *) area->backend_data[0];
    elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
    __address base, frame;
    index_t i;

    if (!as_area_check_access(area, access))
        return AS_PF_FAULT;

    ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
    i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
    base = (__address) (((void *) elf) + entry->p_offset);
    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

    if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
        /*
         * Initialized portion of the segment. The memory is backed
         * directly by the content of the ELF image. Pages are
         * only copied if the segment is writable, so that several
         * instances of the same ELF image can be in use at a time.
         * Note that this could later be done as COW.
         */
        if (entry->p_flags & PF_W) {
            frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
            memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
        } else {
            frame = KA2PA(base + i * FRAME_SIZE);
        }
    } else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
        /*
         * This is the uninitialized portion of the segment.
         * It is not physically present in the ELF image.
         * To resolve the situation, a frame must be allocated
         * and cleared.
         */
        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame), FRAME_SIZE, 0);
    } else {
        size_t size;
        /*
         * The mixed case.
         * The lower part is backed by the ELF image and
         * the upper part is anonymous memory.
         */
        size = entry->p_filesz - (i << PAGE_WIDTH);
        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
        memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE), size);
    }

    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
        panic("Could not insert used space.\n");

    return AS_PF_OK;
}

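/*
 * Worked example for the three cases above (added commentary, assuming
 * PAGE_SIZE == FRAME_SIZE == 4 KiB): take a segment with p_vaddr == 0x10000,
 * p_filesz == 0x1800 and p_memsz == 0x3000. A fault at 0x10000 hits the
 * initialized case (0x10000 + PAGE_SIZE == 0x11000 < 0x11800); a fault at
 * 0x12000 hits the anonymous case (0x12000 >= ALIGN_UP(0x11800, PAGE_SIZE));
 * a fault at 0x11000 is the mixed case, where i == 1 and the first
 * size == 0x1800 - 0x1000 == 0x800 bytes come from the image, the rest
 * being zeroed.
 */
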
/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
 * @param frame Frame to be released.
 */
static void elf_frame_free(as_area_t *area, __address page, __address frame)
{
    elf_header_t *elf = (elf_header_t *) area->backend_data[0];
    elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
    __address base;
    index_t i;

    ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
    i = (page - entry->p_vaddr) >> PAGE_WIDTH;
    base = (__address) (((void *) elf) + entry->p_offset);
    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

    if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
        if (entry->p_flags & PF_W) {
            /*
             * Free the frame with the copy of writable segment data.
             */
            frame_free(ADDR2PFN(frame));
        }
    } else {
        /*
         * The frame is either anonymous memory or the mixed case (i.e. lower
         * part is backed by the ELF image and the upper is anonymous).
         * In any case, a frame needs to be freed.
         */
        frame_free(ADDR2PFN(frame));
    }
}