Subversion Repositories HelenOS-historic

elf.c: comparison of Rev 1264 with Rev 1409 (kernel ELF loader)

Lines marked "-" appear only in Rev 1264, lines marked "+" were added in Rev 1409, and unmarked lines are common to both revisions.
/*
 * Copyright (C) 2006 Sergey Bondari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file    elf.c
 * @brief   Kernel ELF loader.
 */

#include <elf.h>
#include <debug.h>
#include <arch/types.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
+#include <arch.h>

static char *error_codes[] = {
    "no error",
    "invalid image",
    "address space error",
    "incompatible image",
    "unsupported image type",
    "irrecoverable error"
};

static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as);
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);

+static int elf_page_fault(as_area_t *area, __address addr);
+static void elf_frame_free(as_area_t *area, __address page, __address frame);
+
+mem_backend_t elf_backend = {
+    .backend_page_fault = elf_page_fault,
+    .backend_frame_free = elf_frame_free
+};
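(Editor's illustration, not part of either revision: the mem_backend_t introduced in Rev 1409 exposes the fault and frame-release handlers as function pointers, so generic address space code can service faults for an ELF-backed area without knowing about ELF. A minimal dispatch sketch, assuming hypothetical locals area and addr; everything else comes from the declarations above.)

    mem_backend_t *backend = &elf_backend;

    if (backend->backend_page_fault(area, addr) == AS_PF_OK) {
        /* The fault was serviced from the ELF image by elf_page_fault(). */
    }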
/** ELF loader
 *
 * @param header Pointer to ELF header in memory
 * @param as Created and properly mapped address space
 * @return EE_OK on success
 */
int elf_load(elf_header_t *header, as_t * as)
{
    int i, rc;

    /* Identify ELF */
    if (header->e_ident[EI_MAG0] != ELFMAG0 || header->e_ident[EI_MAG1] != ELFMAG1 ||
        header->e_ident[EI_MAG2] != ELFMAG2 || header->e_ident[EI_MAG3] != ELFMAG3) {
        return EE_INVALID;
    }

    /* Identify ELF compatibility */
    if (header->e_ident[EI_DATA] != ELF_DATA_ENCODING || header->e_machine != ELF_MACHINE ||
        header->e_ident[EI_VERSION] != EV_CURRENT || header->e_version != EV_CURRENT ||
        header->e_ident[EI_CLASS] != ELF_CLASS) {
        return EE_INCOMPATIBLE;
    }

    if (header->e_phentsize != sizeof(elf_segment_header_t))
        return EE_INCOMPATIBLE;

    if (header->e_shentsize != sizeof(elf_section_header_t))
        return EE_INCOMPATIBLE;

    /* Check if the object type is supported. */
    if (header->e_type != ET_EXEC)
        return EE_UNSUPPORTED;

    /* Walk through all segment headers and process them. */
    for (i = 0; i < header->e_phnum; i++) {
        rc = segment_header(&((elf_segment_header_t *)(((__u8 *) header) + header->e_phoff))[i], header, as);
        if (rc != EE_OK)
            return rc;
    }

    /* Inspect all section headers and process them. */
    for (i = 0; i < header->e_shnum; i++) {
        rc = section_header(&((elf_section_header_t *)(((__u8 *) header) + header->e_shoff))[i], header, as);
        if (rc != EE_OK)
            return rc;
    }

    return EE_OK;
}

/** Print error message according to error code.
 *
 * @param rc Return code returned by elf_load().
 *
 * @return NULL terminated description of error.
 */
char *elf_error(int rc)
{
    ASSERT(rc < sizeof(error_codes)/sizeof(char *));

    return error_codes[rc];
}
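(Editor's illustration, not part of either revision: a hypothetical call site pairing elf_load() with elf_error(); image is assumed to point to an ELF image already present in kernel memory and new_as to an address space prepared by the caller.)

    int rc = elf_load((elf_header_t *) image, new_as);

    if (rc != EE_OK)
        printf("ELF loading failed: %s\n", elf_error(rc));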
/** Process segment header.
 *
 * @param entry Segment header.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as)
{
    switch (entry->p_type) {
        case PT_NULL:
        case PT_PHDR:
        break;
        case PT_LOAD:
        return load_segment(entry, elf, as);
        break;
        case PT_DYNAMIC:
        case PT_INTERP:
        case PT_SHLIB:
        case PT_NOTE:
        case PT_LOPROC:
        case PT_HIPROC:
        default:
        return EE_UNSUPPORTED;
        break;
    }
    return EE_OK;
}

/** Load segment described by program header entry.
 *
 * @param entry Program header entry describing segment to be loaded.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as)
{
    as_area_t *a;
-    int i, flags = 0;
-    size_t segment_size;
-    __u8 *segment;
+    int flags = 0;
+    void *backend_data[2] = { elf, entry };

    if (entry->p_align > 1) {
        if ((entry->p_offset % entry->p_align) != (entry->p_vaddr % entry->p_align)) {
            return EE_INVALID;
        }
    }

    if (entry->p_flags & PF_X)
        flags |= AS_AREA_EXEC;
    if (entry->p_flags & PF_W)
        flags |= AS_AREA_WRITE;
    if (entry->p_flags & PF_R)
        flags |= AS_AREA_READ;

    /*
     * Check if the virtual address starts on page boundary.
     */
    if (ALIGN_UP(entry->p_vaddr, PAGE_SIZE) != entry->p_vaddr)
        return EE_UNSUPPORTED;

-    segment_size = ALIGN_UP(max(entry->p_filesz, entry->p_memsz), PAGE_SIZE);
-    if ((entry->p_flags & PF_W)) {
-        /* If writable, copy data (should be COW in the future) */
-        segment = malloc(segment_size, 0);
-        memsetb((__address) (segment + entry->p_filesz), segment_size - entry->p_filesz, 0);
-        memcpy(segment, (void *) (((__address) elf) + entry->p_offset), entry->p_filesz);
-    } else /* Map identically original data */
-        segment = ((void *) elf) + entry->p_offset;
-
-    a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE);
+    a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data);
    if (!a)
        return EE_MEMORY;

-    for (i = 0; i < SIZE2FRAMES(entry->p_filesz); i++) {
-        as_set_mapping(as, entry->p_vaddr + i*PAGE_SIZE, KA2PA(((__address) segment) + i*PAGE_SIZE));
-    }
+    /*
+     * The segment will be mapped on demand by elf_page_fault().
+     */

    return EE_OK;
}
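(Editor's note, not part of either revision: the p_align test in load_segment() above only accepts segments whose file offset and virtual address are congruent modulo the alignment. For example, with p_align = 0x1000, a segment with p_offset = 0x2000 and p_vaddr = 0x80001000 passes, since both remainders are 0, while p_offset = 0x2100 with the same p_vaddr is rejected with EE_INVALID, since 0x2100 % 0x1000 = 0x100 but 0x80001000 % 0x1000 = 0. Independently, a p_vaddr that does not start on a page boundary is rejected with EE_UNSUPPORTED.)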
/** Process section header.
 *
 * @param entry Section header.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as)
{
    switch (entry->sh_type) {
        default:
        break;
    }

    return EE_OK;
}
+/** Service a page fault in the ELF backend address space area.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Pointer to the address space area.
+ * @param addr Faulting virtual address.
+ *
+ * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
+ */
+int elf_page_fault(as_area_t *area, __address addr)
+{
+    elf_header_t *elf = (elf_header_t *) area->backend_data[0];
+    elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
+    __address base, frame;
+    index_t i;
+
+    ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
+    i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
+    base = (__address) (((void *) elf) + entry->p_offset);
+    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
+
+    if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
+        /*
+         * Initialized portion of the segment. The memory is backed
+         * directly by the content of the ELF image. Pages are
+         * only copied if the segment is writable so that there
+         * can be more instantiations of the same in-memory ELF
+         * image used at a time. Note that this could later be
+         * done as COW.
+         */
+        if (entry->p_flags & PF_W) {
+            frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+            memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
+        } else {
+            frame = KA2PA(base + i*FRAME_SIZE);
+        }
+    } else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
+        /*
+         * This is the uninitialized portion of the segment.
+         * It is not physically present in the ELF image.
+         * To resolve the situation, a frame must be allocated
+         * and cleared.
+         */
+        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+        memsetb(PA2KA(frame), FRAME_SIZE, 0);
+    } else {
+        size_t size;
+        /*
+         * The mixed case.
+         * The lower part is backed by the ELF image and
+         * the upper part is anonymous memory.
+         */
+        size = entry->p_filesz - (i<<PAGE_WIDTH);
+        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+        memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
+        memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
+    }
+
+    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
+    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
+        panic("Could not insert used space.\n");
+
+    return AS_PF_OK;
+}
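(Editor's note, not part of either revision: a worked example of the three cases in elf_page_fault(), assuming PAGE_SIZE = FRAME_SIZE = 0x1000 and a segment with p_vaddr = 0x1000, p_filesz = 0x1800, p_memsz = 0x3000. A fault at 0x1234 belongs to page 0x1000: 0x1000 + PAGE_SIZE = 0x2000 < p_vaddr + p_filesz = 0x2800, so the page is fully initialized and is either copied from the image for a writable segment or mapped to the image frame directly for a read-only one. A fault at 0x2100 belongs to page 0x2000: 0x3000 is not below 0x2800 and 0x2000 is below ALIGN_UP(0x2800, PAGE_SIZE) = 0x3000, so this is the mixed case with i = 1; size = 0x1800 - 0x1000 = 0x800 bytes are copied from the image and the remaining 0x800 bytes of the new frame are zeroed. A fault at 0x3500 belongs to page 0x3000, which is at or above 0x3000, so a fresh frame is allocated and cleared.)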
+/** Free a frame that is backed by the ELF backend.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Pointer to the address space area.
+ * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
+ * @param frame Frame to be released.
+ *
+ */
+void elf_frame_free(as_area_t *area, __address page, __address frame)
+{
+    elf_header_t *elf = (elf_header_t *) area->backend_data[0];
+    elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
+    __address base;
+    index_t i;
+
+    ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
+    i = (page - entry->p_vaddr) >> PAGE_WIDTH;
+    base = (__address) (((void *) elf) + entry->p_offset);
+    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
+
+    if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
+        if (entry->p_flags & PF_W) {
+            /*
+             * Free the frame with the copy of writable segment data.
+             */
+            frame_free(ADDR2PFN(frame));
+        }
+    } else {
+        /*
+         * The frame is either anonymous memory or the mixed case (i.e. lower
+         * part is backed by the ELF image and the upper is anonymous).
+         * In any case, a frame needs to be freed.
+         */
+        frame_free(ADDR2PFN(frame));
+    }
+}
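(Editor's note, not part of either revision: continuing the example above, when the area is torn down elf_frame_free() releases the frame backing page 0x1000 only if the segment is writable, because the read-only initialized case maps frames of the ELF image itself rather than private copies; the frames backing pages 0x2000 and 0x3000 are released unconditionally, since the mixed and anonymous cases always allocate a private frame.)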