/*
 * Copyright (C) 2006 Sergey Bondari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file    elf.c
 * @brief   Kernel ELF loader.
 */

#include <elf.h>
#include <debug.h>
#include <arch/types.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>

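/*
 * Human-readable descriptions of the EE_* error codes returned by
 * elf_load(), indexed by the error code value.
 */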
static char *error_codes[] = {
    "no error",
    "invalid image",
    "address space error",
    "incompatible image",
    "unsupported image type",
    "irrecoverable error"
};

static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as);
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);

static int elf_page_fault(as_area_t *area, __address addr);
static void elf_frame_free(as_area_t *area, __address page, __address frame);

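/*
 * Memory backend used for ELF-backed address space areas. The backend_data
 * array passed to as_area_create() by load_segment() holds the ELF header
 * and the segment header, which elf_page_fault() and elf_frame_free() use
 * to locate the segment's data within the image.
 */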
mem_backend_t elf_backend = {
    .backend_page_fault = elf_page_fault,
    .backend_frame_free = elf_frame_free
};

/** ELF loader
 *
 * @param header Pointer to ELF header in memory.
 * @param as Created and properly mapped address space.
 * @return EE_OK on success.
 */
int elf_load(elf_header_t *header, as_t *as)
{
    int i, rc;

    /* Identify ELF */
    if (header->e_ident[EI_MAG0] != ELFMAG0 || header->e_ident[EI_MAG1] != ELFMAG1 ||
        header->e_ident[EI_MAG2] != ELFMAG2 || header->e_ident[EI_MAG3] != ELFMAG3) {
        return EE_INVALID;
    }

    /* Identify ELF compatibility */
    if (header->e_ident[EI_DATA] != ELF_DATA_ENCODING || header->e_machine != ELF_MACHINE ||
        header->e_ident[EI_VERSION] != EV_CURRENT || header->e_version != EV_CURRENT ||
        header->e_ident[EI_CLASS] != ELF_CLASS) {
        return EE_INCOMPATIBLE;
    }

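    /*
     * Program and section header entries must have the sizes this loader
     * was built against.
     */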
    if (header->e_phentsize != sizeof(elf_segment_header_t))
        return EE_INCOMPATIBLE;

    if (header->e_shentsize != sizeof(elf_section_header_t))
        return EE_INCOMPATIBLE;

    /* Check if the object type is supported. */
    if (header->e_type != ET_EXEC)
        return EE_UNSUPPORTED;

    /* Walk through all segment headers and process them. */
    for (i = 0; i < header->e_phnum; i++) {
        rc = segment_header(&((elf_segment_header_t *)(((__u8 *) header) + header->e_phoff))[i], header, as);
        if (rc != EE_OK)
            return rc;
    }

    /* Inspect all section headers and process them. */
    for (i = 0; i < header->e_shnum; i++) {
        rc = section_header(&((elf_section_header_t *)(((__u8 *) header) + header->e_shoff))[i], header, as);
        if (rc != EE_OK)
            return rc;
    }

    return EE_OK;
}

/** Return error message according to error code.
 *
 * @param rc Return code returned by elf_load().
 *
 * @return NULL-terminated description of the error.
 */
char *elf_error(int rc)
{
    ASSERT(rc < sizeof(error_codes) / sizeof(char *));

    return error_codes[rc];
}

/** Process segment header.
 *
 * @param entry Segment header.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as)
{
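    /*
     * Only PT_LOAD segments are actually loaded. PT_NULL and PT_PHDR are
     * ignored; any other segment type is refused.
     */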
    switch (entry->p_type) {
        case PT_NULL:
        case PT_PHDR:
            break;
        case PT_LOAD:
            return load_segment(entry, elf, as);
            break;
        case PT_DYNAMIC:
        case PT_INTERP:
        case PT_SHLIB:
        case PT_NOTE:
        case PT_LOPROC:
        case PT_HIPROC:
        default:
            return EE_UNSUPPORTED;
            break;
    }
    return EE_OK;
}

/** Load segment described by program header entry.
 *
 * @param entry Program header entry describing segment to be loaded.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as)
{
    as_area_t *a;
    int flags = 0;
    void *backend_data[2] = { elf, entry };

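    /*
     * For loadable segments, the file offset and the virtual address must
     * be congruent modulo the segment alignment.
     */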
    if (entry->p_align > 1) {
        if ((entry->p_offset % entry->p_align) != (entry->p_vaddr % entry->p_align)) {
            return EE_INVALID;
        }
    }

    if (entry->p_flags & PF_X)
        flags |= AS_AREA_EXEC;
    if (entry->p_flags & PF_W)
        flags |= AS_AREA_WRITE;
    if (entry->p_flags & PF_R)
        flags |= AS_AREA_READ;

    /*
     * Check if the virtual address starts on a page boundary.
     */
    if (ALIGN_UP(entry->p_vaddr, PAGE_SIZE) != entry->p_vaddr)
        return EE_UNSUPPORTED;

    a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data);
    if (!a)
        return EE_MEMORY;

    /*
     * The segment will be mapped on demand by elf_page_fault().
     */

    return EE_OK;
}

/** Process section header.
 *
 * @param entry Section header.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as)
{
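    /*
     * No section types need special handling yet; all sections are
     * accepted as they are.
     */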
    switch (entry->sh_type) {
        default:
            break;
    }

    return EE_OK;
}

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must already be locked.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
static int elf_page_fault(as_area_t *area, __address addr)
{
    elf_header_t *elf = (elf_header_t *) area->backend_data[0];
    elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
    __address base, frame;
    index_t i;

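    /*
     * Determine the index of the faulting page within the segment and the
     * kernel address of the segment's data in the ELF image.
     */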
    ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
    i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
    base = (__address) (((void *) elf) + entry->p_offset);
    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

    if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
        /*
         * Initialized portion of the segment. The memory is backed
         * directly by the content of the ELF image. Pages are
         * only copied if the segment is writable so that multiple
         * instantiations of the same ELF image can be in memory
         * at a time. Note that this could later be done as COW.
         */
        if (entry->p_flags & PF_W) {
            frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
            memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
        } else {
            frame = KA2PA(base + i*FRAME_SIZE);
        }
    } else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
        /*
         * This is the uninitialized portion of the segment.
         * It is not physically present in the ELF image.
         * To resolve the situation, a frame must be allocated
         * and cleared.
         */
        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame), FRAME_SIZE, 0);
    } else {
        size_t size;
        /*
         * The mixed case.
         * The lower part is backed by the ELF image and
         * the upper part is anonymous memory.
         */
        size = entry->p_filesz - (i<<PAGE_WIDTH);
        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
        memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
    }

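    /*
     * Map the frame at the faulting address and record the page as used
     * space within the address space area.
     */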
    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
        panic("Could not insert used space.\n");

    return AS_PF_OK;
}

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must already be locked.
 *
 * @param area Pointer to the address space area.
 * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
 * @param frame Frame to be released.
 */
static void elf_frame_free(as_area_t *area, __address page, __address frame)
{
    elf_header_t *elf = (elf_header_t *) area->backend_data[0];
    elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
    __address base;
    index_t i;

    ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
    i = (page - entry->p_vaddr) >> PAGE_WIDTH;
    base = (__address) (((void *) elf) + entry->p_offset);
    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

    if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
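        /*
         * Frames in the initialized portion are freed only if they are
         * private writable copies; read-only pages map the ELF image
         * directly and must not be freed.
         */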
        if (entry->p_flags & PF_W) {
            /*
             * Free the frame with the copy of writable segment data.
             */
            frame_free(ADDR2PFN(frame));
        }
    } else {
        /*
         * The frame is either anonymous memory or the mixed case (i.e. the
         * lower part is backed by the ELF image and the upper part is
         * anonymous). In either case, the frame needs to be freed.
         */
        frame_free(ADDR2PFN(frame));
    }
}