/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief   Backend for address space areas backed by an ELF image.
 */

#include <lib/elf.h>
#include <debug.h>
#include <arch/types.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <arch/barrier.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif

static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
static void elf_share(as_area_t *area);

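/*
 * Operations vector of the ELF memory backend. The generic address space
 * management code invokes these callbacks whenever it needs the backend to
 * service a page fault, release a frame or share an area.
 */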
mem_backend_t elf_backend = {
    .page_fault = elf_page_fault,
    .frame_free = elf_frame_free,
    .share = elf_share
};
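
/*
 * Typical use (a hedged sketch, not part of this file): the kernel ELF
 * loader is expected to attach this backend when creating an address space
 * area for a program segment, along these lines:
 *
 *     mem_backend_data_t backend_data;
 *
 *     backend_data.elf = elf;        // parsed ELF image header
 *     backend_data.segment = entry;  // the segment's program header
 *     as_area_create(AS, flags, entry->p_memsz, entry->p_vaddr,
 *         AS_AREA_ATTR_NONE, &elf_backend, &backend_data);
 *
 * The as_area_create() signature is assumed from this revision's mm/as.h
 * and may differ in other versions.
 */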

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area      Pointer to the address space area.
 * @param addr      Faulting virtual address.
 * @param access    Access mode that caused the fault (i.e.
 *          read/write/exec).
 *
 * @return      AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
 *          on success (i.e. serviced).
 */
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
    elf_header_t *elf = area->backend_data.elf;
    elf_segment_header_t *entry = area->backend_data.segment;
    btree_node_t *leaf;
    uintptr_t base, frame, page, start_anon;
    size_t i;
    bool dirty = false;

    if (!as_area_check_access(area, access))
        return AS_PF_FAULT;

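    /*
     * Compute the index of the faulting page within the segment (i) and
     * the kernel virtual address of the segment's first page in the ELF
     * image (base).
     */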
    ASSERT((addr >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&
        (addr < entry->p_vaddr + entry->p_memsz));
    i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
    base = (uintptr_t)
        (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));

    /* Virtual address of the faulting page. */
    page = ALIGN_DOWN(addr, PAGE_SIZE);

    /* Virtual address of the end of the initialized part of the segment. */
    start_anon = entry->p_vaddr + entry->p_filesz;

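    /*
     * If the area is shared, first try to find the frame in the area's
     * pagemap. The pagemap is keyed by page addresses relative to the
     * area base, so its entries are meaningful to every task sharing
     * the area.
     */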
    if (area->sh_info) {
        bool found = false;

        /*
         * The address space area is shared.
         */

        mutex_lock(&area->sh_info->lock);
        frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
            page - area->base, &leaf);
        if (!frame) {
            unsigned int i;

            /*
             * Workaround for valid NULL address: frame zero is a
             * legitimate physical address, so a NULL result from
             * btree_search() is ambiguous. Scan the leaf keys to
             * distinguish "not found" from "found frame zero".
             */

            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page - area->base) {
                    found = true;
                    break;
                }
            }
        }
        if (frame || found) {
            frame_reference_add(ADDR2PFN(frame));
            page_mapping_insert(AS, addr, frame,
                as_area_get_flags(area));
            if (!used_space_insert(area, page, 1))
                panic("Cannot insert used space.");
            mutex_unlock(&area->sh_info->lock);
            return AS_PF_OK;
        }
    }

    /*
     * The area is either not shared or the pagemap does not contain the
     * mapping.
     */
    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
        /*
         * Initialized portion of the segment. The memory is backed
         * directly by the content of the ELF image. Pages are
         * only copied if the segment is writable so that there
         * can be more instantiations of the same in-memory ELF
         * image used at a time. Note that this could be later done
         * as COW.
         */
        if (entry->p_flags & PF_W) {
            frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
            memcpy((void *) PA2KA(frame),
                (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
            if (entry->p_flags & PF_X) {
                smc_coherence_block((void *) PA2KA(frame),
                    FRAME_SIZE);
            }
            dirty = true;
        } else {
            frame = KA2PA(base + i * FRAME_SIZE);
        }
    } else if (page >= start_anon) {
        /*
         * This is the uninitialized portion of the segment.
         * It is not physically present in the ELF image.
         * To resolve the situation, a frame must be allocated
         * and cleared.
         */
        frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
        memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
        dirty = true;
    } else {
        size_t pad_lo, pad_hi;
        /*
         * The mixed case.
         *
         * The middle part is backed by the ELF image and
         * the lower and upper parts are anonymous memory.
         * (The segment can be and often is shorter than 1 page.)
         */
        if (page < entry->p_vaddr)
            pad_lo = entry->p_vaddr - page;
        else
            pad_lo = 0;

        if (start_anon < page + PAGE_SIZE)
            pad_hi = page + PAGE_SIZE - start_anon;
        else
            pad_hi = 0;

        frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
        memcpy((void *) (PA2KA(frame) + pad_lo),
            (void *) (base + i * FRAME_SIZE + pad_lo),
            FRAME_SIZE - pad_lo - pad_hi);
        if (entry->p_flags & PF_X) {
            smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
                FRAME_SIZE - pad_lo - pad_hi);
        }
        memsetb((void *) PA2KA(frame), pad_lo, 0);
        memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
            0);
        dirty = true;
    }

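    /*
     * If a private copy of the data was made (dirty) and the area is
     * shared, register the new frame in the pagemap so that the other
     * parties sharing the area can look it up and reuse it.
     */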
    if (dirty && area->sh_info) {
        frame_reference_add(ADDR2PFN(frame));
        btree_insert(&area->sh_info->pagemap, page - area->base,
            (void *) frame, leaf);
    }

    if (area->sh_info)
        mutex_unlock(&area->sh_info->lock);

    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    if (!used_space_insert(area, page, 1))
        panic("Cannot insert used space.");

    return AS_PF_OK;
}

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area      Pointer to the address space area.
 * @param page      Page that is mapped to frame. Must be aligned to
 *          PAGE_SIZE.
 * @param frame     Frame to be released.
 *
 */
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
    elf_header_t *elf = area->backend_data.elf;
    elf_segment_header_t *entry = area->backend_data.segment;
    uintptr_t base, start_anon;
    size_t i;

    ASSERT((page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&
        (page < entry->p_vaddr + entry->p_memsz));
    i = (page - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
    base = (uintptr_t) (((void *) elf) +
        ALIGN_DOWN(entry->p_offset, FRAME_SIZE));
    start_anon = entry->p_vaddr + entry->p_filesz;

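    /*
     * Read-only pages of the initialized part map the ELF image itself
     * and were never privately copied, so only frames holding private
     * copies of writable data or anonymous memory are returned to the
     * frame allocator.
     */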
    if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
        if (entry->p_flags & PF_W) {
            /*
             * Free the frame with the copy of writable segment
             * data.
             */
            frame_free(frame);
        }
    } else {
        /*
         * The frame is either anonymous memory or the mixed case (i.e.
         * lower part is backed by the ELF image and the upper is
         * anonymous). In any case, a frame needs to be freed.
         */
        frame_free(frame);
    }
}

/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the pagemap.
 * Otherwise only portions of the area that are not backed by the ELF image
 * are put into the pagemap.
 *
 * The address space and address space area must be locked prior to the call.
 *
 * @param area      Address space area.
 */
void elf_share(as_area_t *area)
{
    elf_segment_header_t *entry = area->backend_data.segment;
    link_t *cur;
    btree_node_t *leaf, *node;
    uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;

    /*
     * Find the node in which to start the linear search. For a writable
     * area every used page is duplicated, so the walk starts at the first
     * leaf of the used_space tree; otherwise it starts near start_anon,
     * because only pages that are not backed by the ELF image are copied.
     */
    if (area->flags & AS_AREA_WRITE) {
        node = list_get_instance(area->used_space.leaf_head.next,
            btree_node_t, leaf_link);
    } else {
        (void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
        node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
            leaf);
        if (!node)
            node = leaf;
    }

    /*
     * Copy used anonymous portions of the area to sh_info's page map.
     */
    mutex_lock(&area->sh_info->lock);
    for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
        cur = cur->next) {
        unsigned int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);

        for (i = 0; i < node->keys; i++) {
            uintptr_t base = node->key[i];
            size_t count = (size_t) node->value[i];
            unsigned int j;

            /*
             * Skip read-only areas of used space that are backed
             * by the ELF image.
             */
            if (!(area->flags & AS_AREA_WRITE))
                if (base >= entry->p_vaddr &&
                    base + count * PAGE_SIZE <= start_anon)
                    continue;

            for (j = 0; j < count; j++) {
                pte_t *pte;

                /*
                 * Skip read-only pages that are backed by the
                 * ELF image.
                 */
                if (!(area->flags & AS_AREA_WRITE))
                    if (base >= entry->p_vaddr &&
                        base + (j + 1) * PAGE_SIZE <=
                        start_anon)
                        continue;

                page_table_lock(area->as, false);
                pte = page_mapping_find(area->as,
                    base + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                btree_insert(&area->sh_info->pagemap,
                    (base + j * PAGE_SIZE) - area->base,
                    (void *) PTE_GET_FRAME(pte), NULL);
                page_table_unlock(area->as, false);

                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
                frame_reference_add(pfn);
            }
        }
    }
    mutex_unlock(&area->sh_info->lock);
}

/** @}
 */