Subversion Repositories HelenOS-historic

Diff: Rev 1760 → Rev 1780
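
Every change visible in these hunks is a mechanical type rename: each use of the HelenOS-specific __address type is replaced by the standard C99 uintptr_t. A minimal before/after sketch, assuming __address was previously a typedef for a native-width unsigned integer (the prior definition is architecture-dependent and not part of this diff):

    /* Before (hypothetical arch-dependent typedef, not shown in the diff): */
    typedef unsigned long __address;

    /* After: the standard type from <stdint.h>, guaranteed to be able to
     * hold any object pointer converted to an integer and back. */
    #include <stdint.h>
    uintptr_t addr;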
Line 48:

 #include <align.h>
 #include <memstr.h>
 #include <macros.h>
 #include <arch.h>
 
-static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
-static void elf_frame_free(as_area_t *area, __address page, __address frame);
+static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
+static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
 static void elf_share(as_area_t *area);
 
 mem_backend_t elf_backend = {
     .page_fault = elf_page_fault,
     .frame_free = elf_frame_free,
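
mem_backend_t is a table of callbacks through which the generic address space code dispatches page faults and frame releases to the backend that services an area. A minimal sketch of the presumed dispatch site, with hypothetical names (the real call site lives in the generic address space code and is not part of this diff):

    /* Hypothetical dispatch sketch -- not the verbatim HelenOS call site. */
    int as_area_handle_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
    {
        /* Delegate to the backend registered for this area, if any. */
        if (area->backend && area->backend->page_fault)
            return area->backend->page_fault(area, addr, access);
        return AS_PF_FAULT;
    }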
Line 68:

  * @param addr Faulting virtual address.
  * @param access Access mode that caused the fault (i.e. read/write/exec).
  *
  * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
  */
-int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
+int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
 {
     elf_header_t *elf = area->backend_data.elf;
     elf_segment_header_t *entry = area->backend_data.segment;
     btree_node_t *leaf;
-    __address base, frame;
+    uintptr_t base, frame;
     index_t i;
 
     if (!as_area_check_access(area, access))
         return AS_PF_FAULT;
 
     ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
     i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
-    base = (__address) (((void *) elf) + entry->p_offset);
+    base = (uintptr_t) (((void *) elf) + entry->p_offset);
     ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
 
     if (area->sh_info) {
         bool found = false;
 
         /*
          * The address space area is shared.
          */
 
         mutex_lock(&area->sh_info->lock);
-        frame = (__address) btree_search(&area->sh_info->pagemap,
+        frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
             ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
         if (!frame) {
             int i;
 
             /*
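
The handler first converts the faulting address into a page index inside the segment and verifies that the segment's image sits frame-aligned in kernel memory. A worked example of that arithmetic, plus a sketch of the alignment macros as they are conventionally defined for power-of-two alignments (the authoritative definitions live in the align.h included above):

    /* Conventional power-of-two alignment macros (sketch): */
    #define ALIGN_DOWN(s, a)    ((s) & ~((a) - 1))
    #define ALIGN_UP(s, a)      (((s) + ((a) - 1)) & ~((a) - 1))

    /* Worked example, assuming PAGE_WIDTH == 12 (4 KiB pages):
     *   entry->p_vaddr = 0x8000, addr = 0xa123
     *   i = (0xa123 - 0x8000) >> 12 = 2          (third page of the segment)
     *   ALIGN_DOWN(0xa123, PAGE_SIZE) = 0xa000   (key used for the pagemap lookup)
     */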
Line 132:

          * can be more instantiations of the same in-memory ELF image
          * used at a time. Note that this could later be done
          * as COW.
          */
         if (entry->p_flags & PF_W) {
-            frame = (__address)frame_alloc(ONE_FRAME, 0);
+            frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
             memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
 
             if (area->sh_info) {
                 frame_reference_add(ADDR2PFN(frame));
                 btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
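
For a page that lies entirely in the initialized, file-backed part of a writable (PF_W) segment, the backend allocates a private frame and eagerly copies the page out of the image, so each instantiation of the image can diverge independently. As the comment notes, the copy could instead be deferred with copy-on-write; a sketch of that alternative, in illustrative steps rather than HelenOS API calls:

    /* Hypothetical COW variant (illustrative, not the HelenOS API):
     * 1. On the first fault, map the image's own frame read-only into
     *    the faulting address space.
     * 2. Only on a subsequent write fault allocate a private frame,
     *    memcpy the page, and remap it writable -- paying for the copy
     *    only when a write actually happens.
     */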
Line 151:

          * This is the uninitialized portion of the segment.
          * It is not physically present in the ELF image.
          * To resolve the situation, a frame must be allocated
          * and cleared.
          */
-        frame = (__address)frame_alloc(ONE_FRAME, 0);
+        frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
         memsetb(PA2KA(frame), FRAME_SIZE, 0);
 
         if (area->sh_info) {
             frame_reference_add(ADDR2PFN(frame));
             btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
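
Pages past p_filesz are the uninitialized (BSS-like) tail of the segment, so a zeroed frame is fabricated on the spot. Note that memsetb takes (destination, byte count, fill value), which differs from the standard memset(dst, value, count) argument order; the prototype assumed by the call above:

    /* Prototype assumed from the call site (declared in the memstr.h
     * included at the top of the file): */
    void memsetb(uintptr_t dst, size_t cnt, uint8_t x);

    /* The call above is therefore equivalent, in standard C terms, to: */
    memset((void *) PA2KA(frame), 0, FRAME_SIZE);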
Line 168:

          * The mixed case.
          * The lower part is backed by the ELF image and
          * the upper part is anonymous memory.
          */
         size = entry->p_filesz - (i<<PAGE_WIDTH);
-        frame = (__address)frame_alloc(ONE_FRAME, 0);
+        frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
         memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
         memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
 
         if (area->sh_info) {
             frame_reference_add(ADDR2PFN(frame));
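
The mixed case handles the one page that straddles the p_filesz boundary: its lower size bytes come from the image and the rest is zero-filled anonymous memory. A worked example with assumed values:

    /* Worked example (assumed values; FRAME_SIZE == 0x1000, PAGE_WIDTH == 12):
     *   entry->p_filesz = 0x1800, faulting page index i = 1
     *   size = 0x1800 - (1 << 12) = 0x800
     *   memsetb zeroes the upper FRAME_SIZE - 0x800 = 0x800 bytes,
     *   memcpy fills the lower 0x800 bytes from the ELF image.
     */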
Line 199:

  * @param area Pointer to the address space area.
  * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
  * @param frame Frame to be released.
  *
  */
-void elf_frame_free(as_area_t *area, __address page, __address frame)
+void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
 {
     elf_header_t *elf = area->backend_data.elf;
     elf_segment_header_t *entry = area->backend_data.segment;
-    __address base;
+    uintptr_t base;
     index_t i;
 
     ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
     i = (page - entry->p_vaddr) >> PAGE_WIDTH;
-    base = (__address) (((void *) elf) + entry->p_offset);
+    base = (uintptr_t) (((void *) elf) + entry->p_offset);
     ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
 
     if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
         if (entry->p_flags & PF_W) {
             /*
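
elf_frame_free mirrors the three cases of the fault handler: only frames that were privately allocated there may be returned to the allocator, while frames that merely point into the read-only ELF image must be left alone. A condensed sketch of the decision, inferred from the visible condition (the hunk above is truncated before the branches complete):

    /* Condensed logic (sketch):
     *   file-backed page, PF_W set  -> frame was a private copy   -> free it
     *   file-backed page, read-only -> frame belongs to the image -> keep it
     *   anonymous or mixed page     -> frame was allocated here   -> free it
     */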
Line 243:

 void elf_share(as_area_t *area)
 {
     elf_segment_header_t *entry = area->backend_data.segment;
     link_t *cur;
     btree_node_t *leaf, *node;
-    __address start_anon = entry->p_vaddr + entry->p_filesz;
+    uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;
 
     /*
      * Find the node in which to start linear search.
      */
     if (area->flags & AS_AREA_WRITE) {
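
start_anon marks the first byte past the file-backed part of the segment. Read-only pages below it can be served from the ELF image in any address space, so only pages at or above start_anon (and, for writable areas, every resident page) need entries in the shared pagemap. A worked example with assumed values:

    /* Worked example (assumed values):
     *   entry->p_vaddr = 0x8000, entry->p_filesz = 0x1800
     *   start_anon = 0x8000 + 0x1800 = 0x9800
     *   -> pages below 0x9800 are at least partly backed by the image;
     *      pages from 0x9800 up are purely anonymous.
     */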
Line 267:

         int i;
 
         node = list_get_instance(cur, btree_node_t, leaf_link);
 
         for (i = 0; i < node->keys; i++) {
-            __address base = node->key[i];
+            uintptr_t base = node->key[i];
             count_t count = (count_t) node->value[i];
             int j;
 
             /*
              * Skip read-only areas of used space that are backed