Subversion Repositories HelenOS

Diff of the kernel's ELF backend between Rev 2131 and Rev 2292
Line 69... Line 69...
  *
  * @param area Pointer to the address space area.
  * @param addr Faulting virtual address.
  * @param access Access mode that caused the fault (i.e. read/write/exec).
  *
- * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
+ * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
+ *     serviced).
  */
 int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
 {
     elf_header_t *elf = area->backend_data.elf;
     elf_segment_header_t *entry = area->backend_data.segment;
     btree_node_t *leaf;
     uintptr_t base, frame;
     index_t i;
+    bool dirty = false;

     if (!as_area_check_access(area, access))
         return AS_PF_FAULT;

-    ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
+    ASSERT((addr >= entry->p_vaddr) &&
+        (addr < entry->p_vaddr + entry->p_memsz));
     i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
     base = (uintptr_t) (((void *) elf) + entry->p_offset);
     ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

     if (area->sh_info) {
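The two assignments above locate the faulting page: i is the index of the page within the segment, and base is where the segment's backing starts inside the in-memory ELF image, so the page's data lives at base + i * FRAME_SIZE. A minimal compilable sketch of that arithmetic, assuming 4 KiB pages (PAGE_WIDTH == 12) and hypothetical addresses; this is an illustration, not HelenOS code, and it relies on page size and frame size coinciding, as the surrounding code does:

#include <stdint.h>
#include <stdio.h>

#define PAGE_WIDTH 12                  /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_WIDTH)

int main(void)
{
    uintptr_t p_vaddr = 0x8048000;     /* hypothetical segment start */
    uintptr_t addr = 0x804a123;        /* hypothetical faulting address */

    /* Index of the faulting page within the segment, as computed above. */
    uintptr_t i = (addr - p_vaddr) >> PAGE_WIDTH;

    /* The page's backing starts i frames into the segment image. */
    printf("page index %lu, offset %#lx into the segment\n",
        (unsigned long) i, (unsigned long) (i << PAGE_WIDTH));
    return 0;
}

With these values the program prints page index 2, offset 0x2000, i.e. the third page of the segment, backed 8 KiB into the segment's image.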
Line 105... Line 108...
             /*
              * Workaround for valid NULL address.
              */

             for (i = 0; i < leaf->keys; i++) {
-                if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
+                if (leaf->key[i] ==
+                    ALIGN_DOWN(addr, PAGE_SIZE)) {
                     found = true;
                     break;
                 }
             }
         }
         if (frame || found) {
             frame_reference_add(ADDR2PFN(frame));
-            page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-            if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
+            page_mapping_insert(AS, addr, frame,
+                as_area_get_flags(area));
+            if (!used_space_insert(area,
+                ALIGN_DOWN(addr, PAGE_SIZE), 1))
                 panic("Could not insert used space.\n");
             mutex_unlock(&area->sh_info->lock);
             return AS_PF_OK;
         }
     }
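The "workaround for valid NULL address" above exists because the pagemap stores physical frame addresses as B+tree values, and 0 is a legitimate frame address, so a zero lookup result cannot by itself mean "no entry"; hence the explicit found flag computed by rescanning the leaf keys. A toy sketch of the sentinel problem, with an array standing in for one leaf (names are illustrative, not the HelenOS btree API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for one pagemap leaf: parallel key/value arrays. */
static uintptr_t keys[]   = { 0x0000, 0x3000 };
static uintptr_t frames[] = { 0x0,    0x7f000 };   /* frame 0 is legitimate */

static bool leaf_lookup(uintptr_t key, uintptr_t *frame)
{
    for (unsigned i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
        if (keys[i] == key) {
            *frame = frames[i];
            return true;    /* hit, even though *frame may be 0 */
        }
    }
    return false;           /* miss is signalled out of band */
}

int main(void)
{
    uintptr_t frame;
    bool found = leaf_lookup(0x0000, &frame);
    printf("found=%d frame=%#lx\n", found, (unsigned long) frame);
    return 0;
}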

     /*
-     * The area is either not shared or the pagemap does not contain the mapping.
+     * The area is either not shared or the pagemap does not contain the
+     * mapping.
      */

-    if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
+    if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE <
+        entry->p_vaddr + entry->p_filesz) {
         /*
          * Initialized portion of the segment. The memory is backed
          * directly by the content of the ELF image. Pages are
          * only copied if the segment is writable so that there
          * can be more instantiations of the same memory ELF image
          * used at a time. Note that this could be later done
          * as COW.
          */
         if (entry->p_flags & PF_W) {
             frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
-            memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
+            memcpy((void *) PA2KA(frame),
+                (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
+            dirty = true;

             if (area->sh_info) {
                 frame_reference_add(ADDR2PFN(frame));
-                btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
+                btree_insert(&area->sh_info->pagemap,
+                    ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
                     (void *) frame, leaf);
             }

         } else {
             frame = KA2PA(base + i*FRAME_SIZE);
         }
-    } else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
+    } else if (ALIGN_DOWN(addr, PAGE_SIZE) >=
+        ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
         /*
          * This is the uninitialized portion of the segment.
          * It is not physically present in the ELF image.
          * To resolve the situation, a frame must be allocated
          * and cleared.
          */
         frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
         memsetb(PA2KA(frame), FRAME_SIZE, 0);
+        dirty = true;

         if (area->sh_info) {
             frame_reference_add(ADDR2PFN(frame));
-            btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
+            btree_insert(&area->sh_info->pagemap,
+                ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
                 (void *) frame, leaf);
         }

     } else {
         size_t size;
         /*
Line 173... Line 187...
          * the upper part is anonymous memory.
          */
         size = entry->p_filesz - (i<<PAGE_WIDTH);
         frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
         memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
-        memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
+        memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE),
+            size);
+        dirty = true;

         if (area->sh_info) {
             frame_reference_add(ADDR2PFN(frame));
-            btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
+            btree_insert(&area->sh_info->pagemap,
+                ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
                 (void *) frame, leaf);
         }

     }

     if (area->sh_info)
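The if/else-if/else chain above splits on where the faulting page lies relative to the end of the segment's file-backed data (p_vaddr + p_filesz): entirely before it, entirely past its page-aligned end, or straddling it. A standalone sketch of that classification, assuming 4 KiB pages and ALIGN_UP/ALIGN_DOWN macros with the usual power-of-two semantics; the segment values are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN_DOWN(a, s) ((a) & ~((s) - 1))
#define ALIGN_UP(a, s)   (((a) + ((s) - 1)) & ~((s) - 1))

/* Classify a faulting page the way the else-if chain above does. */
static const char *classify(uintptr_t addr, uintptr_t p_vaddr,
    uintptr_t p_filesz)
{
    uintptr_t page = ALIGN_DOWN(addr, PAGE_SIZE);
    uintptr_t file_end = p_vaddr + p_filesz;

    if (page + PAGE_SIZE < file_end)
        return "initialized (backed by the ELF image)";
    if (page >= ALIGN_UP(file_end, PAGE_SIZE))
        return "uninitialized (anonymous, zero-filled)";
    return "mixed (file-backed head, zero-filled tail)";
}

int main(void)
{
    /* Hypothetical segment: 0x9800 bytes of file data at 0x8048000. */
    uintptr_t p_vaddr = 0x8048000, p_filesz = 0x9800;
    printf("%s\n", classify(0x8048100, p_vaddr, p_filesz)); /* initialized */
    printf("%s\n", classify(0x8052000, p_vaddr, p_filesz)); /* uninitialized */
    printf("%s\n", classify(0x8051900, p_vaddr, p_filesz)); /* mixed */
    return 0;
}

For the mixed page, the code above then computes size = p_filesz - (i << PAGE_WIDTH), copies that many bytes from the image, and zero-fills the remainder of the frame.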
Line 209... Line 226...
     elf_header_t *elf = area->backend_data.elf;
     elf_segment_header_t *entry = area->backend_data.segment;
     uintptr_t base;
     index_t i;

-    ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
+    ASSERT((page >= entry->p_vaddr) &&
+        (page < entry->p_vaddr + entry->p_memsz));
     i = (page - entry->p_vaddr) >> PAGE_WIDTH;
     base = (uintptr_t) (((void *) elf) + entry->p_offset);
     ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

-    if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
+    if (page + PAGE_SIZE <
+        ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
         if (entry->p_flags & PF_W) {
             /*
-             * Free the frame with the copy of writable segment data.
+             * Free the frame with the copy of writable segment
+             * data.
              */
             frame_free(frame);
-#ifdef CONFIG_VIRT_IDX_DCACHE
-                dcache_flush_frame(page, frame);
-#endif
         }
     } else {
         /*
-         * The frame is either anonymous memory or the mixed case (i.e. lower
-         * part is backed by the ELF image and the upper is anonymous).
-         * In any case, a frame needs to be freed.
+         * The frame is either anonymous memory or the mixed case (i.e.
+         * lower part is backed by the ELF image and the upper is
+         * anonymous). In any case, a frame needs to be freed.
          */
         frame_free(frame);
-#ifdef CONFIG_VIRT_IDX_DCACHE
-            dcache_flush_frame(page, frame);
-#endif
     }
 }
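The frame-free handler above releases a frame only when the fault handler had allocated a private one: a writable copy of file-backed data, a zero-filled anonymous page, or a mixed page. Read-only file-backed pages map the ELF image in place, so they own no frame. A hedged sketch of that decision as a predicate, assuming 4 KiB pages; PF_W is the standard ELF writable segment flag, and the helper name is hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL                      /* assumed page size */
#define ALIGN_UP(a, s) (((a) + ((s) - 1)) & ~((s) - 1))
#define PF_W 0x2                              /* ELF writable flag */

/* Mirrors the free-side decision above: a page owns a private frame iff
 * the fault handler allocated one for it (writable copy, anonymous page,
 * or the mixed page at the end of the file-backed data). */
static bool page_owns_frame(uintptr_t page, uintptr_t p_vaddr,
    uintptr_t p_filesz, uint32_t p_flags)
{
    if (page + PAGE_SIZE < ALIGN_UP(p_vaddr + p_filesz, PAGE_SIZE)) {
        /* Fully file-backed page: copied only if the segment is writable. */
        return (p_flags & PF_W) != 0;
    }
    /* Anonymous or mixed page: always got a freshly allocated frame. */
    return true;
}

int main(void)
{
    /* Hypothetical segment with 0x9800 bytes of file data at 0x8048000. */
    printf("%d\n", page_owns_frame(0x8048000, 0x8048000, 0x9800, 0));    /* 0 */
    printf("%d\n", page_owns_frame(0x8048000, 0x8048000, 0x9800, PF_W)); /* 1 */
    return 0;
}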

 /** Share ELF image backed address space area.
  *
Line 258... Line 272...

     /*
      * Find the node in which to start linear search.
      */
     if (area->flags & AS_AREA_WRITE) {
-        node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
+        node = list_get_instance(area->used_space.leaf_head.next,
+            btree_node_t, leaf_link);
     } else {
         (void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
-        node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap, leaf);
+        node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
+            leaf);
         if (!node)
             node = leaf;
     }

     /*
      * Copy used anonymous portions of the area to sh_info's page map.
      */
     mutex_lock(&area->sh_info->lock);
-    for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; cur = cur->next) {
+    for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
+        cur = cur->next) {
         int i;

         node = list_get_instance(cur, btree_node_t, leaf_link);

         for (i = 0; i < node->keys; i++) {
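The loop above walks the area's used_space B+tree: leaf nodes are chained in a list, each leaf key is the base address of a used interval, and the associated value is its length in pages (the count consumed in the inner loop of the next fragment). A simplified, self-contained model of that traversal; the structure layout and field names are illustrative, not the HelenOS btree_node_t:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model: leaves linked in a list, each holding parallel
 * arrays of keys (interval bases) and values (page counts). */
typedef struct leaf {
    size_t keys;
    uintptr_t key[4];
    size_t count[4];
    struct leaf *next;
} leaf_t;

static void walk(leaf_t *first)
{
    for (leaf_t *node = first; node != NULL; node = node->next) {
        for (size_t i = 0; i < node->keys; i++) {
            printf("interval base %#lx, %zu page(s)\n",
                (unsigned long) node->key[i], node->count[i]);
        }
    }
}

int main(void)
{
    leaf_t second = { 1, { 0x50000 }, { 2 }, NULL };
    leaf_t first  = { 2, { 0x10000, 0x30000 }, { 4, 1 }, &second };
    walk(&first);
    return 0;
}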
Line 292... Line 309...

             for (j = 0; j < count; j++) {
                 pte_t *pte;

                 /*
-                 * Skip read-only pages that are backed by the ELF image.
+                 * Skip read-only pages that are backed by the
+                 * ELF image.
                  */
                 if (!(area->flags & AS_AREA_WRITE))
-                    if (base + (j + 1)*PAGE_SIZE <= start_anon)
+                    if (base + (j + 1) * PAGE_SIZE <=
+                        start_anon)
                         continue;

                 page_table_lock(area->as, false);
-                pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
-                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
+                pte = page_mapping_find(area->as,
+                    base + j * PAGE_SIZE);
+                ASSERT(pte && PTE_VALID(pte) &&
+                    PTE_PRESENT(pte));
+                btree_insert(&area->sh_info->pagemap,
+                    (base + j * PAGE_SIZE) - area->base,
                     (void *) PTE_GET_FRAME(pte), NULL);
                 page_table_unlock(area->as, false);
-                frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
+
+                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+                frame_reference_add(pfn);
             }

         }
     }
     mutex_unlock(&area->sh_info->lock);
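Note the pagemap key computed above, (base + j * PAGE_SIZE) - area->base: entries are keyed by the page's offset from the area base, so another address space that attaches the shared area, possibly at a different virtual base, resolves the same entries. A tiny sketch of the key arithmetic; the bases are hypothetical:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    /* Hypothetical: one shared area attached at two different bases. */
    uintptr_t base_a = 0x40000000UL, base_b = 0x70000000UL;
    size_t j = 5;

    /* Both sides derive the same area-relative key for page j. */
    uintptr_t key_a = (base_a + j * PAGE_SIZE) - base_a;
    uintptr_t key_b = (base_b + j * PAGE_SIZE) - base_b;
    printf("key_a=%#lx key_b=%#lx\n",
        (unsigned long) key_a, (unsigned long) key_b);
    return 0;
}

The frame_reference_add() call that follows each insertion bumps the frame's reference count, so a frame recorded in the shared pagemap outlives any single mapping of it.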