--- Rev 1423
+++ Rev 1424

@@ Line 1 | Line 1 @@
 /*
  * Copyright (C) 2006 Sergey Bondari
+ * Copyright (C) 2006 Jakub Jermar
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ Line 54 | Line 55 @@
 
 static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
 static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as);
 static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
 
-static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
-static void elf_frame_free(as_area_t *area, __address page, __address frame);
-
-mem_backend_t elf_backend = {
-	.backend_page_fault = elf_page_fault,
-	.backend_frame_free = elf_frame_free
-};
-
 /** ELF loader
  *
  * @param header Pointer to ELF header in memory
  * @param as Created and properly mapped address space
  * @return EE_OK on success
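The deleted block takes the ELF memory backend out of this file: the forward declarations and the elf_backend table are gone, and only an externally defined elf_backend is still referenced from load_segment() below. As a minimal sketch of the dispatch such a backend table enables, assuming only the two callback fields visible in the deleted lines; the wrapper name and the area->backend field are illustrative assumptions, not taken from either revision:

	/* Sketch only: route a fault to whatever backend the area was created
	 * with; the backend field name is an assumption for illustration. */
	int as_page_fault_sketch(as_area_t *area, __address addr, pf_access_t access)
	{
		if (area->backend && area->backend->backend_page_fault)
			return area->backend->backend_page_fault(area, addr, access);
		return AS_PF_FAULT;	/* no backend can service the fault */
	}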
@@ Line 167 | Line 160 @@
  */
 int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as)
 {
 	as_area_t *a;
 	int flags = 0;
-	void *backend_data[2] = { elf, entry };
+	mem_backend_data_t backend_data = { .d1 = (__native) elf, .d2 = (__native) entry };
 
 	if (entry->p_align > 1) {
 		if ((entry->p_offset % entry->p_align) != (entry->p_vaddr % entry->p_align)) {
 			return EE_INVALID;
 		}
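The untyped void * pair is replaced by a typed mem_backend_data_t whose members hold native-sized words; both pointers are cast to __native, and the structure is a stack local passed to as_area_create() by address, so it is presumably copied into the area there. A sketch of the shape the new initializer implies; nothing beyond .d1 and .d2 is visible in this diff:

	typedef struct {
		__native d1;	/* here: an elf_header_t * cast to a native word */
		__native d2;	/* here: an elf_segment_header_t * cast to a native word */
	} mem_backend_data_t;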
@@ Line 181 | Line 174 @@
 		flags |= AS_AREA_EXEC;
 	if (entry->p_flags & PF_W)
 		flags |= AS_AREA_WRITE;
 	if (entry->p_flags & PF_R)
 		flags |= AS_AREA_READ;
+	flags |= AS_AREA_CACHEABLE;
 
 	/*
 	 * Check if the virtual address starts on a page boundary.
 	 */
 	if (ALIGN_UP(entry->p_vaddr, PAGE_SIZE) != entry->p_vaddr)
 		return EE_UNSUPPORTED;
 
-	a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data);
+	a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE,
+		&elf_backend, &backend_data);
 	if (!a)
 		return EE_MEMORY;
 
 	/*
 	 * The segment will be mapped on demand by elf_page_fault().
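Together with the congruence test in the previous hunk, two sanity checks now guard the mapping: p_offset and p_vaddr must be congruent modulo p_align (EE_INVALID otherwise), and p_vaddr itself must be page-aligned (EE_UNSUPPORTED otherwise); the new revision also marks every ELF area cacheable. A worked example with illustrative values, assuming PAGE_SIZE and p_align are both 0x1000: p_vaddr = 0x80001000 with p_offset = 0x3000 passes both checks; p_offset = 0x3340 fails the congruence test (0x340 != 0x000); p_vaddr = 0x80001340 with p_offset = 0x2340 passes the congruence test but fails the page-alignment test.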
@@ Line 216 | Line 211 @@
 		break;
 	}
 
 	return EE_OK;
 }
-
-/** Service a page fault in the ELF backend address space area.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area Pointer to the address space area.
- * @param addr Faulting virtual address.
- * @param access Access mode that caused the fault (i.e. read/write/exec).
- *
- * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
- */
-int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
-{
-	elf_header_t *elf = (elf_header_t *) area->backend_data[0];
-	elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
-	__address base, frame;
-	index_t i;
-
-	if (!as_area_check_access(area, access))
-		return AS_PF_FAULT;
-
-	ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
-	i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
-	base = (__address) (((void *) elf) + entry->p_offset);
-	ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
-
-	if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
-		/*
-		 * Initialized portion of the segment. The memory is backed
-		 * directly by the content of the ELF image. Pages are
-		 * only copied if the segment is writable so that there
-		 * can be multiple instantiations of the same in-memory
-		 * ELF image used at a time. Note that this could later
-		 * be done as COW.
-		 */
-		if (entry->p_flags & PF_W) {
-			frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-			memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
-		} else {
-			frame = KA2PA(base + i*FRAME_SIZE);
-		}
-	} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
-		/*
-		 * This is the uninitialized portion of the segment.
-		 * It is not physically present in the ELF image.
-		 * To resolve the situation, a frame must be allocated
-		 * and cleared.
-		 */
-		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-		memsetb(PA2KA(frame), FRAME_SIZE, 0);
-	} else {
-		size_t size;
-		/*
-		 * The mixed case.
-		 * The lower part is backed by the ELF image and
-		 * the upper part is anonymous memory.
-		 */
-		size = entry->p_filesz - (i<<PAGE_WIDTH);
-		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-		memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
-		memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
-	}
-
-	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
-		panic("Could not insert used space.\n");
-
-	return AS_PF_OK;
-}
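How the three branches of the removed elf_page_fault() partition a segment, as a worked example with illustrative values (PAGE_SIZE = FRAME_SIZE = 0x1000 assumed): let p_vaddr be page-aligned, p_filesz = 0x1800 and p_memsz = 0x3000. A fault in page 0 satisfies ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < p_vaddr + 0x1800, so the page is served from the ELF image (and copied only if the segment is writable). A fault in page 2 satisfies ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(p_vaddr + 0x1800, PAGE_SIZE) = p_vaddr + 0x2000, so a zeroed anonymous frame is allocated. A fault in page 1 falls through to the mixed branch: size = 0x1800 - (1 << PAGE_WIDTH) = 0x800, so the lower 0x800 bytes are copied from the image and the upper 0x800 bytes are cleared.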
-
-/** Free a frame that is backed by the ELF backend.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area Pointer to the address space area.
- * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
- * @param frame Frame to be released.
- *
- */
-void elf_frame_free(as_area_t *area, __address page, __address frame)
-{
-	elf_header_t *elf = (elf_header_t *) area->backend_data[0];
-	elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
-	__address base;
-	index_t i;
-
-	ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
-	i = (page - entry->p_vaddr) >> PAGE_WIDTH;
-	base = (__address) (((void *) elf) + entry->p_offset);
-	ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
-
-	if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
-		if (entry->p_flags & PF_W) {
-			/*
-			 * Free the frame with the copy of writable segment data.
-			 */
-			frame_free(ADDR2PFN(frame));
-		}
-	} else {
-		/*
-		 * The frame is either anonymous memory or the mixed case (i.e. the
-		 * lower part is backed by the ELF image and the upper part is
-		 * anonymous). In either case, the frame needs to be freed.
-		 */
-		frame_free(ADDR2PFN(frame));
-	}
-}
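The two branches of the removed elf_frame_free() are complementary: a page wholly below the aligned end of the file-backed part is freed only if the segment is writable (a read-only page maps the image's own frame, which must survive), while mixed and anonymous pages always carry a privately allocated frame. A behaviour-preserving condensation of that logic, offered as a sketch rather than code from either revision:

	/* Free only frames privately allocated by elf_page_fault(): writable
	 * copies, anonymous pages, and the mixed boundary page. */
	if ((page + PAGE_SIZE >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) ||
	    (entry->p_flags & PF_W))
		frame_free(ADDR2PFN(frame));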