/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file	backend_elf.c
 * @brief	Backend for address space areas backed by an ELF image.
 */

#include <elf.h>
#include <debug.h>
#include <arch/types.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>

static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, __address page, __address frame);
static void elf_share(as_area_t *area);
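
/*
 * Operations exposed by the ELF backend to the generic address space code.
 * Address space areas backed by an ELF image dispatch their page fault,
 * frame free and share operations through these function pointers.
 */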
mem_backend_t elf_backend = {
	.page_fault = elf_page_fault,
	.frame_free = elf_frame_free,
	.share = elf_share
};

/** Service a page fault in the ELF backend address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. the fault could not be serviced)
 *	   or AS_PF_OK on success (i.e. the fault was serviced).
 */
int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
	elf_header_t *elf = area->backend_data.elf;
	elf_segment_header_t *entry = area->backend_data.segment;
	btree_node_t *leaf;
	__address base, frame;
	index_t i;

	if (!as_area_check_access(area, access))
		return AS_PF_FAULT;
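
	/*
	 * Compute the index of the faulting page within the segment
	 * and the kernel address of the segment's data in the ELF image.
	 */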
	ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
	i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
	base = (__address) (((void *) elf) + entry->p_offset);
	ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);

	if (area->sh_info) {
		bool found = false;

		/*
		 * The address space area is shared.
		 * Check the pagemap to see whether the page has already been
		 * instantiated elsewhere and its frame can simply be reused.
		 */

		mutex_lock(&area->sh_info->lock);
		frame = (__address) btree_search(&area->sh_info->pagemap,
			ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
		if (!frame) {
			int i;

			/*
			 * Workaround for valid NULL address: btree_search()
			 * returns NULL both on a miss and when the stored
			 * frame address is zero, so look for the key in the
			 * leaf node explicitly.
			 */

			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE) - area->base) {
					found = true;
					break;
				}
			}
		}
		if (frame || found) {
			page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
			if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
				panic("Could not insert used space.\n");
			mutex_unlock(&area->sh_info->lock);
			return AS_PF_OK;
		}
	}

	/*
	 * The area is either not shared or the pagemap does not contain the mapping.
	 */
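
	/*
	 * Three cases follow: the page lies fully within the initialized
	 * (file-backed) part of the segment, fully within the uninitialized
	 * (anonymous) part, or it straddles the p_filesz boundary.
	 */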

	if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
		/*
		 * Initialized portion of the segment. The memory is backed
		 * directly by the content of the ELF image. Pages are
		 * only copied if the segment is writable so that there
		 * can be more instances of the same ELF image in memory
		 * at a time. Note that this could be later done as COW.
		 */
		if (entry->p_flags & PF_W) {
			frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
			memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);

			if (area->sh_info) {
				btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
					(void *) frame, leaf);
			}

		} else {
			frame = KA2PA(base + i*FRAME_SIZE);
		}
	} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
		/*
		 * This is the uninitialized portion of the segment.
		 * It is not physically present in the ELF image.
		 * To resolve the situation, a frame must be allocated
		 * and cleared.
		 */
		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
		memsetb(PA2KA(frame), FRAME_SIZE, 0);

		if (area->sh_info) {
			btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
				(void *) frame, leaf);
		}

	} else {
		size_t size;

		/*
		 * The mixed case: the lower part of the page is backed by
		 * the ELF image and the upper part is anonymous memory.
		 */
		size = entry->p_filesz - (i<<PAGE_WIDTH);
		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
		memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
		memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);

		if (area->sh_info) {
			btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
				(void *) frame, leaf);
		}
	}

	if (area->sh_info)
		mutex_unlock(&area->sh_info->lock);
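
	/*
	 * Map the page to the chosen frame and record it in the area's
	 * used space map.
	 */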
	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
		panic("Could not insert used space.\n");

	return AS_PF_OK;
}

/** Free a frame that is backed by the ELF backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
 * @param frame Frame to be released.
 */
void elf_frame_free(as_area_t *area, __address page, __address frame)
{
	elf_header_t *elf = area->backend_data.elf;
	elf_segment_header_t *entry = area->backend_data.segment;
	__address base;
	index_t i;

	ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
	i = (page - entry->p_vaddr) >> PAGE_WIDTH;
	base = (__address) (((void *) elf) + entry->p_offset);
	ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
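
	/*
	 * Frames that merely map the read-only, file-backed part of the
	 * segment belong to the ELF image itself and must not be freed;
	 * only private copies and anonymous frames are released.
	 */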
	if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
		if (entry->p_flags & PF_W) {
			/*
			 * Free the frame with the copy of writable segment data.
			 */
			frame_free(ADDR2PFN(frame));
		}
	} else {
		/*
		 * The frame is either anonymous memory or the mixed case (i.e. lower
		 * part is backed by the ELF image and the upper is anonymous).
		 * In any case, a frame needs to be freed.
		 */
		frame_free(ADDR2PFN(frame));
	}
}

/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the pagemap.
 * Otherwise, only portions of the area that are not backed by the ELF image
 * are put into the pagemap.
 *
 * The address space and address space area must be locked prior to the call.
 *
 * @param area Address space area.
 */
void elf_share(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	link_t *cur;
	btree_node_t *leaf, *node;
	__address start_anon = entry->p_vaddr + entry->p_filesz;

	/*
	 * Find the used_space node in which to start the linear search.
	 * For writable areas, every mapped page has a private copy, so
	 * all of the used space must be examined. For read-only areas,
	 * only pages at or above start_anon are anonymous, so start
	 * near that boundary.
	 */
	if (area->flags & AS_AREA_WRITE) {
		node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
	} else {
		(void) btree_search(&area->used_space, start_anon, &leaf);
		node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
		if (!node)
			node = leaf;
	}

	/*
	 * Copy used anonymous portions of the area to sh_info's page map.
	 */
	mutex_lock(&area->sh_info->lock);
	for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; cur = cur->next) {
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		for (i = 0; i < node->keys; i++) {
			__address base = node->key[i];
			count_t count = (count_t) node->value[i];
			int j;

			/*
			 * Skip read-only areas of used space that are backed
			 * by the ELF image.
			 */
			if (!(area->flags & AS_AREA_WRITE))
				if (base + count*PAGE_SIZE <= start_anon)
					continue;

			for (j = 0; j < count; j++) {
				pte_t *pte;

				/*
				 * Skip read-only pages that are backed by the ELF image.
				 */
				if (!(area->flags & AS_AREA_WRITE))
					if (base + (j + 1)*PAGE_SIZE <= start_anon)
						continue;
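
				/*
				 * Look up the physical frame behind the page
				 * and record it in the pagemap, keyed by the
				 * page's offset from the area base.
				 */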
				page_table_lock(area->as, false);
				pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
				btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
					(void *) PTE_GET_FRAME(pte), NULL);
				page_table_unlock(area->as, false);
			}
		}
	}
	mutex_unlock(&area->sh_info->lock);
}