Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1425 → Rev 1426

/kernel/trunk/generic/src/mm/backend_anon.c
161,7 → 161,7
 * Sharing of an anonymous area is done by duplicating its entire mapping
 * to the pagemap. Page faults will primarily search for frames there.
 *
- * The address space area and page tables must be already locked.
+ * The address space and address space area must be already locked.
 *
 * @param area Address space area to be shared.
 */
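
Duplicating the area's mapping, as the comment above describes, means walking every resident page and recording its frame in the shared pagemap, keyed by the page's offset from the area base. A minimal sketch of that idea, assuming the area knows its size in pages and that resident_frame() (a hypothetical stand-in for a page_mapping_find() based lookup) yields the backing frame:

	/*
	 * Sketch only: resident_frame() is hypothetical and area->pages is
	 * assumed to hold the area's size in pages.
	 */
	static void anon_share_sketch(as_area_t *area)
	{
		__address page;

		mutex_lock(&area->sh_info->lock);
		for (page = area->base; page < area->base + area->pages * PAGE_SIZE; page += PAGE_SIZE) {
			__address frame = resident_frame(area, page);

			if (frame) {
				/* Key by offset so shared mappings stay base-independent. */
				btree_insert(&area->sh_info->pagemap, page - area->base,
					(void *) frame, NULL);
			}
		}
		mutex_unlock(&area->sh_info->lock);
	}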
/kernel/trunk/generic/src/mm/backend_elf.c
38,6 → 38,9
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
45,11 → 48,12
 
static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, __address page, __address frame);
static void elf_share(as_area_t *area);
 
mem_backend_t elf_backend = {
	.page_fault = elf_page_fault,
	.frame_free = elf_frame_free,
-	.share = NULL
+	.share = elf_share
};
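
For orientation: mem_backend_t is a table of callbacks through which the generic address space code services faults, so this hunk wires ELF areas up for sharing. A hedged sketch of that dispatch, assuming the area carries a backend pointer (the field name is assumed here, not taken from this revision) and using a hypothetical as_area_find() lookup:

	/* Sketch of backend dispatch; as_area_find() is hypothetical. */
	static int as_page_fault_sketch(__address addr, pf_access_t access)
	{
		as_area_t *area = as_area_find(AS, addr);

		if (!area || !area->backend || !area->backend->page_fault)
			return AS_PF_FAULT;

		/* For an ELF-backed area this invokes elf_page_fault() below. */
		return area->backend->page_fault(area, addr, access);
	}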
 
/** Service a page fault in the ELF backend address space area.
66,6 → 70,7
{
	elf_header_t *elf = area->backend_data.elf;
	elf_segment_header_t *entry = area->backend_data.segment;
	btree_node_t *leaf;
	__address base, frame;
	index_t i;
 
76,7 → 81,44
	i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
	base = (__address) (((void *) elf) + entry->p_offset);
	ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
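	/*
	 * Worked example with illustrative numbers (assuming 4 KiB pages,
	 * i.e. PAGE_WIDTH == 12): for entry->p_vaddr == 0x8048000 and
	 * addr == 0x804a123, i = (0x804a123 - 0x8048000) >> 12 == 2, so the
	 * fault hits the third page of the segment and its file image starts
	 * at base + 2*FRAME_SIZE.
	 */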
 
	if (area->sh_info) {
		bool found = false;

		/*
		 * The address space area is shared.
		 */
		mutex_lock(&area->sh_info->lock);
		frame = (__address) btree_search(&area->sh_info->pagemap,
			ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
		if (!frame) {
			int i;

			/*
			 * Workaround for valid NULL address: btree_search()
			 * returns NULL both when the key is absent and when
			 * the stored frame address is zero, so the leaf's
			 * keys are scanned to tell the two cases apart.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE) - area->base) {
					found = true;
					break;
				}
			}
		}
		if (frame || found) {
			page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
			if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
				panic("Could not insert used space.\n");
			mutex_unlock(&area->sh_info->lock);
			return AS_PF_OK;
		}
	}
	/*
	 * The area is either not shared or the pagemap does not contain the mapping.
	 */
	if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
		/*
		 * Initialized portion of the segment. The memory is backed
89,6 → 131,12
		if (entry->p_flags & PF_W) {
			frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
			memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
			if (area->sh_info) {
				btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
					(void *) frame, leaf);
			}
		} else {
			frame = KA2PA(base + i*FRAME_SIZE);
		}
101,6 → 149,12
		 */
		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
		memsetb(PA2KA(frame), FRAME_SIZE, 0);

		if (area->sh_info) {
			btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
				(void *) frame, leaf);
		}
	} else {
		size_t size;

		/*
112,11 → 166,20
		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
		memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
		memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);

		if (area->sh_info) {
			btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
				(void *) frame, leaf);
		}
	}
	if (area->sh_info)
		mutex_unlock(&area->sh_info->lock);

	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
		panic("Could not insert used space.\n");
 
	return AS_PF_OK;
}
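
A note on the "valid NULL address" workaround in the fault path above: btree_search() reports a miss by returning NULL, but a frame address of zero can also be a legitimately stored value, so a hit must be confirmed by scanning the returned leaf's keys. The same disambiguation pattern in a self-contained form (toy types, not the kernel's B+tree):

	#include <stdbool.h>
	#include <stddef.h>

	#define LEAF_KEYS 8

	/* Toy leaf node: parallel key/value arrays, as in a B+tree leaf. */
	typedef struct {
		size_t keys;			/* number of used slots */
		unsigned long key[LEAF_KEYS];
		void *value[LEAF_KEYS];
	} leaf_t;

	/*
	 * Distinguish "key absent" from "key present with a NULL value".
	 * Returns true iff the key is present; *value gets the stored pointer.
	 */
	bool leaf_lookup(leaf_t *leaf, unsigned long key, void **value)
	{
		size_t i;

		for (i = 0; i < leaf->keys; i++) {
			if (leaf->key[i] == key) {
				*value = leaf->value[i];	/* may be NULL */
				return true;
			}
		}
		return false;	/* genuine miss */
	}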
158,3 → 221,77
		frame_free(ADDR2PFN(frame));
	}
}
 
/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the pagemap.
 * Otherwise only portions of the area that are not backed by the ELF image
 * are put into the pagemap.
 *
 * The address space and address space area must be locked prior to the call.
 *
 * @param area Address space area.
 */
void elf_share(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	link_t *cur;
	btree_node_t *leaf, *node;
	__address start_anon = entry->p_vaddr + entry->p_filesz;

	/*
	 * Find the node in which to start linear search.
	 */
	if (area->flags & AS_AREA_WRITE) {
		node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
	} else {
		(void) btree_search(&area->used_space, start_anon, &leaf);
		node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
		if (!node)
			node = leaf;
	}

	/*
	 * Copy used anonymous portions of the area to sh_info's page map.
	 */
	mutex_lock(&area->sh_info->lock);
	for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; cur = cur->next) {
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			__address base = node->key[i];
			count_t count = (count_t) node->value[i];
			int j;

			/*
			 * Skip read-only areas of used space that are backed
			 * by the ELF image.
			 */
			if (!(area->flags & AS_AREA_WRITE))
				if (base + count*PAGE_SIZE <= start_anon)
					continue;
			for (j = 0; j < count; j++) {
				pte_t *pte;

				/*
				 * Skip read-only pages that are backed by the ELF image.
				 */
				if (!(area->flags & AS_AREA_WRITE))
					if (base + (j + 1)*PAGE_SIZE <= start_anon)
						continue;
				page_table_lock(area->as, false);
				pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
				btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
					(void *) PTE_GET_FRAME(pte), NULL);
				page_table_unlock(area->as, false);
			}
		}
	}
	mutex_unlock(&area->sh_info->lock);
}
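
To make elf_share()'s skip conditions concrete: for a read-only area, a page starting at address p is wholly backed by the ELF image, and therefore skipped, exactly when p + PAGE_SIZE <= start_anon; writable areas copy every resident page regardless. A standalone sketch of that boundary classification with illustrative constants (not kernel code):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

	int main(void)
	{
		/* Illustrative segment: file-backed bytes end at start_anon. */
		unsigned long p_vaddr = 0x8048000UL;
		unsigned long p_filesz = 0x2123UL;
		unsigned long start_anon = p_vaddr + p_filesz;	/* 0x804a123 */
		unsigned long page;

		for (page = p_vaddr; page < p_vaddr + 4*PAGE_SIZE; page += PAGE_SIZE) {
			if (page + PAGE_SIZE <= start_anon)
				printf("%#lx: ELF-backed, skipped when read-only\n", page);
			else
				printf("%#lx: (partly) anonymous, copied to the pagemap\n", page);
		}
		return 0;
	}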