Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1423 → Rev 1424

/kernel/trunk/genarch/src/mm/page_ht.c
186,8 → 186,8
t->p = !(flags & PAGE_NOT_PRESENT);
 
t->as = as;
t->page = page;
t->frame = frame;
t->page = ALIGN_DOWN(page, PAGE_SIZE);
t->frame = ALIGN_DOWN(frame, FRAME_SIZE);
 
hash_table_insert(&page_ht, key, &t->link);
}
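The page hash table hunk above now stores page- and frame-aligned values in the entry. A minimal standalone sketch of the ALIGN_DOWN arithmetic, assuming 4 KiB pages and frames rather than the kernel's configured sizes:

#include <stdint.h>
#include <stdio.h>

/* Assumed sizes for illustration; the kernel takes these from its arch headers. */
#define PAGE_SIZE  4096
#define FRAME_SIZE 4096

/* Round an address down to the nearest multiple of align (a power of two). */
#define ALIGN_DOWN(addr, align) ((addr) & ~((uintptr_t)(align) - 1))

int main(void)
{
    uintptr_t page  = 0x80001234;
    uintptr_t frame = 0x00405678;

    printf("page  %#lx -> %#lx\n", (unsigned long) page,
           (unsigned long) ALIGN_DOWN(page, PAGE_SIZE));   /* 0x80001000 */
    printf("frame %#lx -> %#lx\n", (unsigned long) frame,
           (unsigned long) ALIGN_DOWN(frame, FRAME_SIZE)); /* 0x00405000 */
    return 0;
}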
/kernel/trunk/generic/include/mm/as.h
30,11 → 30,10
#define __AS_H__
 
/** Address space area flags. */
#define AS_AREA_READ 1
#define AS_AREA_WRITE 2
#define AS_AREA_EXEC 4
#define AS_AREA_DEVICE 8
#define AS_AREA_ANON 16
#define AS_AREA_READ 1
#define AS_AREA_WRITE 2
#define AS_AREA_EXEC 4
#define AS_AREA_CACHEABLE 8
 
#ifdef KERNEL
 
104,9 → 103,26
#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace()
or memcpy_to_uspace(). */
 
typedef struct share_info share_info_t;
typedef struct mem_backend mem_backend_t;
/** This structure contains information associated with the shared address space area. */
typedef struct {
mutex_t lock; /**< This lock must be acquired only when the as_area lock is held. */
count_t refcount; /**< This structure can be deallocated if refcount drops to 0. */
btree_t pagemap; /**< B+tree containing complete map of anonymous pages of the shared area. */
} share_info_t;
 
/** Address space area backend structure. */
typedef struct {
int (* page_fault)(as_area_t *area, __address addr, pf_access_t access);
void (* frame_free)(as_area_t *area, __address page, __address frame);
void (* share)(as_area_t *area);
} mem_backend_t;
 
/** Backend data stored in address space area. */
typedef struct backend_data {
__native d1;
__native d2;
} mem_backend_data_t;
 
/** Address space area structure.
*
* Each as_area_t structure describes one contiguous area of virtual memory.
114,6 → 130,7
*/
struct as_area {
mutex_t lock;
as_t *as; /**< Containing address space. */
int flags; /**< Flags related to the memory represented by the address space area. */
int attributes; /**< Attributes related to the address space area itself. */
count_t pages; /**< Size of this area in multiples of PAGE_SIZE. */
120,15 → 137,11
__address base; /**< Base address of this area. */
btree_t used_space; /**< Map of used space. */
share_info_t *sh_info; /**< If the address space area has been shared, this pointer will
reference the share info structure. */
mem_backend_t *backend; /**< Memory backend backing this address space area. */
void *backend_data[2]; /**< Data to be used by the backend. */
};
 
/** Address space area backend structure. */
struct mem_backend {
int (* backend_page_fault)(as_area_t *area, __address addr, pf_access_t access);
void (* backend_frame_free)(as_area_t *area, __address page, __address frame);
/** Data to be used by the backend. */
mem_backend_data_t backend_data;
};
 
extern as_t *AS_KERNEL;
140,11 → 153,10
extern void as_init(void);
extern as_t *as_create(int flags);
extern as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
mem_backend_t *backend, void **backend_data);
mem_backend_t *backend, mem_backend_data_t *backend_data);
extern int as_area_resize(as_t *as, __address address, size_t size, int flags);
extern int as_area_destroy(as_t *as, __address address);
extern int as_area_get_flags(as_area_t *area);
extern void as_set_mapping(as_t *as, __address page, __address frame);
extern bool as_area_check_access(as_area_t *area, pf_access_t access);
extern int as_page_fault(__address page, pf_access_t access, istate_t *istate);
extern void as_switch(as_t *old, as_t *new);
163,6 → 175,7
/* Backend declarations. */
extern mem_backend_t anon_backend;
extern mem_backend_t elf_backend;
extern mem_backend_t phys_backend;
 
/* Address space area related syscalls. */
extern __native sys_as_area_create(__address address, size_t size, int flags);
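The new mem_backend_t makes page-fault servicing, frame freeing and sharing per-area callbacks dispatched through the area's backend pointer. Below is a standalone mock-up of that dispatch pattern, not kernel code: every type name other than the three callback fields is a simplified stand-in for the declarations above.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types declared in mm/as.h. */
typedef uintptr_t address_t;
typedef enum { PF_ACCESS_READ, PF_ACCESS_WRITE, PF_ACCESS_EXEC } pf_access_t;
enum { AS_PF_FAULT = 0, AS_PF_OK = 1 };

struct as_area;
typedef struct {
    int  (*page_fault)(struct as_area *area, address_t addr, pf_access_t access);
    void (*frame_free)(struct as_area *area, address_t page, address_t frame);
    void (*share)(struct as_area *area);
} mem_backend_t;

typedef struct as_area {
    mem_backend_t *backend;
} as_area_t;

/* A hypothetical backend that never services a fault. */
static int noop_page_fault(as_area_t *area, address_t addr, pf_access_t access)
{
    (void) area; (void) addr; (void) access;
    return AS_PF_FAULT;
}

static mem_backend_t noop_backend = {
    .page_fault = noop_page_fault,
    .frame_free = NULL,          /* nothing to free */
    .share      = NULL,          /* sharing not supported */
};

int main(void)
{
    as_area_t area = { .backend = &noop_backend };

    /* Mirrors how the generic fault path resorts to the backend handler. */
    if (!area.backend || !area.backend->page_fault ||
        area.backend->page_fault(&area, 0x1000, PF_ACCESS_READ) != AS_PF_OK)
        puts("fault not serviced");
    return 0;
}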
/kernel/trunk/generic/src/ddi/ddi.c
65,7 → 65,7
cap_t caps;
task_t *t;
int flags;
count_t i;
mem_backend_data_t backend_data = { .d1 = (__native) pf, .d2 = (__native) pages };
/*
* Make sure the caller is authorised to make this syscall.
98,10 → 98,11
spinlock_lock(&t->lock);
spinlock_unlock(&tasks_lock);
flags = AS_AREA_DEVICE | AS_AREA_READ;
flags = AS_AREA_READ;
if (writable)
flags |= AS_AREA_WRITE;
if (!as_area_create(t->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE, NULL, NULL)) {
if (!as_area_create(t->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE,
&phys_backend, &backend_data)) {
/*
* The address space area could not be created.
* We report it using ENOMEM.
111,10 → 112,10
return ENOMEM;
}
/* Initialize page tables. */
for (i = 0; i < pages; i++)
as_set_mapping(t->as, vp + i * PAGE_SIZE, pf + i * FRAME_SIZE);
 
/*
* Mapping is created on-demand during page fault.
*/
spinlock_unlock(&t->lock);
interrupts_restore(ipl);
return 0;
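With the physical backend, the physical-memory mapping syscall in ddi.c no longer pre-installs the mappings; it only records the physical base and page count in backend_data, and phys_page_fault() installs each mapping lazily. A standalone sketch of that packing and of the address arithmetic the fault handler uses; the type names here are illustrative stand-ins for __native and mem_backend_data_t.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's __native and mem_backend_data_t. */
typedef uintptr_t native_t;
typedef struct { native_t d1; native_t d2; } mem_backend_data_t;

#define FRAME_SIZE 4096   /* assumed; the kernel uses its arch value */

int main(void)
{
    uintptr_t pf    = 0xfee00000;  /* physical base of the device window */
    size_t    pages = 16;          /* size of the area in frames         */

    /* Packing as done when the area is created (ddi.c hunk above).       */
    mem_backend_data_t backend_data = { .d1 = (native_t) pf, .d2 = (native_t) pages };

    /* Unpacking as done later by phys_page_fault() (backend_phys.c).     */
    uintptr_t base   = (uintptr_t) backend_data.d1;
    size_t    frames = (size_t)    backend_data.d2;

    /* For a fault at area->base + off, the frame is simply base + off.   */
    uintptr_t area_base = 0x40000000, addr = area_base + 3 * FRAME_SIZE;
    if (addr - area_base < frames * FRAME_SIZE)
        printf("map %#lx -> %#lx\n", (unsigned long) addr,
               (unsigned long) (base + (addr - area_base)));
    return 0;
}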
/kernel/trunk/generic/src/proc/task.c
153,7 → 153,8
/*
* Create the data as_area.
*/
a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
USTACK_ADDRESS, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 
t = thread_create(uinit, kernel_uarg, task, 0, "uinit");
/kernel/trunk/generic/src/lib/elf.c
1,5 → 1,6
/*
* Copyright (C) 2006 Sergey Bondari
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
56,14 → 57,6
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as);
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
 
static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, __address page, __address frame);
 
mem_backend_t elf_backend = {
.backend_page_fault = elf_page_fault,
.backend_frame_free = elf_frame_free
};
 
/** ELF loader
*
* @param header Pointer to ELF header in memory
169,7 → 162,7
{
as_area_t *a;
int flags = 0;
void *backend_data[2] = { elf, entry };
mem_backend_data_t backend_data = { .d1 = (__native) elf, .d2 = (__native) entry };
 
if (entry->p_align > 1) {
if ((entry->p_offset % entry->p_align) != (entry->p_vaddr % entry->p_align)) {
183,6 → 176,7
flags |= AS_AREA_WRITE;
if (entry->p_flags & PF_R)
flags |= AS_AREA_READ;
flags |= AS_AREA_CACHEABLE;
 
/*
* Check if the virtual address starts on page boundary.
190,7 → 184,8
if (ALIGN_UP(entry->p_vaddr, PAGE_SIZE) != entry->p_vaddr)
return EE_UNSUPPORTED;
 
a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data);
a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE,
&elf_backend, &backend_data);
if (!a)
return EE_MEMORY;
218,110 → 213,3
return EE_OK;
}
 
/** Service a page fault in the ELF backend address space area.
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
elf_header_t *elf = (elf_header_t *) area->backend_data[0];
elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
__address base, frame;
index_t i;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
 
ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
base = (__address) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
/*
* Initialized portion of the segment. The memory is backed
* directly by the content of the ELF image. Pages are
* only copied if the segment is writable so that there
* can be more instantiations of the same in-memory ELF
* image used at a time. Note that this could later be done
* as COW.
*/
if (entry->p_flags & PF_W) {
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
} else {
frame = KA2PA(base + i*FRAME_SIZE);
}
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
/*
* This is the uninitialized portion of the segment.
* It is not physically present in the ELF image.
* To resolve the situation, a frame must be allocated
* and cleared.
*/
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame), FRAME_SIZE, 0);
} else {
size_t size;
/*
* The mixed case.
* The lower part is backed by the ELF image and
* the upper part is anonymous memory.
*/
size = entry->p_filesz - (i<<PAGE_WIDTH);
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
}
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
 
return AS_PF_OK;
}
 
/** Free a frame that is backed by the ELF backend.
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
* @param frame Frame to be released.
*
*/
void elf_frame_free(as_area_t *area, __address page, __address frame)
{
elf_header_t *elf = (elf_header_t *) area->backend_data[0];
elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
__address base;
index_t i;
ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
i = (page - entry->p_vaddr) >> PAGE_WIDTH;
base = (__address) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
if (entry->p_flags & PF_W) {
/*
* Free the frame with the copy of writable segment data.
*/
frame_free(ADDR2PFN(frame));
}
} else {
/*
* The frame is either anonymous memory or the mixed case (i.e. lower
* part is backed by the ELF image and the upper is anonymous).
* In any case, a frame needs to be freed.
*/
frame_free(ADDR2PFN(frame));
}
}
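In the rewritten load_segment(), the ELF header and segment header pointers travel to the backend through the generic mem_backend_data_t, cast to __native and back. A standalone sketch of that round trip, with uintptr_t standing in for __native (assumed to be pointer-sized) and placeholder struct names instead of the real elf.h types:

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t native_t;                      /* stand-in for __native */
typedef struct { native_t d1; native_t d2; } mem_backend_data_t;

struct elf_header         { int dummy; };        /* placeholders for elf.h types */
struct elf_segment_header { int dummy; };

int main(void)
{
    struct elf_header         elf;
    struct elf_segment_header entry;

    /* load_segment(): pack the pointers into the generic backend data.   */
    mem_backend_data_t bd = { .d1 = (native_t) &elf, .d2 = (native_t) &entry };

    /* elf_page_fault() / elf_frame_free(): recover them.                 */
    struct elf_header         *e  = (struct elf_header *)         bd.d1;
    struct elf_segment_header *se = (struct elf_segment_header *) bd.d2;

    printf("round trip ok: %d %d\n", e == &elf, se == &entry);
    return 0;
}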
/kernel/trunk/generic/src/mm/backend_anon.c
0,0 → 1,203
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/**
* @file backend_anon.c
* @brief Backend for anonymous memory address space areas.
*
*/
 
#include <mm/as.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <errno.h>
#include <arch/types.h>
#include <typedefs.h>
#include <align.h>
#include <arch.h>
 
static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void anon_frame_free(as_area_t *area, __address page, __address frame);
static void anon_share(as_area_t *area);
 
/*
* Anonymous memory backend.
*/
mem_backend_t anon_backend = {
.page_fault = anon_page_fault,
.frame_free = anon_frame_free,
.share = anon_share
};
 
/** Service a page fault in the anonymous memory address space area.
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
__address frame;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
 
if (area->sh_info) {
btree_node_t *leaf;
/*
* The area is shared; chances are that the mapping can be found
* in the pagemap of the address space area share info structure.
* In the case that the pagemap does not contain the respective
* mapping, a new frame is allocated and the mapping is created.
*/
mutex_lock(&area->sh_info->lock);
frame = (__address) btree_search(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
if (!frame) {
bool allocate = true;
int i;
/*
* Zero can be returned as a valid frame address,
* so the leaf node is searched to tell a missing
* mapping from a genuinely zero frame.
*/
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
allocate = false;
break;
}
}
if (allocate) {
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame), FRAME_SIZE, 0);
/*
* Insert the address of the newly allocated frame to the pagemap.
*/
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
}
}
mutex_unlock(&area->sh_info->lock);
} else {
 
/*
* In general, there are several reasons that
* could have caused this fault.
*
* - non-existent mapping: the area is an anonymous
* area (e.g. heap or stack) and no frame has yet
* been allocated for the faulting page
*
* - non-present mapping: another possibility,
* currently not implemented, would be frame
* reuse; when this becomes a possibility,
* do not forget to distinguish between
* the different causes
*/
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame), FRAME_SIZE, 0);
}
/*
* Map 'page' to 'frame'.
* Note that TLB shootdown is not attempted as only new information is being
* inserted into page tables.
*/
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
return AS_PF_OK;
}
 
/** Free a frame that is backed by the anonymous memory backend.
*
* The address space area and page tables must be already locked.
*
* @param area Ignored.
* @param page Ignored.
* @param frame Frame to be released.
*/
void anon_frame_free(as_area_t *area, __address page, __address frame)
{
frame_free(ADDR2PFN(frame));
}
 
/** Share the anonymous address space area.
*
* Sharing of an anonymous area is done by duplicating its entire mapping
* to the pagemap. Page faults will primarily search for frames there.
*
* The address space area and page tables must be already locked.
*
* @param area Address space area to be shared.
*/
void anon_share(as_area_t *area)
{
link_t *cur;
 
/*
* Copy used portions of the area to sh_info's page map.
*/
mutex_lock(&area->sh_info->lock);
for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
btree_node_t *node;
int i;
node = list_get_instance(cur, btree_node_t, leaf_link);
for (i = 0; i < node->keys; i++) {
__address base = node->key[i];
count_t count = (count_t) node->value[i];
int j;
for (j = 0; j < count; j++) {
pte_t *pte;
page_table_lock(area->as, false);
pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
page_table_unlock(area->as, false);
}
}
}
mutex_unlock(&area->sh_info->lock);
}
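anon_share() keys the pagemap by each page's offset within the area (page address minus area->base), and anon_page_fault() looks frames up by the same offset, so the map stays valid even if the sharing areas sit at different virtual bases. A standalone sketch of that offset convention, using a plain array where the kernel uses a B+tree:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096   /* assumed for illustration */

/* Toy "pagemap": offset within the area -> frame; the kernel uses a B+tree. */
struct map_entry { uintptr_t offset; uintptr_t frame; };

int main(void)
{
    /* Entries as anon_share() would record them: key = page - src area base. */
    struct map_entry pagemap[] = {
        { 0 * PAGE_SIZE, 0x00100000 },
        { 1 * PAGE_SIZE, 0x00200000 },
    };

    uintptr_t dst_base = 0x70000000;        /* the receiving area's base */
    uintptr_t addr     = dst_base + PAGE_SIZE;

    /* anon_page_fault() searches with the same key relative to its own base. */
    uintptr_t offset = addr - dst_base;

    for (unsigned i = 0; i < sizeof(pagemap) / sizeof(pagemap[0]); i++)
        if (pagemap[i].offset == offset)
            printf("offset %#lx -> frame %#lx\n",
                   (unsigned long) offset, (unsigned long) pagemap[i].frame);
    return 0;
}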
/kernel/trunk/generic/src/mm/as.c
74,13 → 74,6
#include <syscall/copy.h>
#include <arch/interrupt.h>
 
/** This structure contains information associated with the shared address space area. */
struct share_info {
mutex_t lock; /**< This lock must be acquired only when the as_area lock is held. */
count_t refcount; /**< This structure can be deallocated if refcount drops to 0. */
btree_t pagemap; /**< B+tree containing complete map of anonymous pages of the shared area. */
};
 
as_operations_t *as_operations = NULL;
 
/** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */
159,7 → 152,7
* @return Address space area on success or NULL on failure.
*/
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
mem_backend_t *backend, void **backend_data)
mem_backend_t *backend, mem_backend_data_t *backend_data)
{
ipl_t ipl;
as_area_t *a;
187,6 → 180,7
 
mutex_initialize(&a->lock);
a->as = as;
a->flags = flags;
a->attributes = attrs;
a->pages = SIZE2FRAMES(size);
193,10 → 187,11
a->base = base;
a->sh_info = NULL;
a->backend = backend;
if (backend_data) {
a->backend_data[0] = backend_data[0];
a->backend_data[1] = backend_data[1];
}
if (backend_data)
a->backend_data = *backend_data;
else
memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);
 
btree_create(&a->used_space);
btree_insert(&as->as_area_btree, base, (void *) a, NULL);
235,7 → 230,7
return ENOENT;
}
 
if (area->flags & AS_AREA_DEVICE) {
if (area->backend == &phys_backend) {
/*
* Remapping of address space areas associated
* with memory mapped devices is not supported.
326,8 → 321,8
page_table_lock(as, false);
pte = page_mapping_find(as, b + i*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
if (area->backend && area->backend->backend_frame_free) {
area->backend->backend_frame_free(area,
if (area->backend && area->backend->frame_free) {
area->backend->frame_free(area,
b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
}
page_mapping_remove(as, b + i*PAGE_SIZE);
411,8 → 406,8
page_table_lock(as, false);
pte = page_mapping_find(as, b + i*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
if (area->backend && area->backend->backend_frame_free) {
area->backend->backend_frame_free(area,
if (area->backend && area->backend->frame_free) {
area->backend->frame_free(area,
b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
}
page_mapping_remove(as, b + i*PAGE_SIZE);
452,11 → 447,11
 
/** Share address space area with another or the same address space.
*
* Address space area of anonymous memory is shared with a new address
* space area. If the source address space area has not been shared so
* far, a new sh_info is created and the original mapping is duplicated
* in its pagemap B+tree. The new address space area simply gets the
* sh_info of the source area.
* Address space area mapping is shared with a new address space area.
* If the source address space area has not been shared so far,
* a new sh_info is created. The new address space area simply gets the
* sh_info of the source area. The process of duplicating the
* mapping is done through the backend share function.
*
* @param src_as Pointer to source address space.
* @param src_base Base address of the source address space area.
479,7 → 474,8
size_t src_size;
as_area_t *src_area, *dst_area;
share_info_t *sh_info;
link_t *cur;
mem_backend_t *src_backend;
mem_backend_data_t src_backend_data;
 
ipl = interrupts_disable();
mutex_lock(&src_as->lock);
493,9 → 489,10
return ENOENT;
}
if (!src_area->backend || src_area->backend != &anon_backend) {
if (!src_area->backend || !src_area->backend->share) {
/*
* As of now, only anonymous address space areas can be shared.
* There is no backend or the backend does not
* know how to share the area.
*/
mutex_unlock(&src_area->lock);
mutex_unlock(&src_as->lock);
505,6 → 502,8
src_size = src_area->pages * PAGE_SIZE;
src_flags = src_area->flags;
src_backend = src_area->backend;
src_backend_data = src_area->backend_data;
if (src_size != acc_size) {
mutex_unlock(&src_area->lock);
531,34 → 530,7
mutex_unlock(&sh_info->lock);
}
 
/*
* Copy used portions of the area to sh_info's page map.
*/
mutex_lock(&sh_info->lock);
for (cur = src_area->used_space.leaf_head.next; cur != &src_area->used_space.leaf_head; cur = cur->next) {
btree_node_t *node;
int i;
node = list_get_instance(cur, btree_node_t, leaf_link);
for (i = 0; i < node->keys; i++) {
__address base = node->key[i];
count_t count = (count_t) node->value[i];
int j;
for (j = 0; j < count; j++) {
pte_t *pte;
page_table_lock(src_as, false);
pte = page_mapping_find(src_as, base + j*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
btree_insert(&sh_info->pagemap, (base + j*PAGE_SIZE) - src_area->base,
(void *) PTE_GET_FRAME(pte), NULL);
page_table_unlock(src_as, false);
}
}
}
mutex_unlock(&sh_info->lock);
src_area->backend->share(src_area);
 
mutex_unlock(&src_area->lock);
mutex_unlock(&src_as->lock);
572,7 → 544,7
* to support sharing in less privileged mode.
*/
dst_area = as_area_create(AS, src_flags & dst_flags_mask, src_size, dst_base,
AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
if (!dst_area) {
/*
* Destination address space area could not be created.
598,40 → 570,6
return 0;
}
 
/** Initialize mapping for one page of address space.
*
* This function maps 'page' to 'frame' according
* to the attributes of the address space area to
* which 'page' belongs.
*
* @param as Target address space.
* @param page Virtual page within the area.
* @param frame Physical frame to which page will be mapped.
*/
void as_set_mapping(as_t *as, __address page, __address frame)
{
as_area_t *area;
ipl_t ipl;
ipl = interrupts_disable();
page_table_lock(as, true);
area = find_area_and_lock(as, page);
if (!area) {
panic("Page not part of any as_area.\n");
}
 
ASSERT(!area->backend);
page_mapping_insert(as, page, frame, as_area_get_flags(area));
if (!used_space_insert(area, page, 1))
panic("Could not insert used space.\n");
mutex_unlock(&area->lock);
page_table_unlock(as, true);
interrupts_restore(ipl);
}
 
/** Check access mode for address space area.
*
* The address space area must be locked prior to this call.
702,7 → 640,7
goto page_fault;
}
 
if (!area->backend || !area->backend->backend_page_fault) {
if (!area->backend || !area->backend->page_fault) {
/*
* The address space area is not backed by any backend
* or the backend cannot handle page faults.
735,7 → 673,7
/*
* Resort to the backend page fault handler.
*/
if (area->backend->backend_page_fault(area, page, access) != AS_PF_OK) {
if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
page_table_unlock(AS, false);
mutex_unlock(&area->lock);
mutex_unlock(&AS->lock);
854,7 → 792,7
if (aflags & AS_AREA_EXEC)
flags |= PAGE_EXEC;
if (!(aflags & AS_AREA_DEVICE))
if (aflags & AS_AREA_CACHEABLE)
flags |= PAGE_CACHEABLE;
return flags;
1497,117 → 1435,7
}
}
 
static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void anon_frame_free(as_area_t *area, __address page, __address frame);
 
/*
* Anonymous memory backend.
*/
mem_backend_t anon_backend = {
.backend_page_fault = anon_page_fault,
.backend_frame_free = anon_frame_free
};
 
/** Service a page fault in the anonymous memory address space area.
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
__address frame;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
 
if (area->sh_info) {
btree_node_t *leaf;
/*
* The area is shared, chances are that the mapping can be found
* in the pagemap of the address space area share info structure.
* In the case that the pagemap does not contain the respective
* mapping, a new frame is allocated and the mapping is created.
*/
mutex_lock(&area->sh_info->lock);
frame = (__address) btree_search(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
if (!frame) {
bool allocate = true;
int i;
/*
* Zero can be returned as a valid frame address.
* Just a small workaround.
*/
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
allocate = false;
break;
}
}
if (allocate) {
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame), FRAME_SIZE, 0);
/*
* Insert the address of the newly allocated frame to the pagemap.
*/
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
}
}
mutex_unlock(&area->sh_info->lock);
} else {
 
/*
* In general, there can be several reasons that
* can have caused this fault.
*
* - non-existent mapping: the area is an anonymous
* area (e.g. heap or stack) and so far has not been
* allocated a frame for the faulting page
*
* - non-present mapping: another possibility,
* currently not implemented, would be frame
* reuse; when this becomes a possibility,
* do not forget to distinguish between
* the different causes
*/
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame), FRAME_SIZE, 0);
}
/*
* Map 'page' to 'frame'.
* Note that TLB shootdown is not attempted as only new information is being
* inserted into page tables.
*/
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
return AS_PF_OK;
}
 
/** Free a frame that is backed by the anonymous memory backend.
*
* The address space area and page tables must be already locked.
*
* @param area Ignored.
* @param page Ignored.
* @param frame Frame to be released.
*/
void anon_frame_free(as_area_t *area, __address page, __address frame)
{
frame_free(ADDR2PFN(frame));
}
 
/*
* Address space related syscalls.
*/
 
1614,7 → 1442,7
/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
return (__native) address;
else
return (__native) -1;
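Cacheability is now an explicit area flag: as_area_get_flags() sets PAGE_CACHEABLE when AS_AREA_CACHEABLE is present instead of treating every non-device area as cacheable, and sys_as_area_create() ORs the flag in for userspace callers. A simplified standalone sketch of the translation; the PAGE_* values are illustrative and the real routine in as.c handles more bits than the two cases visible in the hunk above.

#include <stdio.h>

/* Area flags after this revision (AS_AREA_DEVICE and AS_AREA_ANON are gone). */
#define AS_AREA_READ      1
#define AS_AREA_WRITE     2
#define AS_AREA_EXEC      4
#define AS_AREA_CACHEABLE 8

/* Illustrative page flag values; the kernel's are architecture-defined. */
#define PAGE_READ      1
#define PAGE_WRITE     2
#define PAGE_EXEC      4
#define PAGE_CACHEABLE 8

static int area_flags_to_page_flags(int aflags)
{
    int flags = 0;
    if (aflags & AS_AREA_READ)      flags |= PAGE_READ;
    if (aflags & AS_AREA_WRITE)     flags |= PAGE_WRITE;
    if (aflags & AS_AREA_EXEC)      flags |= PAGE_EXEC;
    if (aflags & AS_AREA_CACHEABLE) flags |= PAGE_CACHEABLE;  /* was: !(aflags & AS_AREA_DEVICE) */
    return flags;
}

int main(void)
{
    /* What sys_as_area_create() now effectively requests for a user allocation. */
    int aflags = AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE;
    printf("page flags: %#x\n", (unsigned) area_flags_to_page_flags(aflags));
    return 0;
}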
/kernel/trunk/generic/src/mm/backend_phys.c
0,0 → 1,89
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/**
* @file backend_phys.c
* @brief Backend for address space areas backed by contiguous physical memory.
*/
 
#include <debug.h>
#include <arch/types.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <align.h>
 
static int phys_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void phys_share(as_area_t *area);
 
mem_backend_t phys_backend = {
.page_fault = phys_page_fault,
.frame_free = NULL,
.share = phys_share
};
 
/** Service a page fault in the address space area backed by physical memory.
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int phys_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
__address base = (__address) area->backend_data.d1;
count_t frames = (count_t) area->backend_data.d2;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
 
ASSERT(addr - area->base < frames * FRAME_SIZE);
page_mapping_insert(AS, addr, base + (addr - area->base), as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
 
return AS_PF_OK;
}
 
/** Share address space area backed by physical memory.
*
* Do nothing, as sharing address space areas backed by
* physical memory requires no extra work.
* Note that the function must nevertheless be defined
* so that as_area_share() will succeed.
*/
void phys_share(as_area_t *area)
{
}
/kernel/trunk/generic/src/mm/backend_elf.c
0,0 → 1,160
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/**
* @file backend_elf.c
* @brief Backend for address space areas backed by an ELF image.
*/
 
#include <elf.h>
#include <debug.h>
#include <arch/types.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
 
static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, __address page, __address frame);
 
mem_backend_t elf_backend = {
.page_fault = elf_page_fault,
.frame_free = elf_frame_free,
.share = NULL
};
 
/** Service a page fault in the ELF backend address space area.
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
elf_header_t *elf = (elf_header_t *) area->backend_data.d1;
elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data.d2;
__address base, frame;
index_t i;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
 
ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
base = (__address) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
/*
* Initialized portion of the segment. The memory is backed
* directly by the content of the ELF image. Pages are
* only copied if the segment is writable so that there
* can be more instantiations of the same in-memory ELF
* image used at a time. Note that this could later be done
* as COW.
*/
if (entry->p_flags & PF_W) {
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
} else {
frame = KA2PA(base + i*FRAME_SIZE);
}
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
/*
* This is the uninitialized portion of the segment.
* It is not physically present in the ELF image.
* To resolve the situation, a frame must be allocated
* and cleared.
*/
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame), FRAME_SIZE, 0);
} else {
size_t size;
/*
* The mixed case.
* The lower part is backed by the ELF image and
* the upper part is anonymous memory.
*/
size = entry->p_filesz - (i<<PAGE_WIDTH);
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
}
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
 
return AS_PF_OK;
}
 
/** Free a frame that is backed by the ELF backend.
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
* @param frame Frame to be released.
*
*/
void elf_frame_free(as_area_t *area, __address page, __address frame)
{
elf_header_t *elf = (elf_header_t *) area->backend_data.d1;
elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data.d2;
__address base;
index_t i;
ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
i = (page - entry->p_vaddr) >> PAGE_WIDTH;
base = (__address) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
if (entry->p_flags & PF_W) {
/*
* Free the frame with the copy of writable segment data.
*/
frame_free(ADDR2PFN(frame));
}
} else {
/*
* The frame is either anonymous memory or the mixed case (i.e. lower
* part is backed by the ELF image and the upper is anonymous).
* In any case, a frame needs to be freed.
*/
frame_free(ADDR2PFN(frame));
}
}
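elf_page_fault() above splits a faulting page into three cases relative to p_filesz: fully initialized (backed directly by the image, copied only when the segment is writable), fully uninitialized (a freshly zeroed anonymous frame), and the mixed boundary page (lower part copied, upper part zeroed). A standalone sketch of that classification, assuming a page-aligned p_vaddr and 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096   /* assumed for illustration */
#define ALIGN_DOWN(a, s) ((a) & ~((uintptr_t)(s) - 1))
#define ALIGN_UP(a, s)   ALIGN_DOWN((a) + (s) - 1, (s))

/* Mirrors the three-way split in elf_page_fault(). */
static const char *classify(uintptr_t addr, uintptr_t p_vaddr, size_t p_filesz)
{
    uintptr_t page = ALIGN_DOWN(addr, PAGE_SIZE);

    if (page + PAGE_SIZE < p_vaddr + p_filesz)
        return "initialized (backed by the ELF image)";
    else if (page >= ALIGN_UP(p_vaddr + p_filesz, PAGE_SIZE))
        return "uninitialized (zero-filled anonymous frame)";
    else
        return "mixed (lower part copied from image, upper part zeroed)";
}

int main(void)
{
    uintptr_t p_vaddr  = 0x08048000;            /* page-aligned segment start */
    size_t    p_filesz = 3 * PAGE_SIZE + 100;   /* initialized bytes in file  */

    printf("%s\n", classify(p_vaddr,                 p_vaddr, p_filesz));
    printf("%s\n", classify(p_vaddr + 3 * PAGE_SIZE, p_vaddr, p_filesz));
    printf("%s\n", classify(p_vaddr + 5 * PAGE_SIZE, p_vaddr, p_filesz));
    return 0;
}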
/kernel/trunk/Makefile
141,6 → 141,9
generic/src/mm/page.c \
generic/src/mm/tlb.c \
generic/src/mm/as.c \
generic/src/mm/backend_anon.c \
generic/src/mm/backend_elf.c \
generic/src/mm/backend_phys.c \
generic/src/mm/slab.c \
generic/src/lib/func.c \
generic/src/lib/memstr.c \