Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1408 → Rev 1409

/kernel/trunk/generic/include/mm/as.h
34,6 → 34,7
#define AS_AREA_WRITE 2
#define AS_AREA_EXEC 4
#define AS_AREA_DEVICE 8
#define AS_AREA_ANON 16
 
#ifdef KERNEL
 
61,30 → 62,6
 
#define FLAG_AS_KERNEL (1 << 0) /**< Kernel address space. */
 
 
/** Address space area attributes. */
#define AS_AREA_ATTR_NONE 0
#define AS_AREA_ATTR_PARTIAL 1 /* Not fully initialized area. */
 
#define AS_PF_FAULT 0 /**< The page fault was not resolved by as_page_fault(). */
#define AS_PF_OK 1 /**< The page fault was resolved by as_page_fault(). */
#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace()
or memcpy_to_uspace(). */
 
/** Address space area structure.
*
* Each as_area_t structure describes one contiguous area of virtual memory.
* In the future, it should not be difficult to support shared areas.
*/
struct as_area {
mutex_t lock;
int flags; /**< Flags related to the memory represented by the address space area. */
int attributes; /**< Attributes related to the address space area itself. */
count_t pages; /**< Size of this area in multiples of PAGE_SIZE. */
__address base; /**< Base address of this area. */
btree_t used_space; /**< Map of used space. */
};
 
/** Address space structure.
*
* as_t contains the list of as_areas of userspace accessible
118,6 → 95,42
};
typedef struct as_operations as_operations_t;
 
/** Address space area attributes. */
#define AS_AREA_ATTR_NONE 0
#define AS_AREA_ATTR_PARTIAL 1 /**< Not fully initialized area. */
 
#define AS_PF_FAULT 0 /**< The page fault was not resolved by as_page_fault(). */
#define AS_PF_OK 1 /**< The page fault was resolved by as_page_fault(). */
#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace()
or memcpy_to_uspace(). */
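
To show how these return codes are meant to be consumed, here is a minimal, hypothetical sketch of an architecture-level fault handler calling as_page_fault(); the handler name and the panic message are illustrative and not part of this changeset.

/* Hypothetical caller, for illustration only. */
static void arch_page_fault(__address badvaddr, istate_t *istate)
{
	int rc;

	rc = as_page_fault(badvaddr, istate);
	if (rc == AS_PF_FAULT) {
		/*
		 * Neither a backend nor the memcpy_*_uspace failover
		 * could resolve the fault.
		 */
		panic("Unresolved page fault at %P.\n", badvaddr);
	}
	/*
	 * AS_PF_OK: a mapping was installed; returning retries the access.
	 * AS_PF_DEFER: the return address in istate was redirected to the
	 * memcpy_*_uspace failover address, so returning is also safe.
	 */
}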
 
typedef struct share_info share_info_t;
typedef struct mem_backend mem_backend_t;
 
/** Address space area structure.
*
* Each as_area_t structure describes one contiguous area of virtual memory.
* In the future, it should not be difficult to support shared areas.
*/
struct as_area {
mutex_t lock;
int flags; /**< Flags related to the memory represented by the address space area. */
int attributes; /**< Attributes related to the address space area itself. */
count_t pages; /**< Size of this area in multiples of PAGE_SIZE. */
__address base; /**< Base address of this area. */
btree_t used_space; /**< Map of used space. */
share_info_t *sh_info; /**< If the address space area has been shared, this pointer will
reference the share info structure. */
mem_backend_t *backend; /**< Memory backend backing this address space area. */
void *backend_data[2]; /**< Data to be used by the backend. */
};
 
/** Address space area backend structure. */
struct mem_backend {
int (* backend_page_fault)(as_area_t *area, __address addr);
void (* backend_frame_free)(as_area_t *area, __address page, __address frame);
};
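
As a hedged illustration (not part of this revision), a custom backend provides just these two hooks and is attached at area-creation time; the names my_backend, my_page_fault and my_frame_free are hypothetical, while the kernel calls used below all appear elsewhere in this changeset.

/* Hypothetical backend, for illustration only. */
static int my_page_fault(as_area_t *area, __address addr)
{
	__address frame;

	/* Back the faulting page with a zeroed frame and map it. */
	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	memsetb(PA2KA(frame), FRAME_SIZE, 0);
	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
		panic("Could not insert used space.\n");
	return AS_PF_OK;
}

static void my_frame_free(as_area_t *area, __address page, __address frame)
{
	frame_free(ADDR2PFN(frame));
}

static mem_backend_t my_backend = {
	.backend_page_fault = my_page_fault,
	.backend_frame_free = my_frame_free
};

/* Attaching the backend to a new area: */
as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, PAGE_SIZE, base,
    AS_AREA_ATTR_NONE, &my_backend, NULL);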
 
extern as_t *AS_KERNEL;
extern as_operations_t *as_operations;
 
126,9 → 139,11
 
extern void as_init(void);
extern as_t *as_create(int flags);
extern as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs);
extern as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
mem_backend_t *backend, void **backend_data);
extern int as_area_resize(as_t *as, __address address, size_t size, int flags);
extern int as_area_destroy(as_t *as, __address address);
extern int as_area_get_flags(as_area_t *area);
extern void as_set_mapping(as_t *as, __address page, __address frame);
extern int as_page_fault(__address page, istate_t *istate);
extern void as_switch(as_t *old, as_t *new);
135,6 → 150,8
extern void as_free(as_t *as);
extern int as_area_steal(task_t *src_task, __address src_base, size_t acc_size, __address dst_base);
extern size_t as_get_size(__address base);
extern int used_space_insert(as_area_t *a, __address page, count_t count);
extern int used_space_remove(as_area_t *a, __address page, count_t count);
 
/* Interface to be implemented by architectures. */
#ifndef as_install_arch
141,6 → 158,10
extern void as_install_arch(as_t *as);
#endif /* !def as_install_arch */
 
/* Backend declarations. */
extern mem_backend_t anon_backend;
extern mem_backend_t elf_backend;
 
/* Address space area related syscalls. */
extern __native sys_as_area_create(__address address, size_t size, int flags);
extern __native sys_as_area_resize(__address address, size_t size, int flags);
/kernel/trunk/generic/src/ddi/ddi.c
101,7 → 101,7
flags = AS_AREA_DEVICE | AS_AREA_READ;
if (writable)
flags |= AS_AREA_WRITE;
if (!as_area_create(t->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE)) {
if (!as_area_create(t->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE, NULL, NULL)) {
/*
* The address space area could not have been created.
* We report it using ENOMEM.
/kernel/trunk/generic/src/proc/task.c
154,7 → 154,7
* Create the data as_area.
*/
a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
USTACK_ADDRESS, AS_AREA_ATTR_NONE);
USTACK_ADDRESS, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 
t = thread_create(uinit, kernel_uarg, task, 0, "uinit");
ASSERT(t);
/kernel/trunk/generic/src/lib/elf.c
41,6 → 41,7
#include <align.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
 
static char *error_codes[] = {
"no error",
55,6 → 56,14
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as);
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
 
static int elf_page_fault(as_area_t *area, __address addr);
static void elf_frame_free(as_area_t *area, __address page, __address frame);
 
mem_backend_t elf_backend = {
.backend_page_fault = elf_page_fault,
.backend_frame_free = elf_frame_free
};
 
/** ELF loader
*
* @param header Pointer to ELF header in memory
159,9 → 168,8
int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as)
{
as_area_t *a;
int i, flags = 0;
size_t segment_size;
__u8 *segment;
int flags = 0;
void *backend_data[2] = { elf, entry };
 
if (entry->p_align > 1) {
if ((entry->p_offset % entry->p_align) != (entry->p_vaddr % entry->p_align)) {
182,23 → 190,14
if (ALIGN_UP(entry->p_vaddr, PAGE_SIZE) != entry->p_vaddr)
return EE_UNSUPPORTED;
 
segment_size = ALIGN_UP(max(entry->p_filesz, entry->p_memsz), PAGE_SIZE);
if ((entry->p_flags & PF_W)) {
/* If writable, copy data (should be COW in the future) */
segment = malloc(segment_size, 0);
memsetb((__address) (segment + entry->p_filesz), segment_size - entry->p_filesz, 0);
memcpy(segment, (void *) (((__address) elf) + entry->p_offset), entry->p_filesz);
} else /* Map identically original data */
segment = ((void *) elf) + entry->p_offset;
 
a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE);
a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data);
if (!a)
return EE_MEMORY;
for (i = 0; i < SIZE2FRAMES(entry->p_filesz); i++) {
as_set_mapping(as, entry->p_vaddr + i*PAGE_SIZE, KA2PA(((__address) segment) + i*PAGE_SIZE));
}
/*
* The segment will be mapped on demand by elf_page_fault().
*/
 
return EE_OK;
}
 
219,3 → 218,106
return EE_OK;
}
 
/** Service a page fault in the ELF backend address space area.
*
* The address space area and page tables must already be locked.
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int elf_page_fault(as_area_t *area, __address addr)
{
elf_header_t *elf = (elf_header_t *) area->backend_data[0];
elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
__address base, frame;
index_t i;
 
ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
base = (__address) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
/*
* Initialized portion of the segment. The memory is backed
* directly by the content of the ELF image. Pages are
* only copied if the segment is writable so that there
* can be more instantiations of the same in-memory ELF image
* used at a time. Note that this could later be done
* as COW.
*/
if (entry->p_flags & PF_W) {
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
} else {
frame = KA2PA(base + i*FRAME_SIZE);
}
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
/*
* This is the uninitialized portion of the segment.
* It is not physically present in the ELF image.
* To resolve the situation, a frame must be allocated
* and cleared.
*/
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame), FRAME_SIZE, 0);
} else {
size_t size;
/*
* The mixed case.
* The lower part is backed by the ELF image and
* the upper part is anonymous memory.
*/
size = entry->p_filesz - (i<<PAGE_WIDTH);
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
}
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
 
return AS_PF_OK;
}
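
A short worked example may help with the three branches above; the numbers are chosen purely for illustration (4 KiB pages assumed): p_vaddr = 0x1000, p_filesz = 0x1800, p_memsz = 0x3000, so the segment spans pages 0x1000, 0x2000 and 0x3000 and the initialized data end at 0x2800.

/*
 * Worked example with the illustrative values above:
 *
 * - fault at 0x1000..0x1fff: the whole page lies below
 *   p_vaddr + p_filesz = 0x2800, so it is backed by the ELF image
 *   (and copied only if PF_W is set);
 *
 * - fault at 0x3000..0x3fff: the page starts at
 *   ALIGN_UP(0x2800, PAGE_SIZE) = 0x3000 or above, so a zeroed
 *   anonymous frame is allocated;
 *
 * - fault at 0x2000..0x2fff: the mixed case; i = 1, so
 *   size = 0x1800 - 0x1000 = 0x800 bytes are copied from the image
 *   and the remaining 0x800 bytes of the frame are zeroed.
 */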
 
/** Free a frame that is backed by the ELF backend.
*
* The address space area and page tables must already be locked.
*
* @param area Pointer to the address space area.
* @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
* @param frame Frame to be released.
*
*/
void elf_frame_free(as_area_t *area, __address page, __address frame)
{
elf_header_t *elf = (elf_header_t *) area->backend_data[0];
elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
__address base;
index_t i;
ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
i = (page - entry->p_vaddr) >> PAGE_WIDTH;
base = (__address) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
if (entry->p_flags & PF_W) {
/*
* Free the frame with the copy of writable segment data.
*/
frame_free(ADDR2PFN(frame));
}
} else {
/*
* The frame is either anonymous memory or the mixed case (i.e. lower
* part is backed by the ELF image and the upper is anonymous).
* In either case, the frame needs to be freed.
*/
frame_free(ADDR2PFN(frame));
}
}
/kernel/trunk/generic/src/mm/as.c
74,6 → 74,13
#include <syscall/copy.h>
#include <arch/interrupt.h>
 
/** This structure contains information associated with the shared address space area. */
struct share_info {
mutex_t lock; /**< This lock must be acquired only when the as_area lock is held. */
count_t refcount; /**< This structure can be deallocated if refcount drops to 0. */
btree_t pagemap; /**< B+tree containing complete map of anonymous pages of the shared area. */
};
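
The code that first populates this structure (the sharing path itself) is not part of this revision; purely as a hypothetical sketch, a second area attaching to a donor area's share info might take a reference roughly as below. The name src_area is illustrative and mutex_initialize() is assumed to be the usual kernel mutex initializer; the donor's as_area lock is assumed to be held.

/* Hypothetical sketch only, not part of this changeset. */
share_info_t *sh_info = src_area->sh_info;

if (!sh_info) {
	/* First sharer: create the share info structure. */
	sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
	mutex_initialize(&sh_info->lock);
	sh_info->refcount = 2;		/* the donor and the new sharer */
	btree_create(&sh_info->pagemap);
	src_area->sh_info = sh_info;
} else {
	mutex_lock(&sh_info->lock);
	sh_info->refcount++;
	mutex_unlock(&sh_info->lock);
}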
 
as_operations_t *as_operations = NULL;
 
/** Address space lock. It protects inactive_as_with_asid_head. Must be acquired before as_t mutex. */
89,11 → 96,9
as_t *AS_KERNEL = NULL;
 
static int area_flags_to_page_flags(int aflags);
static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
static int used_space_insert(as_area_t *a, __address page, count_t count);
static int used_space_remove(as_area_t *a, __address page, count_t count);
static void sh_info_remove_reference(share_info_t *sh_info);
 
/** Initialize address space subsystem. */
void as_init(void)
148,10 → 153,13
* @param size Size of area.
* @param base Base address of area.
* @param attrs Attributes of the area.
* @param backend Address space area backend. NULL if no backend is used.
* @param backend_data NULL or a pointer to an array holding two void *.
*
* @return Address space area on success or NULL on failure.
*/
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
mem_backend_t *backend, void **backend_data)
{
ipl_t ipl;
as_area_t *a;
183,6 → 191,12
a->attributes = attrs;
a->pages = SIZE2FRAMES(size);
a->base = base;
a->sh_info = NULL;
a->backend = backend;
if (backend_data) {
a->backend_data[0] = backend_data[0];
a->backend_data[1] = backend_data[1];
}
btree_create(&a->used_space);
btree_insert(&as->as_area_btree, base, (void *) a, NULL);
231,6 → 245,16
interrupts_restore(ipl);
return ENOTSUP;
}
if (area->sh_info) {
/*
* Remapping of shared address space areas
* is not supported.
*/
mutex_unlock(&area->lock);
mutex_unlock(&as->lock);
interrupts_restore(ipl);
return ENOTSUP;
}
 
pages = SIZE2FRAMES((address - area->base) + size);
if (!pages) {
302,7 → 326,10
page_table_lock(as, false);
pte = page_mapping_find(as, b + i*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
frame_free(ADDR2PFN(PTE_GET_FRAME(pte)));
if (area->backend && area->backend->backend_frame_free) {
area->backend->backend_frame_free(area,
b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
}
page_mapping_remove(as, b + i*PAGE_SIZE);
page_table_unlock(as, false);
}
391,7 → 418,10
page_table_lock(as, false);
pte = page_mapping_find(as, b + i*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
frame_free(ADDR2PFN(PTE_GET_FRAME(pte)));
if (area->backend && area->backend->backend_frame_free) {
area->backend->backend_frame_free(area,
b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
}
page_mapping_remove(as, b + i*PAGE_SIZE);
page_table_unlock(as, false);
}
410,6 → 440,10
tlb_shootdown_finalize();
 
area->attributes |= AS_AREA_ATTR_PARTIAL;
if (area->sh_info)
sh_info_remove_reference(area->sh_info);
mutex_unlock(&area->lock);
 
/*
484,7 → 518,7
* attribute set which prevents race condition with
* preliminary as_page_fault() calls.
*/
dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
if (!dst_area) {
/*
* Destination address space area could not be created.
568,7 → 602,9
panic("Page not part of any as_area.\n");
}
 
page_mapping_insert(as, page, frame, get_area_flags(area));
ASSERT(!area->backend);
page_mapping_insert(as, page, frame, as_area_get_flags(area));
if (!used_space_insert(area, page, 1))
panic("Could not insert used space.\n");
579,22 → 615,26
 
/** Handle page fault within the current address space.
*
* This is the high-level page fault handler.
* This is the high-level page fault handler. It decides
* whether the page fault can be resolved by a backend
* and, if so, invokes that backend to resolve it.
*
* Interrupts are assumed disabled.
*
* @param page Faulting page.
* @param istate Pointer to interrupted state.
*
* @return 0 on page fault, 1 on success or 2 if the fault was caused by copy_to_uspace() or copy_from_uspace().
* @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
* fault was caused by copy_to_uspace() or copy_from_uspace().
*/
int as_page_fault(__address page, istate_t *istate)
{
pte_t *pte;
as_area_t *area;
__address frame;
if (!THREAD)
return 0;
return AS_PF_FAULT;
ASSERT(AS);
 
619,7 → 659,15
goto page_fault;
}
 
ASSERT(!(area->flags & AS_AREA_DEVICE));
if (!area->backend || !area->backend->backend_page_fault) {
/*
* The address space area is not backed by any backend
* or the backend cannot handle page faults.
*/
mutex_unlock(&area->lock);
mutex_unlock(&AS->lock);
goto page_fault;
}
 
page_table_lock(AS, false);
633,45 → 681,26
page_table_unlock(AS, false);
mutex_unlock(&area->lock);
mutex_unlock(&AS->lock);
return 1;
return AS_PF_OK;
}
}
 
/*
* In general, there can be several reasons that
* can have caused this fault.
*
* - non-existent mapping: the area is a scratch
* area (e.g. stack) and so far has not been
* allocated a frame for the faulting page
*
* - non-present mapping: another possibility,
* currently not implemented, would be frame
* reuse; when this becomes a possibility,
* do not forget to distinguish between
* the different causes
* Resort to the backend page fault handler.
*/
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame), FRAME_SIZE, 0);
if (area->backend->backend_page_fault(area, page) != AS_PF_OK) {
page_table_unlock(AS, false);
mutex_unlock(&area->lock);
mutex_unlock(&AS->lock);
goto page_fault;
}
/*
* Map 'page' to 'frame'.
* Note that TLB shootdown is not attempted as only new information is being
* inserted into page tables.
*/
page_mapping_insert(AS, page, frame, get_area_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(page, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
page_table_unlock(AS, false);
mutex_unlock(&area->lock);
mutex_unlock(&AS->lock);
return AS_PF_OK;
 
page_fault:
if (!THREAD)
return AS_PF_FAULT;
if (THREAD->in_copy_from_uspace) {
THREAD->in_copy_from_uspace = false;
istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
793,7 → 822,7
*
* @return Flags to be used in page_mapping_insert().
*/
int get_area_flags(as_area_t *a)
int as_area_get_flags(as_area_t *a)
{
return area_flags_to_page_flags(a->flags);
}
1381,7 → 1410,152
panic("Inconsistency detected while removing %d pages of used space from %P.\n", count, page);
}
 
/** Remove reference to address space area share info.
*
* If the reference count drops to 0, the sh_info is deallocated.
*
* @param sh_info Pointer to address space area share info.
*/
void sh_info_remove_reference(share_info_t *sh_info)
{
bool dealloc = false;
 
mutex_lock(&sh_info->lock);
ASSERT(sh_info->refcount);
if (--sh_info->refcount == 0) {
dealloc = true;
bool cond;
/*
* Now carefully walk the pagemap B+tree and free/remove
* references to all frames found there.
*/
for (cond = true; cond;) {
btree_node_t *node;
ASSERT(!list_empty(&sh_info->pagemap.leaf_head));
node = list_get_instance(sh_info->pagemap.leaf_head.next, btree_node_t, leaf_link);
if ((cond = node->keys)) {
frame_free(ADDR2PFN((__address) node->value[0]));
btree_remove(&sh_info->pagemap, node->key[0], node);
}
}
}
mutex_unlock(&sh_info->lock);
if (dealloc) {
btree_destroy(&sh_info->pagemap);
free(sh_info);
}
}
 
static int anon_page_fault(as_area_t *area, __address addr);
static void anon_frame_free(as_area_t *area, __address page, __address frame);
 
/*
* Anonymous memory backend.
*/
mem_backend_t anon_backend = {
.backend_page_fault = anon_page_fault,
.backend_frame_free = anon_frame_free
};
 
/** Service a page fault in the anonymous memory address space area.
*
* The address space area and page tables must already be locked.
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int anon_page_fault(as_area_t *area, __address addr)
{
__address frame;
 
if (area->sh_info) {
btree_node_t *leaf;
/*
* The area is shared, so chances are that the mapping can be found
* in the pagemap of the address space area's share info structure.
* If the pagemap does not contain the respective mapping,
* a new frame is allocated and the mapping is created.
*/
mutex_lock(&area->sh_info->lock);
frame = (__address) btree_search(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), &leaf);
if (!frame) {
bool allocate = true;
int i;
/*
* Zero can be returned as a valid frame address, so a null result
* from btree_search() is ambiguous. Check the leaf node keys to see
* whether the mapping really is missing.
*/
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
allocate = false;
break;
}
}
if (allocate) {
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame), FRAME_SIZE, 0);
/*
* Insert the address of the newly allocated frame into the pagemap.
*/
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), (void *) frame, leaf);
}
}
mutex_unlock(&area->sh_info->lock);
} else {
 
/*
* In general, this fault can have several causes:
*
* - non-existent mapping: the area is an anonymous
* area (e.g. heap or stack) and no frame has been
* allocated for the faulting page yet
*
* - non-present mapping: another possibility,
* currently not implemented, would be frame
* reuse; when this becomes a possibility,
* do not forget to distinguish between
* the different causes
*/
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
memsetb(PA2KA(frame), FRAME_SIZE, 0);
}
/*
* Map 'addr' to 'frame'.
* Note that TLB shootdown is not attempted as only new information is being
* inserted into page tables.
*/
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
return AS_PF_OK;
}
 
/** Free a frame that is backed by the anonymous memory backend.
*
* The address space area and page tables must already be locked.
*
* @param area Ignored.
* @param page Ignored.
* @param frame Frame to be released.
*/
void anon_frame_free(as_area_t *area, __address page, __address frame)
{
frame_free(ADDR2PFN(frame));
}
 
/*
* Address space related syscalls.
*/
 
1388,7 → 1562,7
/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
return (__native) address;
else
return (__native) -1;