/kernel/trunk/generic/include/proc/thread.h |
---|
91,6 → 91,12 |
timeout_t sleep_timeout; /**< Timeout used for timeoutable sleeping. */ |
volatile int timeout_pending; /**< Flag signalling sleep timeout in progress. */ |
/** True if this thread is executing copy_from_uspace(). False otherwise. */ |
bool in_copy_from_uspace; |
/** True if this thread is executing copy_to_uspace(). False otherwise. */ |
bool in_copy_to_uspace; |
fpu_context_t *saved_fpu_context; |
int fpu_context_exists; |
/kernel/trunk/generic/include/interrupt.h |
---|
31,6 → 31,7 |
#include <arch/interrupt.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#ifndef IVT_ITEMS |
# define IVT_ITEMS 0 |
/kernel/trunk/generic/include/mm/page.h |
---|
60,17 → 60,6 |
#define PAGE_GLOBAL (1<<PAGE_GLOBAL_SHIFT) |
/* TODO - check that userspace is OK, platform specific functions etc */ |
static inline void copy_to_uspace(void *dst, void *src, count_t cnt) |
{ |
memcpy(dst, src, cnt); |
} |
static inline void copy_from_uspace(void *dst, void *src, count_t cnt) |
{ |
memcpy(dst, src, cnt); |
} |
/** Operations to manipulate page mappings. */ |
struct page_mapping_operations { |
void (* mapping_insert)(as_t *as, __address page, __address frame, int flags); |
/kernel/trunk/generic/include/mm/as.h |
---|
63,6 → 63,10 |
#define AS_AREA_ATTR_NONE 0 |
#define AS_AREA_ATTR_PARTIAL 1 /* Not fully initialized area. */ |
#define AS_PF_FAULT 0 /**< The page fault was not resolved by as_page_fault(). */ |
#define AS_PF_OK 1 /**< The page fault was resolved by as_page_fault(). */ |
#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace(). */ |
/** Address space area structure. |
* |
* Each as_area_t structure describes one contiguous area of virtual memory. |
121,7 → 125,7 |
extern __address as_area_resize(as_t *as, __address address, size_t size, int flags); |
int as_area_send(task_id_t dst_id, __address base); |
extern void as_set_mapping(as_t *as, __address page, __address frame); |
extern int as_page_fault(__address page); |
extern int as_page_fault(__address page, istate_t *istate); |
extern void as_switch(as_t *old, as_t *new); |
extern void as_free(as_t *as); |
/kernel/trunk/generic/include/syscall/copy.h |
---|
0,0 → 1,49 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#ifndef __COPY_H__ |
#define __COPY_H__ |
#include <typedefs.h> |
/** Label within memcpy_from_uspace() that contains return -1. */ |
extern char memcpy_from_uspace_failover_address; |
/** Label within memcpy_to_uspace() that contains return -1. */ |
extern char memcpy_to_uspace_failover_address; |
extern int copy_from_uspace(void *dst, void *uspace_src, size_t size); |
extern int copy_to_uspace(void *dst_uspace, void *src, size_t size); |
/* |
* This interface must be implemented by each architecture. |
*/ |
extern int memcpy_from_uspace(void *dst, void *uspace_src, size_t size); |
extern int memcpy_to_uspace(void *uspace_dst, void *src, size_t size); |
#endif |
/kernel/trunk/generic/src/synch/waitq.c |
---|
30,7 → 30,7 |
* @file waitq.c |
* @brief Wait queue. |
* |
* Wait queue is the basic synchronization primitive upon all |
* Wait queue is the basic synchronization primitive upon which all |
* other synchronization primitives build. |
* |
* It allows threads to wait for an event in first-come, first-served |
/kernel/trunk/generic/src/smp/ipi.c |
---|
43,7 → 43,7 |
* |
* @param ipi Message to broadcast. |
* |
* @bugs The decision whether to actually send the IPI must be based |
* @bug The decision whether to actually send the IPI must be based |
* on a different criterion. The current version has |
* problems when some of the detected CPUs are marked |
* disabled in machine configuration. |
/kernel/trunk/generic/src/ddi/ddi.c |
---|
40,9 → 40,9 |
#include <proc/task.h> |
#include <security/cap.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/as.h> |
#include <synch/spinlock.h> |
#include <syscall/copy.h> |
#include <arch.h> |
#include <align.h> |
#include <errno.h> |
183,8 → 183,12 |
__native sys_physmem_map(ddi_memarg_t *uspace_mem_arg) |
{ |
ddi_memarg_t arg; |
int rc; |
copy_from_uspace(&arg, uspace_mem_arg, sizeof(ddi_memarg_t)); |
rc = copy_from_uspace(&arg, uspace_mem_arg, sizeof(ddi_memarg_t)); |
if (rc != 0) |
return (__native) rc; |
return (__native) ddi_physmem_map((task_id_t) arg.task_id, ALIGN_DOWN((__address) arg.phys_base, FRAME_SIZE), |
ALIGN_DOWN((__address) arg.virt_base, PAGE_SIZE), (count_t) arg.pages, |
(bool) arg.writable); |
199,7 → 203,11 |
__native sys_iospace_enable(ddi_ioarg_t *uspace_io_arg) |
{ |
ddi_ioarg_t arg; |
int rc; |
copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t)); |
rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t)); |
if (rc != 0) |
return (__native) rc; |
return (__native) ddi_iospace_enable((task_id_t) arg.task_id, (__address) arg.ioaddr, (size_t) arg.size); |
} |
/kernel/trunk/generic/src/printf/vsnprintf.c |
---|
39,9 → 39,9 |
int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data); |
/** Write string to given buffer. |
* Write at most data->size characters including trailing zero. According to C99 has snprintf to return number |
* Write at most data->size characters including trailing zero. According to C99, snprintf() has to return number |
* of characters that would have been written if enough space had been available. Hence the return value is not |
* number of really printed characters but size of input string. Number of really used characters |
* number of really printed characters but size of the input string. Number of really used characters |
* is stored in data->len. |
* @param str source string to print |
* @param count size of source string |
90,5 → 90,3 |
/* vsnprintf_write ensures that str will be terminated by zero. */ |
return printf_core(fmt, &ps, ap); |
} |
/kernel/trunk/generic/src/proc/task.c |
---|
48,8 → 48,8 |
#include <memstr.h> |
#include <print.h> |
#include <elf.h> |
#include <syscall/copy.h> |
#ifndef LOADED_PROG_STACK_PAGES_NO |
#define LOADED_PROG_STACK_PAGES_NO 1 |
#endif |
170,7 → 170,7 |
* |
* @param uspace_task_id Userspace address of 8-byte buffer where to store current task ID. |
* |
* @return Always returns 0. |
* @return 0 on success or an error code from @ref errno.h. |
*/ |
__native sys_task_get_id(task_id_t *uspace_task_id) |
{ |
178,9 → 178,7 |
* No need to acquire lock on TASK because taskid |
* remains constant for the lifespan of the task. |
*/ |
copy_to_uspace(uspace_task_id, &TASK->taskid, sizeof(TASK->taskid)); |
return 0; |
return (__native) copy_to_uspace(uspace_task_id, &TASK->taskid, sizeof(TASK->taskid)); |
} |
/** Find task structure corresponding to task ID. |
/kernel/trunk/generic/src/proc/thread.c |
---|
60,6 → 60,8 |
#include <mm/slab.h> |
#include <debug.h> |
#include <main/uinit.h> |
#include <syscall/copy.h> |
#include <errno.h> |
char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */ |
304,6 → 306,9 |
t->sleep_queue = NULL; |
t->timeout_pending = 0; |
t->in_copy_from_uspace = false; |
t->in_copy_to_uspace = false; |
t->rwlock_holder_type = RWLOCK_NONE; |
t->task = task; |
462,11 → 467,18 |
char namebuf[THREAD_NAME_BUFLEN]; |
uspace_arg_t *kernel_uarg; |
__u32 tid; |
int rc; |
copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN); |
rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN); |
if (rc != 0) |
return (__native) rc; |
kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0); |
copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t)); |
rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t)); |
if (rc != 0) { |
free(kernel_uarg); |
return (__native) rc; |
} |
if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) { |
tid = t->tid; |
476,7 → 488,7 |
free(kernel_uarg); |
} |
return (__native) -1; |
return (__native) ENOMEM; |
} |
/** Process syscall to terminate thread. |
/kernel/trunk/generic/src/mm/slab.c |
---|
277,8 → 277,8 |
if (list_empty(&cache->partial_slabs)) { |
/* Allow recursion and reclaiming |
* - this should work, as the slab control structures |
* are small and do not need to allocte with anything |
* other ten frame_alloc when they are allocating, |
* are small and do not need to allocate with anything |
* other than frame_alloc when they are allocating, |
* that's why we should get recursion at most 1-level deep |
*/ |
spinlock_unlock(&cache->slablock); |
889,7 → 889,6 |
return slab_alloc(malloc_caches[idx], flags); |
} |
void free(void *obj) |
{ |
slab_t *slab; |
/kernel/trunk/generic/src/mm/as.c |
---|
57,6 → 57,7 |
#include <adt/list.h> |
#include <adt/btree.h> |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <arch/asm.h> |
#include <panic.h> |
#include <debug.h> |
68,6 → 69,8 |
#include <config.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <syscall/copy.h> |
#include <arch/interrupt.h> |
as_operations_t *as_operations = NULL; |
477,10 → 480,11 |
* Interrupts are assumed disabled. |
* |
* @param page Faulting page. |
* @param istate Pointer to interrupted state. |
* |
* @return 0 on page fault, 1 on success. |
* @return 0 on page fault, 1 on success or 2 if the fault was caused by copy_to_uspace() or copy_from_uspace(). |
*/ |
int as_page_fault(__address page) |
int as_page_fault(__address page, istate_t *istate) |
{ |
pte_t *pte; |
as_area_t *area; |
496,7 → 500,7 |
* Signal page fault to low-level handler. |
*/ |
spinlock_unlock(&AS->lock); |
return 0; |
goto page_fault; |
} |
if (area->attributes & AS_AREA_ATTR_PARTIAL) { |
506,7 → 510,7 |
*/ |
spinlock_unlock(&area->lock); |
spinlock_unlock(&AS->lock); |
return 0; |
goto page_fault; |
} |
ASSERT(!(area->flags & AS_AREA_DEVICE)); |
554,9 → 558,25 |
spinlock_unlock(&area->lock); |
spinlock_unlock(&AS->lock); |
return 1; |
return AS_PF_OK; |
page_fault: |
if (!THREAD) |
return AS_PF_FAULT; |
if (THREAD->in_copy_from_uspace) { |
THREAD->in_copy_from_uspace = false; |
istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address); |
} else if (THREAD->in_copy_to_uspace) { |
THREAD->in_copy_to_uspace = false; |
istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address); |
} else { |
return AS_PF_FAULT; |
} |
return AS_PF_DEFER; |
} |
/** Switch address spaces. |
* |
* @param old Old address space or NULL. |
884,8 → 904,11 |
__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg) |
{ |
as_area_acptsnd_arg_t arg; |
int rc; |
copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t)); |
rc = copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t)); |
if (rc != 0) |
return rc; |
if (!arg.size) |
return (__native) EPERM; |
906,8 → 929,11 |
__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg) |
{ |
as_area_acptsnd_arg_t arg; |
int rc; |
copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t)); |
rc = copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t)); |
if (rc != 0) |
return rc; |
if (!arg.size) |
return (__native) EPERM; |
/kernel/trunk/generic/src/syscall/copy.c |
---|
0,0 → 1,125 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** |
* @file copy.c |
* @brief Copying between kernel and userspace. |
* |
* This file contains sanitized functions for copying data |
* between kernel and userspace. |
*/ |
#include <syscall/copy.h> |
#include <proc/thread.h> |
#include <mm/as.h> |
#include <macros.h> |
#include <arch.h> |
#include <errno.h> |
#include <typedefs.h> |
/** Copy data from userspace to kernel. |
* |
* Provisions are made to return value even after page fault. |
* |
* This function can be called only from syscall. |
* |
* @param dst Destination kernel address. |
* @param uspace_src Source userspace address. |
* @param size Size of the data to be copied. |
* |
* @return 0 on success or error code from @ref errno.h. |
*/ |
int copy_from_uspace(void *dst, void *uspace_src, size_t size) |
{ |
ipl_t ipl; |
int rc; |
ASSERT(THREAD); |
ASSERT(!THREAD->in_copy_from_uspace); |
if (!KERNEL_ADDRESS_SPACE_SHADOWED) { |
if (overlaps((__address) uspace_src, size, |
KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START)) { |
/* |
* The userspace source block conflicts with kernel address space. |
*/ |
return EPERM; |
} |
} |
ipl = interrupts_disable(); |
THREAD->in_copy_from_uspace = true; |
rc = memcpy_from_uspace(dst, uspace_src, size); |
THREAD->in_copy_from_uspace = false; |
interrupts_restore(ipl); |
return !rc ? EPERM : 0; |
} |
/** Copy data from kernel to userspace. |
* |
* Provisions are made to return value even after page fault. |
* |
* This function can be called only from syscall. |
* |
* @param uspace_dst Destination userspace address. |
* @param uspace_src Source kernel address. |
* @param size Size of the data to be copied. |
* |
* @return 0 on success or error code from @ref errno.h. |
*/ |
int copy_to_uspace(void *uspace_dst, void *src, size_t size) |
{ |
ipl_t ipl; |
int rc; |
ASSERT(THREAD); |
ASSERT(!THREAD->in_copy_from_uspace); |
if (!KERNEL_ADDRESS_SPACE_SHADOWED) { |
if (overlaps((__address) uspace_dst, size, |
KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START)) { |
/* |
* The userspace destination block conflicts with kernel address space. |
*/ |
return EPERM; |
} |
} |
ipl = interrupts_disable(); |
THREAD->in_copy_from_uspace = true; |
rc = memcpy_to_uspace(uspace_dst, src, size); |
THREAD->in_copy_from_uspace = false; |
interrupts_restore(ipl); |
return !rc ? EPERM : 0; |
} |
/kernel/trunk/generic/src/ipc/sysipc.c |
---|
28,9 → 28,8 |
#include <arch.h> |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <errno.h> |
#include <mm/page.h> |
#include <memstr.h> |
#include <debug.h> |
#include <ipc/ipc.h> |
38,10 → 37,8 |
#include <ipc/irq.h> |
#include <ipc/ipcrsc.h> |
#include <arch/interrupt.h> |
#include <print.h> |
#include <arch.h> |
#include <proc/thread.h> |
#include <syscall/copy.h> |
#define GET_CHECK_PHONE(phone,phoneid,err) { \ |
if (phoneid > IPC_MAX_PHONES) { err; } \ |
228,9 → 225,12 |
call_t call; |
phone_t *phone; |
int res; |
int rc; |
ipc_call_static_init(&call); |
copy_from_uspace(&call.data.args, &question->args, sizeof(call.data.args)); |
rc = copy_from_uspace(&call.data.args, &question->args, sizeof(call.data.args)); |
if (rc != 0) |
return (__native) rc; |
GET_CHECK_PHONE(phone, phoneid, return ENOENT); |
240,7 → 240,9 |
} else |
IPC_SET_RETVAL(call.data, res); |
STRUCT_TO_USPACE(&reply->args, &call.data.args); |
rc = STRUCT_TO_USPACE(&reply->args, &call.data.args); |
if (rc != 0) |
return rc; |
return 0; |
} |
297,6 → 299,7 |
call_t *call; |
phone_t *phone; |
int res; |
int rc; |
if (check_call_limit()) |
return IPC_CALLRET_TEMPORARY; |
304,7 → 307,9 |
GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL); |
call = ipc_call_alloc(0); |
copy_from_uspace(&call->data.args, &data->args, sizeof(call->data.args)); |
rc = copy_from_uspace(&call->data.args, &data->args, sizeof(call->data.args)); |
if (rc != 0) |
return (__native) rc; |
if (!(res=request_preprocess(call))) |
ipc_call(phone, call); |
else |
393,6 → 398,7 |
call_t *call; |
ipc_data_t saved_data; |
int saveddata = 0; |
int rc; |
call = get_call(callid); |
if (!call) |
402,8 → 408,10 |
memcpy(&saved_data, &call->data, sizeof(call->data)); |
saveddata = 1; |
} |
copy_from_uspace(&call->data.args, &data->args, |
rc = copy_from_uspace(&call->data.args, &data->args, |
sizeof(call->data.args)); |
if (rc != 0) |
return rc; |
answer_preprocess(call, saveddata ? &saved_data : NULL); |
/kernel/trunk/generic/src/ipc/irq.c |
---|
47,6 → 47,7 |
#include <ipc/ipc.h> |
#include <ipc/irq.h> |
#include <atomic.h> |
#include <syscall/copy.h> |
typedef struct { |
SPINLOCK_DECLARE(lock); |
120,9 → 121,14 |
{ |
irq_code_t *code; |
irq_cmd_t *ucmds; |
int rc; |
code = malloc(sizeof(*code), 0); |
copy_from_uspace(code, ucode, sizeof(*code)); |
rc = copy_from_uspace(code, ucode, sizeof(*code)); |
if (rc != 0) { |
free(code); |
return NULL; |
} |
if (code->cmdcount > IRQ_MAX_PROG_SIZE) { |
free(code); |
130,7 → 136,12 |
} |
ucmds = code->cmds; |
code->cmds = malloc(sizeof(code->cmds[0]) * (code->cmdcount), 0); |
copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * (code->cmdcount)); |
rc = copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * (code->cmdcount)); |
if (rc != 0) { |
free(code->cmds); |
free(code); |
return NULL; |
} |
return code; |
} |
/kernel/trunk/Makefile |
---|
128,6 → 128,7 |
generic/src/proc/task.c \ |
generic/src/proc/the.c \ |
generic/src/syscall/syscall.c \ |
generic/src/syscall/copy.c \ |
generic/src/mm/buddy.c \ |
generic/src/mm/frame.c \ |
generic/src/mm/page.c \ |
/kernel/trunk/arch/sparc64/include/interrupt.h |
---|
30,6 → 30,7 |
#define __sparc64_INTERRUPT_H__ |
#include <typedefs.h> |
#include <arch/types.h> |
#define IRQ_COUNT 1 /* TODO */ |
43,6 → 44,14 |
#define trap_virtual_enable_irqs(x) |
#define trap_virtual_eoi() |
struct istate { |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
{ |
/* TODO */ |
} |
extern void interrupt_register(int n, const char *name, iroutine f); |
#endif |
/kernel/trunk/arch/sparc64/src/asm.S |
---|
29,12 → 29,24 |
.text |
.global memcpy |
.global memcpy_from_uspace |
.global memcpy_to_uspace |
.global memcpy_from_uspace_failover_address |
.global memcpy_to_uspace_failover_address |
.global memsetb |
memcpy: |
memcpy_from_uspace: |
memcpy_to_uspace: |
b _memcpy |
nop |
memcpy_from_uspace_failover_address: |
memcpy_to_uspace_failover_address: |
b memcpy_from_uspace_failover_address |
nop |
memsetb: |
b _memsetb |
nop |
/kernel/trunk/arch/ia64/include/interrupt.h |
---|
106,6 → 106,11 |
__u64 in4; |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
{ |
/* TODO */ |
} |
extern void *ivt; |
extern void general_exception(__u64 vector, istate_t *istate); |
/kernel/trunk/arch/ia64/src/asm.S |
---|
30,10 → 30,26 |
.text |
/** Copy memory from/to userspace. |
* |
* @param in0 Destination address. |
* @param in1 Source address. |
* @param in2 Number of bytes to copy. |
*/ |
.global memcpy |
.global memcpy_from_uspace |
.global memcpy_to_uspace |
.global memcpy_from_uspace_failover_address |
.global memcpy_to_uspace_failover_address |
memcpy: |
memcpy_from_uspace: |
memcpy_to_uspace: |
br _memcpy |
memcpy_from_uspace_failover_address: |
memcpy_to_uspace_failover_address: |
br memcpy_from_uspace_failover_address |
.global memsetb |
memsetb: |
br _memsetb |
/kernel/trunk/arch/ia64/src/mm/tlb.c |
---|
447,7 → 447,7 |
* Forward the page fault to address space page fault handler. |
*/ |
page_table_unlock(AS, true); |
if (!as_page_fault(va)) { |
if (as_page_fault(va, istate) == AS_PF_FAULT) { |
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, istate->cr_ifa, rr.map.rid, istate->cr_iip); |
} |
} |
493,7 → 493,7 |
* Forward the page fault to address space page fault handler. |
*/ |
page_table_unlock(AS, true); |
if (!as_page_fault(va)) { |
if (as_page_fault(va, istate) == AS_PF_FAULT) { |
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); |
} |
} |
608,7 → 608,7 |
page_table_unlock(AS, true); |
} else { |
page_table_unlock(AS, true); |
if (!as_page_fault(va)) { |
if (as_page_fault(va, istate) == AS_PF_FAULT) { |
panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rr.map.rid); |
} |
} |
/kernel/trunk/arch/ppc32/include/interrupt.h |
---|
29,6 → 29,8 |
#ifndef __ppc32_INTERRUPT_H__ |
#define __ppc32_INTERRUPT_H__ |
#include <arch/exception.h> |
#define IRQ_COUNT 1 |
#define IVT_ITEMS 15 |
#define INT_OFFSET 0 |
/kernel/trunk/arch/ppc32/include/exception.h |
---|
76,4 → 76,9 |
__u32 sp; |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
{ |
/* TODO */ |
} |
#endif |
/kernel/trunk/arch/ppc32/src/asm.S |
---|
35,6 → 35,10 |
.global iret_syscall |
.global memsetb |
.global memcpy |
.global memcpy_from_uspace |
.global memcpy_to_uspace |
.global memcpy_from_uspace_failover_address |
.global memcpy_to_uspace_failover_address |
userspace_asm: |
233,6 → 237,9 |
blr |
memcpy: |
memcpy_from_uspace: |
memcpy_to_uspace: |
srwi. r7, r5, 3 |
addi r6, r3, -4 |
addi r4, r4, -4 |
293,3 → 300,7 |
beq 2b |
mtctr r7 |
b 1b |
memcpy_from_uspace_failover_address: |
memcpy_to_uspace_failover_address: |
b memcpy_from_uspace_failover_address |
/kernel/trunk/arch/ppc32/src/mm/tlb.c |
---|
68,10 → 68,12 |
* The AS->lock must be held on entry to this function. |
* |
* @param badvaddr Faulting virtual address. |
* @param istate Pointer to interrupted state. |
* @param pfcr Pointer to variable where as_page_fault() return code will be stored. |
* @return PTE on success, NULL otherwise. |
* |
*/ |
static pte_t *find_mapping_and_check(__address badvaddr) |
static pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfcr) |
{ |
/* |
* Check if the mapping exists in page tables. |
84,12 → 86,15 |
*/ |
return pte; |
} else { |
int rc; |
/* |
* Mapping not found in page tables. |
* Resort to higher-level page fault handler. |
*/ |
page_table_unlock(AS, true); |
if (as_page_fault(badvaddr)) { |
switch (rc = as_page_fault(badvaddr, istate)) { |
case AS_PF_OK: |
/* |
* The higher-level page fault handler succeeded, |
* The mapping ought to be in place. |
98,12 → 103,22 |
pte = page_mapping_find(AS, badvaddr); |
ASSERT((pte) && (pte->p)); |
return pte; |
} else { |
break; |
case AS_PF_DEFER: |
page_table_lock(AS, true); |
*pfcr = rc; |
return NULL; |
break; |
case AS_PF_FAULT: |
page_table_lock(AS, true); |
printf("Page fault.\n"); |
*pfcr = rc; |
return NULL; |
break; |
default: |
panic("unexpected rc (%d)\n", rc); |
break; |
} |
} |
} |
139,6 → 154,7 |
__u32 vsid; |
__u32 hash; |
__u32 i; |
int pfcr; |
if (data) { |
asm volatile ( |
154,9 → 170,24 |
page_table_lock(AS, true); |
pte = find_mapping_and_check(badvaddr); |
if (!pte) |
pte = find_mapping_and_check(badvaddr, istate, &pfcr); |
if (!pte) { |
switch (pfcr) { |
case AS_PF_FAULT: |
goto fail; |
break; |
case AS_PF_DEFER: |
/* |
* The page fault came during copy_from_uspace() |
* or copy_to_uspace(). |
*/ |
page_table_unlock(AS, true); |
return; |
default: |
panic("Unexpected pfrc (%d)\n", pfcr); |
break; |
} |
} |
/* Record access to PTE */ |
pte->a = 1; |
/kernel/trunk/arch/amd64/include/interrupt.h |
---|
86,6 → 86,11 |
__u64 stack[]; /* Additional data on stack */ |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
{ |
istate->rip = retaddr; |
} |
extern void (* disable_irqs_function)(__u16 irqmask); |
extern void (* enable_irqs_function)(__u16 irqmask); |
extern void (* eoi_function)(void); |
/kernel/trunk/arch/amd64/src/asm_utils.S |
---|
60,16 → 60,60 |
movq $halt, (%rsp) |
jmp printf |
.global memcpy |
memcpy: |
jmp _memcpy |
.global cpuid |
.global has_cpuid |
.global rdtsc |
.global read_efer_flag |
.global set_efer_flag |
.global memcpy |
.global memcpy_from_uspace |
.global memcpy_to_uspace |
.global memcpy_from_uspace_failover_address |
.global memcpy_to_uspace_failover_address |
#define MEMCPY_DST %rdi |
#define MEMCPY_SRC %rsi |
#define MEMCPY_SIZE %rdx |
/** |
* Copy memory from/to userspace. |
* |
* This is almost conventional memcpy(). |
* The difference is that there is a failover part |
* to where control is returned from a page fault if |
* the page fault occurs during copy_from_uspace() |
* or copy_to_uspace(). |
* |
* @param MEMCPY_DST Destination address. |
* @param MEMCPY_SRC Source address. |
* @param MEMCPY_SIZE Number of bytes to copy. |
* |
* @return MEMCPY_SRC on success, 0 on failure. |
*/ |
memcpy: |
memcpy_from_uspace: |
memcpy_to_uspace: |
movq MEMCPY_SRC, %rax |
movq MEMCPY_SIZE, %rcx |
shrq $3, %rcx /* size / 8 */ |
rep movsq /* copy as much as possible word by word */ |
movq MEMCPY_SIZE, %rcx |
andq $7, %rcx /* size % 8 */ |
jz 0f |
rep movsb /* copy the rest byte by byte */ |
0: |
ret /* return MEMCPY_SRC, success */ |
memcpy_from_uspace_failover_address: |
memcpy_to_uspace_failover_address: |
xorq %rax, %rax /* return 0, failure */ |
ret |
## Determine CPUID support |
# |
# Return 0 in EAX if CPUID is not supported, 1 if supported. |
/kernel/trunk/arch/amd64/src/mm/page.c |
---|
166,7 → 166,7 |
__address page; |
page = read_cr2(); |
if (!as_page_fault(page)) { |
if (as_page_fault(page, istate) == AS_PF_FAULT) { |
print_info_errcode(n, istate); |
printf("Page fault address: %llX\n", page); |
panic("page fault\n"); |
/kernel/trunk/arch/mips32/include/exception.h |
---|
93,6 → 93,11 |
__u32 k1; /* We use it as thread-local pointer */ |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
{ |
/* TODO */ |
} |
extern void exception(istate_t *istate); |
extern void tlb_refill_entry(void); |
extern void exception_entry(void); |
/kernel/trunk/arch/mips32/src/asm.S |
---|
57,11 → 57,25 |
j _memsetb |
nop |
.global memcpy |
.global memcpy_from_uspace |
.global memcpy_to_uspace |
.global memcpy_from_uspace_failover_address |
.global memcpy_to_uspace_failover_address |
memcpy: |
memcpy_from_uspace: |
memcpy_to_uspace: |
j _memcpy |
nop |
memcpy_from_uspace_failover_address: |
memcpy_to_uspace_failover_address: |
j memcpy_from_uspace_failover_address |
nop |
.macro fpu_gp_save reg ctx |
mfc1 $t0,$\reg |
sw $t0, \reg*4(\ctx) |
/kernel/trunk/arch/mips32/src/mm/tlb.c |
---|
44,7 → 44,7 |
static void tlb_invalid_fail(istate_t *istate); |
static void tlb_modified_fail(istate_t *istate); |
static pte_t *find_mapping_and_check(__address badvaddr); |
static pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfrc); |
static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn); |
static void prepare_entry_hi(entry_hi_t *hi, asid_t asid, __address addr); |
91,6 → 91,7 |
asid_t asid; |
__address badvaddr; |
pte_t *pte; |
int pfrc; |
badvaddr = cp0_badvaddr_read(); |
100,9 → 101,23 |
page_table_lock(AS, true); |
pte = find_mapping_and_check(badvaddr); |
if (!pte) |
pte = find_mapping_and_check(badvaddr, istate, &pfrc); |
if (!pte) { |
switch (pfrc) { |
case AS_PF_FAULT: |
goto fail; |
break; |
case AS_PF_DEFER: |
/* |
* The page fault came during copy_from_uspace() |
* or copy_to_uspace(). |
*/ |
page_table_unlock(AS, true); |
return; |
default: |
panic("unexpected pfrc (%d)\n", pfrc); |
} |
} |
/* |
* Record access to PTE. |
148,6 → 163,7 |
entry_lo_t lo; |
entry_hi_t hi; |
pte_t *pte; |
int pfrc; |
badvaddr = cp0_badvaddr_read(); |
170,9 → 186,23 |
goto fail; |
} |
pte = find_mapping_and_check(badvaddr); |
if (!pte) |
pte = find_mapping_and_check(badvaddr, istate, &pfrc); |
if (!pte) { |
switch (pfrc) { |
case AS_PF_FAULT: |
goto fail; |
break; |
case AS_PF_DEFER: |
/* |
* The page fault came during copy_from_uspace() |
* or copy_to_uspace(). |
*/ |
page_table_unlock(AS, true); |
return; |
default: |
panic("unexpected pfrc (%d)\n", pfrc); |
} |
} |
/* |
* Read the faulting TLB entry. |
217,6 → 247,7 |
entry_lo_t lo; |
entry_hi_t hi; |
pte_t *pte; |
int pfrc; |
badvaddr = cp0_badvaddr_read(); |
239,9 → 270,23 |
goto fail; |
} |
pte = find_mapping_and_check(badvaddr); |
if (!pte) |
pte = find_mapping_and_check(badvaddr, istate, &pfrc); |
if (!pte) { |
switch (pfrc) { |
case AS_PF_FAULT: |
goto fail; |
break; |
case AS_PF_DEFER: |
/* |
* The page fault came during copy_from_uspace() |
* or copy_to_uspace(). |
*/ |
page_table_unlock(AS, true); |
return; |
default: |
panic("unexpected pfrc (%d)\n", pfrc); |
} |
} |
/* |
* Fail if the page is not writable. |
321,10 → 366,12 |
* The AS->lock must be held on entry to this function. |
* |
* @param badvaddr Faulting virtual address. |
* @param istate Pointer to interrupted state. |
* @param pfrc Pointer to variable where as_page_fault() return code will be stored. |
* |
* @return PTE on success, NULL otherwise. |
*/ |
pte_t *find_mapping_and_check(__address badvaddr) |
pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfrc) |
{ |
entry_hi_t hi; |
pte_t *pte; |
350,12 → 397,15 |
*/ |
return pte; |
} else { |
int rc; |
/* |
* Mapping not found in page tables. |
* Resort to higher-level page fault handler. |
*/ |
page_table_unlock(AS, true); |
if (as_page_fault(badvaddr)) { |
switch (rc = as_page_fault(badvaddr, istate)) { |
case AS_PF_OK: |
/* |
* The higher-level page fault handler succeeded, |
* The mapping ought to be in place. |
364,10 → 414,20 |
pte = page_mapping_find(AS, badvaddr); |
ASSERT(pte && pte->p); |
return pte; |
} else { |
break; |
case AS_PF_DEFER: |
page_table_lock(AS, true); |
*pfrc = AS_PF_DEFER; |
return NULL; |
break; |
case AS_PF_FAULT: |
page_table_lock(AS, true); |
printf("Page fault.\n"); |
*pfrc = AS_PF_FAULT; |
return NULL; |
break; |
default: |
panic("unexpected rc (%d)\n", rc); |
} |
} |
/kernel/trunk/arch/ia32/include/interrupt.h |
---|
83,6 → 83,11 |
__u32 stack[]; |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
{ |
istate->eip = retaddr; |
} |
extern void (* disable_irqs_function)(__u16 irqmask); |
extern void (* enable_irqs_function)(__u16 irqmask); |
extern void (* eoi_function)(void); |
/kernel/trunk/arch/ia32/src/asm.S |
---|
37,7 → 37,68 |
.global paging_on |
.global enable_l_apic_in_msr |
.global interrupt_handlers |
.global memcpy |
.global memcpy_from_uspace |
.global memcpy_from_uspace_failover_address |
.global memcpy_to_uspace |
.global memcpy_to_uspace_failover_address |
#define MEMCPY_DST 4 |
#define MEMCPY_SRC 8 |
#define MEMCPY_SIZE 12 |
/** Copy memory to/from userspace. |
* |
* This is almost conventional memcpy(). |
* The difference is that there is a failover part |
* to where control is returned from a page fault |
* if the page fault occurs during copy_from_uspace() |
* or copy_to_uspace(). |
* |
* @param MEMCPY_DST(%esp) Destination address. |
* @param MEMCPY_SRC(%esp) Source address. |
* @param MEMCPY_SIZE(%esp) Size. |
* |
* @return MEMCPY_SRC(%esp) on success and 0 on failure. |
*/ |
memcpy: |
memcpy_from_uspace: |
memcpy_to_uspace: |
movl %edi, %edx /* save %edi */ |
movl %esi, %eax /* save %esi */ |
movl MEMCPY_SIZE(%esp), %ecx |
shrl $2, %ecx /* size / 4 */ |
movl MEMCPY_DST(%esp), %edi |
movl MEMCPY_SRC(%esp), %esi |
rep movsl /* copy as much as possible word by word */ |
movl MEMCPY_SIZE(%esp), %ecx |
andl $3, %ecx /* size % 4 */ |
jz 0f |
rep movsb /* copy the rest byte by byte */ |
0: |
movl %edx, %edi |
movl %eax, %esi |
movl MEMCPY_SRC(%esp), %eax /* MEMCPY_SRC(%esp), success */ |
ret |
/* |
* We got here from as_page_fault() after the memory operations |
* above had caused a page fault. |
*/ |
memcpy_from_uspace_failover_address: |
memcpy_to_uspace_failover_address: |
movl %edx, %edi |
movl %eax, %esi |
xorl %eax, %eax /* return 0, failure */ |
ret |
## Turn paging on |
# |
# Enable paging and write-back caching in CR0. |
/kernel/trunk/arch/ia32/src/boot/boot.S |
---|
34,14 → 34,6 |
#define START_STACK (BOOT_OFFSET - BOOT_STACK_SIZE) |
.section K_TEXT_START, "ax" |
KTEXT=8 |
435,12 → 427,8 |
e_vesa_init: |
#endif |
.section K_DATA_START, "aw", @progbits |
.align 4096 |
page_directory: |
.space 4096, 0 |
/kernel/trunk/arch/ia32/src/interrupt.c |
---|
144,7 → 144,7 |
__address page; |
page = read_cr2(); |
if (!as_page_fault(page)) { |
if (as_page_fault(page, istate) == AS_PF_FAULT) { |
PRINT_INFO_ERRCODE(istate); |
printf("page fault address: %#x\n", page); |
panic("page fault\n"); |