/branches/fs/kernel/generic/include/print.h |
---|
40,7 → 40,7 |
#include <arch/arg.h> |
/* We need this lock's address in the spinlock code to avoid deadlock in deadlock detection */ |
SPINLOCK_EXTERN(printflock); |
SPINLOCK_EXTERN(printf_lock); |
#define EOF (-1) |
/branches/fs/kernel/generic/include/time/clock.h |
---|
35,8 → 35,19 |
#ifndef KERN_CLOCK_H_ |
#define KERN_CLOCK_H_ |
#include <arch/types.h> |
#define HZ 100 |
/** Uptime structure */ |
typedef struct { |
unative_t seconds1; |
unative_t useconds; |
unative_t seconds2; |
} uptime_t; |
extern uptime_t *uptime; |
extern void clock(void); |
extern void clock_counter_init(void); |
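The three-field uptime_t implements a lock-free reader protocol: the writer in clock.c (further down in this changeset) stores seconds1, then useconds, then seconds2 with write barriers in between, so a reader that observes seconds1 == seconds2 has taken a consistent snapshot. A minimal reader sketch, assuming read_barrier() pairs with those write barriers; uptime_read is an illustrative name, not part of the changeset:

```c
/* Minimal sketch of a consistent read of the shared uptime structure. */
static void uptime_read(uptime_t *up, unative_t *secs, unative_t *usecs)
{
	unative_t s1, s2;

	do {
		s1 = up->seconds1;
		read_barrier();
		*usecs = up->useconds;
		read_barrier();
		s2 = up->seconds2;
	} while (s1 != s2);	/* writer was mid-update; retry */
	*secs = s1;
}
```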
/branches/fs/kernel/generic/include/proc/task.h |
---|
90,10 → 90,10 |
* Active asynchronous messages. It is used for limiting uspace to a |
* certain extent. |
*/ |
atomic_t active_calls; |
atomic_t active_calls; |
/** Architecture specific task data. */ |
task_arch_t arch; |
task_arch_t arch; |
/** |
* Serializes access to the B+tree of task's futexes. This mutex is |
111,6 → 111,7 |
extern btree_t tasks_btree; |
extern void task_init(void); |
extern void task_done(void); |
extern task_t *task_create(as_t *as, char *name); |
extern void task_destroy(task_t *t); |
extern task_t *task_run_program(void *program_addr, char *name); |
/branches/fs/kernel/generic/include/proc/thread.h |
---|
53,7 → 53,11 |
/* Thread flags */ |
/** Thread cannot be migrated to another CPU. */ |
/** Thread cannot be migrated to another CPU. |
* |
* When using this flag, the caller must manually set the cpu field of the |
* thread_t structure before calling thread_ready() (even on a uniprocessor). |
*/ |
#define THREAD_FLAG_WIRED (1 << 0) |
/** Thread was migrated to another CPU and has not run yet. */ |
#define THREAD_FLAG_STOLEN (1 << 1) |
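The expanded comment matches how cmd_mcall0() later in this changeset uses the flag: create the thread wired, assign its cpu under the thread lock, and only then make it ready. A condensed sketch of that idiom; worker, arg and cpus[i] are placeholders:

```c
/* Wiring a new thread to cpus[i]; pattern taken from cmd_mcall0() below. */
thread_t *t = thread_create(worker, arg, TASK, THREAD_FLAG_WIRED,
    "wired", false);
if (t) {
	spinlock_lock(&t->lock);
	t->cpu = &cpus[i];	/* mandatory when THREAD_FLAG_WIRED is set */
	spinlock_unlock(&t->lock);
	thread_ready(t);	/* safe: cpu is already assigned */
}
```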
193,7 → 197,7 |
/** Thread's priority. Implemented as index to CPU->rq */ |
int priority; |
/** Thread ID. */ |
uint32_t tid; |
thread_id_t tid; |
/** Architecture-specific data. */ |
thread_arch_t arch; |
248,8 → 252,9 |
extern slab_cache_t *fpu_context_slab; |
/* Thread syscall prototypes. */ |
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name); |
unative_t sys_thread_exit(int uspace_status); |
extern unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name, thread_id_t *uspace_thread_id); |
extern unative_t sys_thread_exit(int uspace_status); |
extern unative_t sys_thread_get_id(thread_id_t *uspace_thread_id); |
#endif |
/branches/fs/kernel/generic/include/interrupt.h |
---|
48,7 → 48,7 |
#define fault_if_from_uspace(istate, cmd, ...) \ |
{ \ |
if (istate_from_uspace(istate)) { \ |
klog_printf("Task %lld killed due to an exception at %p.", TASK->taskid, istate_get_pc(istate)); \ |
klog_printf("Task %llu killed due to an exception at %p.", TASK->taskid, istate_get_pc(istate)); \ |
klog_printf(" " cmd, ##__VA_ARGS__); \ |
task_kill(TASK->taskid); \ |
thread_exit(); \ |
/branches/fs/kernel/generic/include/synch/mutex.h |
---|
44,13 → 44,11 |
} mutex_t; |
#define mutex_lock(mtx) \ |
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE) |
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE) |
#define mutex_trylock(mtx) \ |
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING) |
#define mutex_lock_timeout(mtx,usec) \ |
_mutex_lock_timeout((mtx),(usec),SYNCH_FLAGS_NON_BLOCKING) |
#define mutex_lock_active(mtx) \ |
while (mutex_trylock((mtx)) != ESYNCH_OK_ATOMIC) |
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING) |
#define mutex_lock_timeout(mtx, usec) \ |
_mutex_lock_timeout((mtx), (usec), SYNCH_FLAGS_NON_BLOCKING) |
extern void mutex_initialize(mutex_t *mtx); |
extern int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags); |
/branches/fs/kernel/generic/include/synch/spinlock.h |
---|
101,8 → 101,26 |
preemption_enable(); |
} |
#ifdef CONFIG_DEBUG_SPINLOCK |
extern int printf(const char *, ...); |
#define DEADLOCK_THRESHOLD 100000000 |
#define DEADLOCK_PROBE_INIT(pname) count_t pname = 0 |
#define DEADLOCK_PROBE(pname, value) \ |
if ((pname)++ > (value)) { \ |
(pname) = 0; \ |
printf("Deadlock probe %s: exceeded threshold %d\n" \ |
"cpu%d: function=%s, line=%d\n", \ |
#pname, (value), CPU->id, __FUNCTION__, __LINE__); \ |
} |
#else |
#define DEADLOCK_PROBE_INIT(pname) |
#define DEADLOCK_PROBE(pname, value) |
#endif |
#else |
/* On UP systems, spinlocks are effectively left out. */ |
#define SPINLOCK_DECLARE(name) |
#define SPINLOCK_EXTERN(name) |
113,6 → 131,9 |
#define spinlock_trylock(x) (preemption_disable(), 1) |
#define spinlock_unlock(x) preemption_enable() |
#define DEADLOCK_PROBE_INIT(pname) |
#define DEADLOCK_PROBE(pname, value) |
#endif |
#endif |
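The probe pair is designed for trylock retry loops: initialize a counter once, bump it on every failed attempt, and report when DEADLOCK_THRESHOLD is exceeded. The waitq.c and timeout.c hunks below use it in exactly this shape; lock_both here is only an illustrative wrapper:

```c
/* Sketch: retry a trylock under potential lock-order inversion and
 * report if we spin past the threshold. */
static void lock_both(spinlock_t *outer, spinlock_t *inner)
{
	DEADLOCK_PROBE_INIT(p_lock);
retry:
	spinlock_lock(outer);
	if (!spinlock_trylock(inner)) {
		spinlock_unlock(outer);
		DEADLOCK_PROBE(p_lock, DEADLOCK_THRESHOLD);
		goto retry;	/* back off and retry to avoid deadlock */
	}
	/* Both locks are held here; release in reverse order when done. */
}
```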
/branches/fs/kernel/generic/include/synch/waitq.h |
---|
40,8 → 40,10 |
#include <synch/synch.h> |
#include <adt/list.h> |
#define WAKEUP_FIRST 0 |
#define WAKEUP_ALL 1 |
typedef enum { |
WAKEUP_FIRST = 0, |
WAKEUP_ALL |
} wakeup_mode_t; |
/** Wait queue structure. */ |
typedef struct { |
70,8 → 72,8 |
extern ipl_t waitq_sleep_prepare(waitq_t *wq); |
extern int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags); |
extern void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl); |
extern void waitq_wakeup(waitq_t *wq, bool all); |
extern void _waitq_wakeup_unsafe(waitq_t *wq, bool all); |
extern void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode); |
extern void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode); |
extern void waitq_interrupt_sleep(struct thread *t); |
#endif |
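With the enum, call sites document their intent instead of passing a bare bool. A hedged pair of call-site sketches; waitq_sleep_timeout() is the usual sleep entry point declared elsewhere in waitq.h (not visible in this hunk):

```c
static waitq_t wq;	/* assume waitq_initialize(&wq) ran earlier */

static void consumer(void)
{
	/* Block until a wakeup arrives (no timeout, default flags). */
	(void) waitq_sleep_timeout(&wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
}

static void producer(void)
{
	waitq_wakeup(&wq, WAKEUP_FIRST);	/* wake longest waiter only */
}
```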
/branches/fs/kernel/generic/include/ddi/irq.h |
---|
121,6 → 121,14 |
* this lock must not be taken first. |
*/ |
SPINLOCK_DECLARE(lock); |
/** Send EOI before processing the interrupt. |
* This is essential for the timer interrupt, which |
* has to be acknowledged before preemption takes place |
* to make sure another timer interrupt will |
* eventually be generated. |
*/ |
bool preack; |
/** Unique device number. -1 if not yet assigned. */ |
devno_t devno; |
127,7 → 135,7 |
/** Actual IRQ number. -1 if not yet assigned. */ |
inr_t inr; |
/** Trigger level of the IRQ.*/ |
/** Trigger level of the IRQ. */ |
irq_trigger_t trigger; |
/** Claim ownership of the IRQ. */ |
irq_ownership_t (* claim)(void); |
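The new preack field is consumed by the IRQ dispatch path: when set, the controller is acknowledged before the handler runs, so a timer tick that ends in preemption cannot leave the interrupt unacknowledged. A hypothetical dispatch fragment; irq_ack() and the handler/arg field names are assumptions for illustration, not part of this changeset:

```c
/* Hypothetical dispatch logic honoring irq->preack; irq_ack() stands in
 * for the architecture's EOI primitive. */
static void irq_dispatch(irq_t *irq)
{
	if (irq->preack)
		irq_ack(irq);	/* EOI first: the handler may preempt */
	irq->handler(irq, irq->arg);
	if (!irq->preack)
		irq_ack(irq);	/* usual case: EOI after servicing */
}
```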
/branches/fs/kernel/generic/include/printf/printf_core.h |
---|
47,7 → 47,7 |
}; |
int printf_core(const char *fmt, struct printf_spec *ps ,va_list ap); |
int printf_core(const char *fmt, struct printf_spec *ps, va_list ap); |
#endif |
/branches/fs/kernel/generic/include/arch.h |
---|
74,8 → 74,12 |
extern void arch_post_cpu_init(void); |
extern void arch_pre_smp_init(void); |
extern void arch_post_smp_init(void); |
extern void calibrate_delay_loop(void); |
extern void reboot(void); |
extern void arch_reboot(void); |
#endif |
/** @} |
/branches/fs/kernel/generic/include/mm/as.h |
---|
89,24 → 89,26 |
@public |
/** Protected by asidlock. */ |
link_t inactive_as_with_asid_link; |
/** |
* Number of processors on which this address space is active. |
* Protected by asidlock. |
*/ |
count_t cpu_refcount; |
/** |
* Address space identifier. |
* Constant on architectures that do not support ASIDs. |
* Protected by asidlock. |
*/ |
asid_t asid; |
/** Number of references (i.e. tasks that reference this as). */ |
atomic_t refcount; |
mutex_t lock; |
/** Number of references (i.e. tasks that reference this as). */ |
count_t refcount; |
/** Number of processors on which this address space is active. */ |
count_t cpu_refcount; |
/** B+tree of address space areas. */ |
btree_t as_area_btree; |
/** |
* Address space identifier. |
* Constant on architectures that do not support ASIDs. |
*/ |
asid_t asid; |
/** Non-generic content. */ |
as_genarch_t genarch; |
133,24 → 135,26 |
typedef struct as { |
/** Protected by asidlock. */ |
link_t inactive_as_with_asid_link; |
/** |
* Number of processors on which this address space is active. |
* Protected by asidlock. |
*/ |
count_t cpu_refcount; |
/** |
* Address space identifier. |
* Constant on architectures that do not support ASIDs. |
* Protected by asidlock. |
*/ |
asid_t asid; |
mutex_t lock; |
/** Number of references (i.e. tasks that reference this as). */ |
count_t refcount; |
atomic_t refcount; |
/** Number of processors on which this address space is active. */ |
count_t cpu_refcount; |
mutex_t lock; |
/** B+tree of address space areas. */ |
btree_t as_area_btree; |
/** |
* Address space identifier. |
* Constant on architectures that do not support ASIDs. |
*/ |
asid_t asid; |
/** Non-generic content. */ |
as_genarch_t genarch; |
205,7 → 209,6 |
/** Address space area structure. |
* |
* Each as_area_t structure describes one contiguous area of virtual memory. |
* In the future, it should not be difficult to support shared areas. |
*/ |
typedef struct { |
mutex_t lock; |
250,7 → 253,6 |
extern as_operations_t *as_operations; |
#endif |
SPINLOCK_EXTERN(inactive_as_with_asid_lock); |
extern link_t inactive_as_with_asid_head; |
extern void as_init(void); |
/branches/fs/kernel/generic/include/syscall/syscall.h |
---|
40,6 → 40,7 |
SYS_TLS_SET = 1, /* Hardcoded in AMD64, IA32 uspace - psthread.S */ |
SYS_THREAD_CREATE, |
SYS_THREAD_EXIT, |
SYS_THREAD_GET_ID, |
SYS_TASK_GET_ID, |
SYS_FUTEX_SLEEP, |
SYS_FUTEX_WAKEUP, |
/branches/fs/kernel/generic/src/synch/spinlock.c |
---|
73,7 → 73,6 |
* @param sl Pointer to spinlock_t structure. |
*/ |
#ifdef CONFIG_DEBUG_SPINLOCK |
#define DEADLOCK_THRESHOLD 100000000 |
void spinlock_lock_debug(spinlock_t *sl) |
{ |
count_t i = 0; |
84,7 → 83,7 |
while (test_and_set(&sl->val)) { |
/* |
* We need to be careful about printflock and fb_lock. |
* We need to be careful about printf_lock and fb_lock. |
* Both of them are used to report deadlocks via |
* printf() and fb_putchar(). |
* |
94,13 → 93,13 |
* However, we encountered false positives caused by very |
* slow VESA framebuffer interaction (especially when |
* run in a simulator) that caused problems with both |
* printflock and fb_lock. |
* printf_lock and fb_lock. |
* |
* Possible deadlocks on both printflock and fb_lock |
* Possible deadlocks on both printf_lock and fb_lock |
* are therefore not reported as they would cause an |
* infinite recursion. |
*/ |
if (sl == &printflock) |
if (sl == &printf_lock) |
continue; |
#ifdef CONFIG_FB |
if (sl == &fb_lock) |
/branches/fs/kernel/generic/src/synch/waitq.c |
---|
86,6 → 86,7 |
thread_t *t = (thread_t *) data; |
waitq_t *wq; |
bool do_wakeup = false; |
DEADLOCK_PROBE_INIT(p_wqlock); |
spinlock_lock(&threads_lock); |
if (!thread_exists(t)) |
96,6 → 97,7 |
if ((wq = t->sleep_queue)) { /* assignment */ |
if (!spinlock_trylock(&wq->lock)) { |
spinlock_unlock(&t->lock); |
DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); |
goto grab_locks; /* avoid deadlock */ |
} |
128,6 → 130,7 |
waitq_t *wq; |
bool do_wakeup = false; |
ipl_t ipl; |
DEADLOCK_PROBE_INIT(p_wqlock); |
ipl = interrupts_disable(); |
spinlock_lock(&threads_lock); |
147,6 → 150,7 |
if (!spinlock_trylock(&wq->lock)) { |
spinlock_unlock(&t->lock); |
DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); |
goto grab_locks; /* avoid deadlock */ |
} |
379,11 → 383,10 |
* Besides its 'normal' wakeup operation, it attempts to unregister possible |
* timeout. |
* |
* @param wq Pointer to wait queue. |
* @param all If this is non-zero, all sleeping threads will be woken up and |
* missed count will be zeroed. |
* @param wq Pointer to wait queue. |
* @param mode Wakeup mode. |
*/ |
void waitq_wakeup(waitq_t *wq, bool all) |
void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode) |
{ |
ipl_t ipl; |
390,10 → 393,10 |
ipl = interrupts_disable(); |
spinlock_lock(&wq->lock); |
_waitq_wakeup_unsafe(wq, all); |
_waitq_wakeup_unsafe(wq, mode); |
spinlock_unlock(&wq->lock); |
interrupts_restore(ipl); |
spinlock_unlock(&wq->lock); |
interrupts_restore(ipl); |
} |
/** Internal SMP- and IRQ-unsafe version of waitq_wakeup() |
401,22 → 404,27 |
* This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It |
* assumes wq->lock is already locked and interrupts are already disabled. |
* |
* @param wq Pointer to wait queue. |
* @param all If this is non-zero, all sleeping threads will be woken up and |
* missed count will be zeroed. |
* @param wq Pointer to wait queue. |
* @param mode If mode is WAKEUP_FIRST, then the longest waiting thread, |
* if any, is woken up. If mode is WAKEUP_ALL, then all |
* waiting threads, if any, are woken up. If there are no |
* waiting threads to be woken up, the missed wakeup is |
* recorded in the wait queue. |
*/ |
void _waitq_wakeup_unsafe(waitq_t *wq, bool all) |
void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode) |
{ |
thread_t *t; |
count_t count = 0; |
loop: |
if (list_empty(&wq->head)) { |
wq->missed_wakeups++; |
if (all) |
wq->missed_wakeups = 0; |
if (count && mode == WAKEUP_ALL) |
wq->missed_wakeups--; |
return; |
} |
count++; |
t = list_get_instance(wq->head.next, thread_t, wq_link); |
/* |
445,7 → 453,7 |
thread_ready(t); |
if (all) |
if (mode == WAKEUP_ALL) |
goto loop; |
} |
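The reworked accounting deserves a short trace: a WAKEUP_ALL pass always terminates by seeing an empty queue, so the unconditional missed_wakeups++ there would over-count; the count && WAKEUP_ALL decrement cancels it whenever at least one thread was actually woken:

```c
/* Trace of _waitq_wakeup_unsafe() with the new accounting:
 *   WAKEUP_ALL,   2 sleepers: wake, wake, empty -> ++ then -- => 0 missed
 *   WAKEUP_ALL,   0 sleepers: empty, count == 0 -> ++ only    => 1 missed
 *   WAKEUP_FIRST, 1 sleeper:  wake, return                    => 0 missed
 *   WAKEUP_FIRST, 0 sleepers: empty -> ++                     => 1 missed */
```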
/branches/fs/kernel/generic/src/main/kinit.c |
---|
118,7 → 118,7 |
#ifdef CONFIG_SMP |
if (config.cpu_count > 1) { |
unsigned int i; |
count_t i; |
/* |
* For each CPU, create its load balancing thread. |
167,6 → 167,7 |
task_t *utask = task_run_program((void *) init.tasks[i].addr, |
"uspace"); |
if (utask) { |
/* |
* Set capabilities to init userspace tasks. |
/branches/fs/kernel/generic/src/main/shutdown.c |
---|
0,0 → 1,53 |
/* |
* Copyright (c) 2007 Martin Decky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup main |
* @{ |
*/ |
/** |
* @file |
* @brief Shutdown procedures. |
*/ |
#include <arch.h> |
#include <print.h> |
void reboot(void) |
{ |
task_done(); |
#ifdef CONFIG_DEBUG |
printf("Rebooting the system\n"); |
#endif |
arch_reboot(); |
} |
/** @} |
*/ |
/branches/fs/kernel/generic/src/time/timeout.c |
---|
45,7 → 45,6 |
#include <arch/asm.h> |
#include <arch.h> |
/** Initialize timeouts |
* |
* Initialize kernel timeouts. |
175,6 → 174,7 |
timeout_t *hlp; |
link_t *l; |
ipl_t ipl; |
DEADLOCK_PROBE_INIT(p_tolock); |
grab_locks: |
ipl = interrupts_disable(); |
186,7 → 186,8 |
} |
if (!spinlock_trylock(&t->cpu->timeoutlock)) { |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
interrupts_restore(ipl); |
DEADLOCK_PROBE(p_tolock, DEADLOCK_THRESHOLD); |
goto grab_locks; |
} |
/branches/fs/kernel/generic/src/time/clock.c |
---|
41,7 → 41,6 |
#include <time/clock.h> |
#include <time/timeout.h> |
#include <arch/types.h> |
#include <config.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
57,16 → 56,12 |
#include <mm/frame.h> |
#include <ddi/ddi.h> |
/** Physical memory area of the real time clock. */ |
/* Pointer to variable with uptime */ |
uptime_t *uptime; |
/** Physical memory area of the real time clock */ |
static parea_t clock_parea; |
/* Pointers to public variables with time */ |
struct ptime { |
unative_t seconds1; |
unative_t useconds; |
unative_t seconds2; |
}; |
struct ptime *public_time; |
/* Variable holding the fragment of a second, so that we update |
* seconds correctly |
*/ |
86,15 → 81,14 |
if (!faddr) |
panic("Cannot allocate page for clock"); |
public_time = (struct ptime *) PA2KA(faddr); |
uptime = (uptime_t *) PA2KA(faddr); |
uptime->seconds1 = 0; |
uptime->seconds2 = 0; |
uptime->useconds = 0; |
/* TODO: We would need some arch dependent settings here */ |
public_time->seconds1 = 0; |
public_time->seconds2 = 0; |
public_time->useconds = 0; |
clock_parea.pbase = (uintptr_t) faddr; |
clock_parea.vbase = (uintptr_t) public_time; |
clock_parea.vbase = (uintptr_t) uptime; |
clock_parea.frames = 1; |
clock_parea.cacheable = true; |
ddi_parea_register(&clock_parea); |
104,8 → 98,6 |
* physmem_map() the clock_parea. |
*/ |
sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true); |
sysinfo_set_item_val("clock.fcolor", NULL, (unative_t) |
PAGE_COLOR(clock_parea.vbase)); |
sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr); |
} |
118,16 → 110,16 |
static void clock_update_counters(void) |
{ |
if (CPU->id == 0) { |
secfrag += 1000000/HZ; |
secfrag += 1000000 / HZ; |
if (secfrag >= 1000000) { |
secfrag -= 1000000; |
public_time->seconds1++; |
uptime->seconds1++; |
write_barrier(); |
public_time->useconds = secfrag; |
uptime->useconds = secfrag; |
write_barrier(); |
public_time->seconds2 = public_time->seconds1; |
uptime->seconds2 = uptime->seconds1; |
} else |
public_time->useconds += 1000000/HZ; |
uptime->useconds += 1000000 / HZ; |
} |
} |
/branches/fs/kernel/generic/src/ddi/ddi.c |
---|
99,8 → 99,7 |
* @return 0 on success, EPERM if the caller lacks capabilities to use this |
* syscall, ENOENT if there is no task matching the specified ID or the |
* physical address space is not enabled for mapping and ENOMEM if there |
* was a problem in creating address space area. ENOTSUP is returned when |
* an attempt to create an illegal address alias is detected. |
* was a problem in creating address space area. |
*/ |
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags) |
{ |
139,18 → 138,6 |
interrupts_restore(ipl); |
return ENOENT; |
} |
#ifdef CONFIG_VIRT_IDX_DCACHE |
if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) { |
/* |
* Refuse to create an illegal address alias. |
*/ |
spinlock_unlock(&parea_lock); |
interrupts_restore(ipl); |
return ENOTSUP; |
} |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
spinlock_unlock(&parea_lock); |
spinlock_lock(&TASK->lock); |
/branches/fs/kernel/generic/src/ddi/irq.c |
---|
138,6 → 138,7 |
{ |
link_initialize(&irq->link); |
spinlock_initialize(&irq->lock, "irq.lock"); |
irq->preack = false; |
irq->inr = -1; |
irq->devno = -1; |
irq->trigger = (irq_trigger_t) 0; |
/branches/fs/kernel/generic/src/printf/snprintf.c |
---|
50,4 → 50,3 |
/** @} |
*/ |
/branches/fs/kernel/generic/src/printf/vprintf.c |
---|
35,7 → 35,11 |
#include <print.h> |
#include <printf/printf_core.h> |
#include <putchar.h> |
#include <synch/spinlock.h> |
#include <arch/asm.h> |
SPINLOCK_INITIALIZE(printf_lock); /**< vprintf spinlock */ |
static int vprintf_write(const char *str, size_t count, void *unused) |
{ |
size_t i; |
55,8 → 59,16 |
int vprintf(const char *fmt, va_list ap) |
{ |
struct printf_spec ps = {(int(*)(void *, size_t, void *)) vprintf_write, NULL}; |
return printf_core(fmt, &ps, ap); |
int irqpri = interrupts_disable(); |
spinlock_lock(&printf_lock); |
int ret = printf_core(fmt, &ps, ap); |
spinlock_unlock(&printf_lock); |
interrupts_restore(irqpri); |
return ret; |
} |
/** @} |
/branches/fs/kernel/generic/src/printf/vsnprintf.c |
---|
42,8 → 42,6 |
char *string; /* destination string */ |
}; |
int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data); |
/** Write a string to the given buffer. |
* Write at most data->size characters including the trailing zero. According to C99, snprintf() has to return the number |
* of characters that would have been written if enough space had been available. Hence the return value is not |
54,7 → 52,7 |
* @param data structure with destination string, counter of used space and total string size. |
* @return number of characters to print (not characters really printed!) |
*/ |
int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data) |
static int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data) |
{ |
size_t i; |
i = data->size - data->len; |
/branches/fs/kernel/generic/src/printf/printf_core.c |
---|
38,14 → 38,9 |
#include <printf/printf_core.h> |
#include <putchar.h> |
#include <print.h> |
#include <synch/spinlock.h> |
#include <arch/arg.h> |
#include <arch/asm.h> |
#include <arch.h> |
SPINLOCK_INITIALIZE(printflock); /**< printf spinlock */ |
#define __PRINTF_FLAG_PREFIX 0x00000001 /**< show prefixes 0x or 0*/ |
#define __PRINTF_FLAG_SIGNED 0x00000002 /**< signed / unsigned number */ |
#define __PRINTF_FLAG_ZEROPADDED 0x00000004 /**< print leading zeroes */ |
458,7 → 453,6 |
*/ |
int printf_core(const char *fmt, struct printf_spec *ps, va_list ap) |
{ |
int irqpri; |
int i = 0, j = 0; /**< i is index of currently processed char from fmt, j is index to the first not printed nonformating character */ |
int end; |
int counter; /**< counter of printed characters */ |
472,10 → 466,7 |
uint64_t flags; |
counter = 0; |
irqpri = interrupts_disable(); |
spinlock_lock(&printflock); |
while ((c = fmt[i])) { |
/* control character */ |
if (c == '%' ) { |
712,8 → 703,6 |
} |
out: |
spinlock_unlock(&printflock); |
interrupts_restore(irqpri); |
return counter; |
} |
/branches/fs/kernel/generic/src/printf/vsprintf.c |
---|
36,7 → 36,7 |
int vsprintf(char *str, const char *fmt, va_list ap) |
{ |
return vsnprintf(str, (size_t)-1, fmt, ap); |
return vsnprintf(str, (size_t) - 1, fmt, ap); |
} |
/** @} |
/branches/fs/kernel/generic/src/console/cmd.c |
---|
48,6 → 48,7 |
#include <arch/types.h> |
#include <adt/list.h> |
#include <arch.h> |
#include <config.h> |
#include <func.h> |
#include <macros.h> |
#include <debug.h> |
79,10 → 80,26 |
static cmd_info_t exit_info = { |
.name = "exit", |
.description = "Exit kconsole", |
.description = "Exit kconsole.", |
.argc = 0 |
}; |
static int cmd_reboot(cmd_arg_t *argv); |
static cmd_info_t reboot_info = { |
.name = "reboot", |
.description = "Reboot.", |
.func = cmd_reboot, |
.argc = 0 |
}; |
static int cmd_uptime(cmd_arg_t *argv); |
static cmd_info_t uptime_info = { |
.name = "uptime", |
.description = "Print uptime information.", |
.func = cmd_uptime, |
.argc = 0 |
}; |
static int cmd_continue(cmd_arg_t *argv); |
static cmd_info_t continue_info = { |
.name = "continue", |
192,10 → 209,10 |
}; |
/* Data and methods for 'call0' command. */ |
static char call0_buf[MAX_CMDLINE+1]; |
static char carg1_buf[MAX_CMDLINE+1]; |
static char carg2_buf[MAX_CMDLINE+1]; |
static char carg3_buf[MAX_CMDLINE+1]; |
static char call0_buf[MAX_CMDLINE + 1]; |
static char carg1_buf[MAX_CMDLINE + 1]; |
static char carg2_buf[MAX_CMDLINE + 1]; |
static char carg3_buf[MAX_CMDLINE + 1]; |
static int cmd_call0(cmd_arg_t *argv); |
static cmd_arg_t call0_argv = { |
211,6 → 228,21 |
.argv = &call0_argv |
}; |
/* Data and methods for 'mcall0' command. */ |
static int cmd_mcall0(cmd_arg_t *argv); |
static cmd_arg_t mcall0_argv = { |
.type = ARG_TYPE_STRING, |
.buffer = call0_buf, |
.len = sizeof(call0_buf) |
}; |
static cmd_info_t mcall0_info = { |
.name = "mcall0", |
.description = "mcall0 <function> -> call function() on each CPU.", |
.func = cmd_mcall0, |
.argc = 1, |
.argv = &mcall0_argv |
}; |
/* Data and methods for 'call1' command. */ |
static int cmd_call1(cmd_arg_t *argv); |
static cmd_arg_t call1_argv[] = { |
406,6 → 438,7 |
static cmd_info_t *basic_commands[] = { |
&call0_info, |
&mcall0_info, |
&call1_info, |
&call2_info, |
&call3_info, |
413,6 → 446,8 |
&cpus_info, |
&desc_info, |
&exit_info, |
&reboot_info, |
&uptime_info, |
&halt_info, |
&help_info, |
&ipc_task_info, |
488,6 → 523,41 |
return 1; |
} |
/** Reboot the system. |
* |
* @param argv Argument vector. |
* |
* @return 0 on failure, 1 on success. |
*/ |
int cmd_reboot(cmd_arg_t *argv) |
{ |
reboot(); |
/* Not reached */ |
return 1; |
} |
/** Print system uptime information. |
* |
* @param argv Argument vector. |
* |
* @return 0 on failure, 1 on success. |
*/ |
int cmd_uptime(cmd_arg_t *argv) |
{ |
ASSERT(uptime); |
/* This doesn't have to be very accurate */ |
unative_t sec = uptime->seconds1; |
printf("Up %u days, %u hours, %u minutes, %u seconds\n", |
sec / 86400, (sec % 86400) / 3600, (sec % 3600) / 60, sec % 60); |
return 1; |
} |
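cmd_uptime() splits the raw second counter with plain integer division; a quick hand-worked check of the arithmetic:

```c
/* Decomposition used by cmd_uptime(), for sec = 93784:
 *   93784 / 86400          == 1   day
 *   (93784 % 86400) / 3600 == 2   hours
 *   (93784 % 3600) / 60    == 3   minutes
 *   93784 % 60             == 4   seconds */
```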
/** Describe specified command. |
* |
* @param argv Argument vector. |
540,7 → 610,7 |
struct { |
unative_t f; |
unative_t gp; |
}fptr; |
} fptr; |
#endif |
symaddr = get_symbol_addr((char *) argv->buffer); |
551,7 → 621,7 |
printf("Duplicate symbol, be more specific.\n"); |
} else { |
symbol = get_symtab_entry(symaddr); |
printf("Calling f(): %.*p: %s\n", sizeof(uintptr_t) * 2, symaddr, symbol); |
printf("Calling %s() (%.*p)\n", symbol, sizeof(uintptr_t) * 2, symaddr); |
#ifdef ia64 |
fptr.f = symaddr; |
fptr.gp = ((unative_t *)cmd_call2)[1]; |
565,6 → 635,35 |
return 1; |
} |
/** Call function with zero parameters on each CPU */ |
int cmd_mcall0(cmd_arg_t *argv) |
{ |
/* |
* For each CPU, create a thread which will |
* call the function. |
*/ |
count_t i; |
for (i = 0; i < config.cpu_count; i++) { |
if (!cpus[i].active) |
continue; |
thread_t *t; |
if ((t = thread_create((void (*)(void *)) cmd_call0, (void *) argv, TASK, THREAD_FLAG_WIRED, "call0", false))) { |
spinlock_lock(&t->lock); |
t->cpu = &cpus[i]; |
spinlock_unlock(&t->lock); |
printf("cpu%u: ", i); |
thread_ready(t); |
thread_join(t); |
thread_detach(t); |
} else |
printf("Unable to create thread for cpu%u\n", i); |
} |
return 1; |
} |
/** Call function with one parameter */ |
int cmd_call1(cmd_arg_t *argv) |
{ |
713,7 → 812,7 |
/** Write 4 byte value to address */ |
int cmd_set4(cmd_arg_t *argv) |
{ |
uint32_t *addr ; |
uint32_t *addr; |
uint32_t arg1 = argv[1].intval; |
bool pointer = false; |
/branches/fs/kernel/generic/src/console/klog.c |
---|
90,8 → 90,6 |
ddi_parea_register(&klog_parea); |
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr); |
sysinfo_set_item_val("klog.fcolor", NULL, (unative_t) |
PAGE_COLOR((uintptr_t) klog)); |
sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER); |
sysinfo_set_item_val("klog.devno", NULL, devno); |
sysinfo_set_item_val("klog.inr", NULL, KLOG_VIRT_INR); |
/branches/fs/kernel/generic/src/proc/scheduler.c |
---|
207,7 → 207,7 |
interrupts_disable(); |
for (i = 0; i<RQ_COUNT; i++) { |
for (i = 0; i < RQ_COUNT; i++) { |
r = &CPU->rq[i]; |
spinlock_lock(&r->lock); |
if (r->n == 0) { |
377,7 → 377,8 |
void scheduler_separated_stack(void) |
{ |
int priority; |
DEADLOCK_PROBE_INIT(p_joinwq); |
ASSERT(CPU != NULL); |
if (THREAD) { |
406,9 → 407,12 |
spinlock_unlock(&THREAD->lock); |
delay(10); |
spinlock_lock(&THREAD->lock); |
DEADLOCK_PROBE(p_joinwq, |
DEADLOCK_THRESHOLD); |
goto repeat; |
} |
_waitq_wakeup_unsafe(&THREAD->join_wq, false); |
_waitq_wakeup_unsafe(&THREAD->join_wq, |
WAKEUP_FIRST); |
spinlock_unlock(&THREAD->join_wq.lock); |
THREAD->state = Undead; |
447,8 → 451,8 |
/* |
* Entering state is unexpected. |
*/ |
panic("tid%d: unexpected state %s\n", THREAD->tid, |
thread_states[THREAD->state]); |
panic("tid%llu: unexpected state %s\n", THREAD->tid, |
thread_states[THREAD->state]); |
break; |
} |
500,7 → 504,7 |
THREAD->state = Running; |
#ifdef SCHEDULER_VERBOSE |
printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n", |
printf("cpu%d: tid %llu (priority=%d, ticks=%llu, nrdy=%ld)\n", |
CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, |
atomic_get(&CPU->nrdy)); |
#endif |
568,7 → 572,7 |
* Searching least priority queues on all CPU's first and most priority |
* queues on all CPU's last. |
*/ |
for (j= RQ_COUNT - 1; j >= 0; j--) { |
for (j = RQ_COUNT - 1; j >= 0; j--) { |
for (i = 0; i < config.cpu_active; i++) { |
link_t *l; |
runq_t *r; |
609,8 → 613,8 |
*/ |
spinlock_lock(&t->lock); |
if ((!(t->flags & (THREAD_FLAG_WIRED | |
THREAD_FLAG_STOLEN))) && |
(!(t->fpu_context_engaged)) ) { |
THREAD_FLAG_STOLEN))) && |
(!(t->fpu_context_engaged))) { |
/* |
* Remove t from r. |
*/ |
636,7 → 640,7 |
*/ |
spinlock_lock(&t->lock); |
#ifdef KCPULB_VERBOSE |
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, " |
printf("kcpulb%d: TID %llu -> cpu%d, nrdy=%ld, " |
"avg=%nd\n", CPU->id, t->tid, CPU->id, |
atomic_get(&CPU->nrdy), |
atomic_get(&nrdy) / config.cpu_active); |
719,7 → 723,7 |
for (cur = r->rq_head.next; cur != &r->rq_head; |
cur = cur->next) { |
t = list_get_instance(cur, thread_t, rq_link); |
printf("%d(%s) ", t->tid, |
printf("%llu(%s) ", t->tid, |
thread_states[t->state]); |
} |
printf("\n"); |
/branches/fs/kernel/generic/src/proc/task.c |
---|
41,6 → 41,7 |
#include <proc/uarg.h> |
#include <mm/as.h> |
#include <mm/slab.h> |
#include <atomic.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <arch.h> |
92,6 → 93,49 |
btree_create(&tasks_btree); |
} |
/** Kill all tasks except the current task. |
* |
*/ |
void task_done(void) |
{ |
task_t *t; |
do { /* Repeat while there are tasks other than TASK */ |
/* Messing with task structures, avoid deadlock */ |
ipl_t ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
t = NULL; |
link_t *cur; |
for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head; cur = cur->next) { |
btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link); |
unsigned int i; |
for (i = 0; i < node->keys; i++) { |
if ((task_t *) node->value[i] != TASK) { |
t = (task_t *) node->value[i]; |
break; |
} |
} |
} |
if (t != NULL) { |
task_id_t id = t->taskid; |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
#ifdef CONFIG_DEBUG |
printf("Killing task %llu\n", id); |
#endif |
task_kill(id); |
} else { |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
} |
} while (t != NULL); |
} |
/** Create new task |
* |
140,11 → 184,8 |
/* |
* Increment address space reference count. |
* TODO: Reconsider the locking scheme. |
*/ |
mutex_lock(&as->lock); |
as->refcount++; |
mutex_unlock(&as->lock); |
atomic_inc(&as->refcount); |
spinlock_lock(&tasks_lock); |
166,15 → 207,8 |
task_destroy_arch(t); |
btree_destroy(&t->futexes); |
mutex_lock_active(&t->as->lock); |
if (--t->as->refcount == 0) { |
mutex_unlock(&t->as->lock); |
if (atomic_predec(&t->as->refcount) == 0) |
as_destroy(t->as); |
/* |
* t->as is destroyed. |
*/ |
} else |
mutex_unlock(&t->as->lock); |
free(t); |
TASK = NULL; |
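The changeset replaces the mutex-guarded counter with atomics: atomic_inc() when a task takes a reference and atomic_predec() when it drops one. This assumes (HelenOS semantics) that atomic_predec() returns the decremented value, so zero identifies the last holder; as_hold/as_release are illustrative helper names:

```c
/* Condensed reference-counting idiom after this change. */
static void as_hold(as_t *as)
{
	atomic_inc(&as->refcount);		/* task_create() side */
}

static void as_release(as_t *as)
{
	if (atomic_predec(&as->refcount) == 0)	/* task_destroy() side */
		as_destroy(as);			/* last reference is gone */
}
```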
382,7 → 416,7 |
link_t *cur; |
ipl_t ipl; |
/* Messing with thread structures, avoid deadlock */ |
/* Messing with task structures, avoid deadlock */ |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
408,7 → 442,7 |
char suffix; |
order(task_get_accounting(t), &cycles, &suffix); |
printf("%-6lld %-10s %-3ld %#10zx %#10zx %9llu%c %7zd " |
printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd " |
"%6zd", t->taskid, t->name, t->context, t, t->as, |
cycles, suffix, t->refcount, |
atomic_get(&t->active_calls)); |
495,7 → 529,7 |
ipc_cleanup(); |
futex_cleanup(); |
klog_printf("Cleanup of task %lld completed.", TASK->taskid); |
klog_printf("Cleanup of task %llu completed.", TASK->taskid); |
} |
/** Kernel thread used to kill the userspace task when its main thread exits. |
/branches/fs/kernel/generic/src/proc/thread.c |
---|
94,7 → 94,7 |
btree_t threads_btree; |
SPINLOCK_INITIALIZE(tidlock); |
uint32_t last_tid = 0; |
thread_id_t last_tid = 0; |
static slab_cache_t *thread_slab; |
#ifdef ARCH_HAS_FPU |
238,6 → 238,7 |
cpu = CPU; |
if (t->flags & THREAD_FLAG_WIRED) { |
ASSERT(t->cpu != NULL); |
cpu = t->cpu; |
} |
t->state = Ready; |
496,7 → 497,7 |
ipl_t ipl; |
/* |
* Since the thread is expected to not be already detached, |
* Since the thread is expected not to be already detached, |
* the pointer to it must still be valid. |
*/ |
ipl = interrupts_disable(); |
580,7 → 581,7 |
char suffix; |
order(t->cycles, &cycles, &suffix); |
printf("%-6zd %-10s %#10zx %-8s %#10zx %-3ld %#10zx " |
printf("%-6llu %-10s %#10zx %-8s %#10zx %-3ld %#10zx " |
"%#10zx %9llu%c ", t->tid, t->name, t, |
thread_states[t->state], t->task, t->task->context, |
t->thread_code, t->kstack, cycles, suffix); |
636,12 → 637,11 |
/** Process syscall to create new thread. |
* |
*/ |
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name) |
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name, thread_id_t *uspace_thread_id) |
{ |
thread_t *t; |
char namebuf[THREAD_NAME_BUFLEN]; |
uspace_arg_t *kernel_uarg; |
uint32_t tid; |
int rc; |
rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN); |
658,12 → 658,14 |
t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf, |
false); |
if (t) { |
tid = t->tid; |
thread_ready(t); |
return (unative_t) tid; |
} else { |
if (uspace_thread_id != NULL) |
return (unative_t) copy_to_uspace(uspace_thread_id, &t->tid, |
sizeof(t->tid)); |
else |
return 0; |
} else |
free(kernel_uarg); |
} |
return (unative_t) ENOMEM; |
} |
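After this change the syscall's return value carries only status: 0 on success (with the new TID written through uspace_thread_id, which may also be NULL), the copy_to_uspace() error code if the write-back fails, or ENOMEM. A hypothetical userspace wrapper; __SYSCALL3, sysarg_t and the wrapper name are assumptions used for illustration:

```c
/* Hypothetical libc-side wrapper around the extended syscall. */
static int my_thread_create(uspace_arg_t *uarg, char *name, thread_id_t *tid)
{
	/* On success the kernel stores the 8-byte TID into *tid and
	 * returns 0; any nonzero return value is an error code. */
	return (int) __SYSCALL3(SYS_THREAD_CREATE, (sysarg_t) uarg,
	    (sysarg_t) name, (sysarg_t) tid);
}
```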
678,6 → 680,22 |
return 0; |
} |
/** Syscall for getting TID. |
* |
* @param uspace_thread_id Userspace address of 8-byte buffer where to store |
* current thread ID. |
* |
* @return 0 on success or an error code from @ref errno.h. |
*/ |
unative_t sys_thread_get_id(thread_id_t *uspace_thread_id) |
{ |
/* |
* No need to acquire lock on THREAD because tid |
* remains constant for the lifespan of the thread. |
*/ |
return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid, |
sizeof(THREAD->tid)); |
} |
/** @} |
*/ |
/branches/fs/kernel/generic/src/lib/rd.c |
---|
49,7 → 49,7 |
int init_rd(rd_header * header, size_t size) |
{ |
printf("Header magic %c%c%c%c\n",header->magic[0],header->magic[1],header->magic[2],header->magic[3]); |
//printf("Header magic %c%c%c%c\n",header->magic[0],header->magic[1],header->magic[2],header->magic[3]); |
/* Identify RAM disk */ |
if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) || |
56,14 → 56,14 |
(header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3)) |
return RE_INVALID; |
printf("Header version %d\n",header->version); |
printf("Expected version %d\n",RD_VERSION); |
//printf("Header version %d\n",header->version); |
//printf("Expected version %d\n",RD_VERSION); |
/* Identify version */ |
if (header->version != RD_VERSION) |
return RE_UNSUPPORTED; |
printf("Header type %d\n",header->data_type); |
//printf("Header type %d\n",header->data_type); |
uint32_t hsize; |
uint64_t dsize; |
80,9 → 80,9 |
return RE_UNSUPPORTED; |
} |
printf("Header size %d\n",header->header_size); |
printf("Data size %d\n",header->data_size); |
printf("Size %d\n",size); |
//printf("Header size %d\n",header->header_size); |
//printf("Data size %d\n",header->data_size); |
//printf("Size %d\n",size); |
//jelen: does this make any sense? (we don't even care about the header size, do we?) |
/* |
98,7 → 98,6 |
if ((uint64_t) hsize + dsize > size) |
dsize = size - hsize; |
rd_parea.pbase = ALIGN_DOWN((uintptr_t)KA2PA((void *) header + hsize), FRAME_SIZE); |
rd_parea.vbase = (uintptr_t) ((void *) header + hsize); |
rd_parea.frames = SIZE2FRAMES(dsize); |
111,8 → 110,6 |
sysinfo_set_item_val("rd.size", NULL, dsize); |
sysinfo_set_item_val("rd.address.physical", NULL, (unative_t) |
KA2PA((void *) header + hsize)); |
sysinfo_set_item_val("rd.address.color", NULL, (unative_t) |
PAGE_COLOR((uintptr_t) header + hsize)); |
return RE_OK; |
} |
/branches/fs/kernel/generic/src/lib/memstr.c |
---|
130,7 → 130,8 |
{ |
char *orig = dest; |
while ((*(dest++) = *(src++))); |
while ((*(dest++) = *(src++))) |
; |
return orig; |
} |
/branches/fs/kernel/generic/src/lib/func.c |
---|
223,20 → 223,23 |
void order(const uint64_t val, uint64_t *rv, char *suffix) |
{ |
if (val > 1000000000000000000LL) { |
*rv = val / 1000000000000000LL; |
if (val > 10000000000000000000ULL) { |
*rv = val / 1000000000000000000ULL; |
*suffix = 'Z'; |
} else if (val > 1000000000000000000ULL) { |
*rv = val / 1000000000000000ULL; |
*suffix = 'E'; |
} else if (val > 1000000000000000LL) { |
*rv = val / 1000000000000LL; |
} else if (val > 1000000000000000ULL) { |
*rv = val / 1000000000000ULL; |
*suffix = 'T'; |
} else if (val > 1000000000000LL) { |
*rv = val / 1000000000LL; |
} else if (val > 1000000000000ULL) { |
*rv = val / 1000000000ULL; |
*suffix = 'G'; |
} else if (val > 1000000000LL) { |
*rv = val / 1000000LL; |
} else if (val > 1000000000ULL) { |
*rv = val / 1000000ULL; |
*suffix = 'M'; |
} else if (val > 1000000LL) { |
*rv = val / 1000LL; |
} else if (val > 1000000ULL) { |
*rv = val / 1000ULL; |
*suffix = 'k'; |
} else { |
*rv = val; |
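The rewritten order() switches to unsigned 64-bit thresholds (the new 10^19 threshold needs ULL; signed LL tops out near 9.2e18) and extends the scale with 'E' and 'Z' steps. Two hand-worked examples of the new branches:

```c
/* Hand-worked examples of the corrected scaling:
 *   val = 1234567890     ->  > 10^9   =>  *rv = 1234, *suffix = 'M'
 *   val = 5000000000000  ->  > 10^12  =>  *rv = 5000, *suffix = 'G' */
```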
/branches/fs/kernel/generic/src/lib/objc.c |
---|
49,7 → 49,7 |
return class_create_instance(self); |
} |
- (id) free |
- (id) dispose |
{ |
return object_dispose(self); |
} |
/branches/fs/kernel/generic/src/adt/btree.c |
---|
970,7 → 970,7 |
printf("("); |
for (i = 0; i < node->keys; i++) { |
printf("%lld%s", node->key[i], i < node->keys - 1 ? "," : ""); |
printf("%llu%s", node->key[i], i < node->keys - 1 ? "," : ""); |
if (node->depth && node->subtree[i]) { |
list_append(&node->subtree[i]->bfs_link, &head); |
} |
992,7 → 992,7 |
printf("("); |
for (i = 0; i < node->keys; i++) |
printf("%lld%s", node->key[i], i < node->keys - 1 ? "," : ""); |
printf("%llu%s", node->key[i], i < node->keys - 1 ? "," : ""); |
printf(")"); |
} |
printf("\n"); |
/branches/fs/kernel/generic/src/mm/tlb.c |
---|
78,7 → 78,8 |
* @param page Virtual page address, if required by type. |
* @param count Number of pages, if required by type. |
*/ |
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, uintptr_t page, count_t count) |
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, |
uintptr_t page, count_t count) |
{ |
int i; |
107,11 → 108,11 |
/* |
* Enqueue the message. |
*/ |
cpu->tlb_messages[cpu->tlb_messages_count].type = type; |
cpu->tlb_messages[cpu->tlb_messages_count].asid = asid; |
cpu->tlb_messages[cpu->tlb_messages_count].page = page; |
cpu->tlb_messages[cpu->tlb_messages_count].count = count; |
cpu->tlb_messages_count++; |
index_t idx = cpu->tlb_messages_count++; |
cpu->tlb_messages[idx].type = type; |
cpu->tlb_messages[idx].asid = asid; |
cpu->tlb_messages[idx].page = page; |
cpu->tlb_messages[idx].count = count; |
} |
spinlock_unlock(&cpu->lock); |
} |
/branches/fs/kernel/generic/src/mm/backend_anon.c |
---|
72,11 → 72,13 |
* @param addr Faulting virtual address. |
* @param access Access mode that caused the fault (i.e. read/write/exec). |
* |
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced). |
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. |
* serviced). |
*/ |
int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) |
{ |
uintptr_t frame; |
bool dirty = false; |
if (!as_area_check_access(area, access)) |
return AS_PF_FAULT; |
86,13 → 88,14 |
/* |
* The area is shared, chances are that the mapping can be found |
* in the pagemap of the address space area share info structure. |
* in the pagemap of the address space area share info |
* structure. |
* In the case that the pagemap does not contain the respective |
* mapping, a new frame is allocated and the mapping is created. |
*/ |
mutex_lock(&area->sh_info->lock); |
frame = (uintptr_t) btree_search(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf); |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf); |
if (!frame) { |
bool allocate = true; |
int i; |
102,7 → 105,8 |
* Just a small workaround. |
*/ |
for (i = 0; i < leaf->keys; i++) { |
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) { |
if (leaf->key[i] == |
ALIGN_DOWN(addr, PAGE_SIZE)) { |
allocate = false; |
break; |
} |
110,11 → 114,15 |
if (allocate) { |
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0); |
memsetb(PA2KA(frame), FRAME_SIZE, 0); |
dirty = true; |
/* |
* Insert the address of the newly allocated frame to the pagemap. |
* Insert the address of the newly allocated |
* frame to the pagemap. |
*/ |
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf); |
btree_insert(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
} |
frame_reference_add(ADDR2PFN(frame)); |
137,12 → 145,13 |
*/ |
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0); |
memsetb(PA2KA(frame), FRAME_SIZE, 0); |
dirty = true; |
} |
/* |
* Map 'page' to 'frame'. |
* Note that TLB shootdown is not attempted as only new information is being |
* inserted into page tables. |
* Note that TLB shootdown is not attempted as only new information is |
* being inserted into page tables. |
*/ |
page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); |
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) |
162,9 → 171,6 |
void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) |
{ |
frame_free(frame); |
#ifdef CONFIG_VIRT_IDX_DCACHE |
dcache_flush_frame(page, frame); |
#endif |
} |
/** Share the anonymous address space area. |
184,7 → 190,8 |
* Copy used portions of the area to sh_info's page map. |
*/ |
mutex_lock(&area->sh_info->lock); |
for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) { |
for (cur = area->used_space.leaf_head.next; |
cur != &area->used_space.leaf_head; cur = cur->next) { |
btree_node_t *node; |
int i; |
198,14 → 205,19 |
pte_t *pte; |
page_table_lock(area->as, false); |
pte = page_mapping_find(area->as, base + j*PAGE_SIZE); |
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); |
btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base, |
(void *) PTE_GET_FRAME(pte), NULL); |
pte = page_mapping_find(area->as, |
base + j * PAGE_SIZE); |
ASSERT(pte && PTE_VALID(pte) && |
PTE_PRESENT(pte)); |
btree_insert(&area->sh_info->pagemap, |
(base + j * PAGE_SIZE) - area->base, |
(void *) PTE_GET_FRAME(pte), NULL); |
page_table_unlock(area->as, false); |
frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte))); |
pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte)); |
frame_reference_add(pfn); |
} |
} |
} |
mutex_unlock(&area->sh_info->lock); |
/branches/fs/kernel/generic/src/mm/as.c |
---|
57,6 → 57,7 |
#include <genarch/mm/page_ht.h> |
#include <mm/asid.h> |
#include <arch/mm/asid.h> |
#include <preemption.h> |
#include <synch/spinlock.h> |
#include <synch/mutex.h> |
#include <adt/list.h> |
95,10 → 96,13 |
#endif |
/** |
* This lock protects inactive_as_with_asid_head list. It must be acquired |
* before as_t mutex. |
* This lock serializes access to the ASID subsystem. |
* It protects: |
* - inactive_as_with_asid_head list |
* - as->asid for each as of the as_t type |
* - asids_allocated counter |
*/ |
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock); |
SPINLOCK_INITIALIZE(asidlock); |
/** |
* This list contains address spaces that are not active on any |
178,7 → 182,7 |
else |
as->asid = ASID_INVALID; |
as->refcount = 0; |
atomic_set(&as->refcount, 0); |
as->cpu_refcount = 0; |
#ifdef AS_PAGE_TABLE |
as->genarch.page_table = page_table_create(flags); |
193,26 → 197,45 |
* |
* When there are no tasks referencing this address space (i.e. its refcount is |
* zero), the address space can be destroyed. |
* |
* We know that we don't hold any spinlock. |
*/ |
void as_destroy(as_t *as) |
{ |
ipl_t ipl; |
bool cond; |
DEADLOCK_PROBE_INIT(p_asidlock); |
ASSERT(as->refcount == 0); |
ASSERT(atomic_get(&as->refcount) == 0); |
/* |
* Since there is no reference to this area, |
* it is safe not to lock its mutex. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&inactive_as_with_asid_lock); |
/* |
* We need to avoid deadlock between TLB shootdown and asidlock. |
* We therefore try to take asid conditionally and if we don't succeed, |
* we enable interrupts and try again. This is done while preemption is |
* disabled to prevent nested context switches. We also depend on the |
* fact that so far no spinlocks are held. |
*/ |
preemption_disable(); |
ipl = interrupts_read(); |
retry: |
interrupts_disable(); |
if (!spinlock_trylock(&asidlock)) { |
interrupts_enable(); |
DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD); |
goto retry; |
} |
preemption_enable(); /* Interrupts disabled, enable preemption */ |
if (as->asid != ASID_INVALID && as != AS_KERNEL) { |
if (as != AS && as->cpu_refcount == 0) |
list_remove(&as->inactive_as_with_asid_link); |
asid_put(as->asid); |
} |
spinlock_unlock(&inactive_as_with_asid_lock); |
spinlock_unlock(&asidlock); |
/* |
* Destroy address space areas of the address space. |
411,7 → 434,7 |
int i = 0; |
if (overlaps(b, c * PAGE_SIZE, area->base, |
pages*PAGE_SIZE)) { |
pages * PAGE_SIZE)) { |
if (b + c * PAGE_SIZE <= start_free) { |
/* |
468,15 → 491,16 |
/* |
* Finish TLB shootdown sequence. |
*/ |
tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, |
area->pages - pages); |
tlb_shootdown_finalize(); |
/* |
* Invalidate software translation caches (e.g. TSB on sparc64). |
*/ |
as_invalidate_translation_cache(as, area->base + |
pages * PAGE_SIZE, area->pages - pages); |
tlb_shootdown_finalize(); |
} else { |
/* |
* Growing the area. |
553,7 → 577,7 |
if (area->backend && |
area->backend->frame_free) { |
area->backend->frame_free(area, b + |
j * PAGE_SIZE, PTE_GET_FRAME(pte)); |
j * PAGE_SIZE, PTE_GET_FRAME(pte)); |
} |
page_mapping_remove(as, b + j * PAGE_SIZE); |
page_table_unlock(as, false); |
564,14 → 588,14 |
/* |
* Finish TLB shootdown sequence. |
*/ |
tlb_invalidate_pages(as->asid, area->base, area->pages); |
tlb_shootdown_finalize(); |
/* |
* Invalidate potential software translation caches (e.g. TSB on |
* sparc64). |
*/ |
as_invalidate_translation_cache(as, area->base, area->pages); |
tlb_shootdown_finalize(); |
btree_destroy(&area->used_space); |
613,8 → 637,7 |
* such address space area, EPERM if there was a problem in accepting the area |
* or ENOMEM if there was a problem in allocating destination address space |
* area. ENOTSUP is returned if the address space area backend does not support |
* sharing or if the kernel detects an attempt to create an illegal address |
* alias. |
* sharing. |
*/ |
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, |
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) |
667,20 → 690,6 |
return EPERM; |
} |
#ifdef CONFIG_VIRT_IDX_DCACHE |
if (!(dst_flags_mask & AS_AREA_EXEC)) { |
if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) { |
/* |
* Refuse to create an illegal address alias. |
*/ |
mutex_unlock(&src_area->lock); |
mutex_unlock(&src_as->lock); |
interrupts_restore(ipl); |
return ENOTSUP; |
} |
} |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
/* |
* Now we are committed to sharing the area. |
* First, prepare the area for sharing. |
875,24 → 884,37 |
/** Switch address spaces. |
* |
* Note that this function cannot sleep as it is essentially a part of |
* scheduling. Sleeping here would lead to deadlock on wakeup. |
* scheduling. Sleeping here would lead to deadlock on wakeup. Another |
* thing which is forbidden in this context is locking the address space. |
* |
* When this function is entered, no spinlocks may be held. |
* |
* @param old Old address space or NULL. |
* @param new New address space. |
*/ |
void as_switch(as_t *old_as, as_t *new_as) |
{ |
ipl_t ipl; |
bool needs_asid = false; |
ipl = interrupts_disable(); |
spinlock_lock(&inactive_as_with_asid_lock); |
DEADLOCK_PROBE_INIT(p_asidlock); |
preemption_disable(); |
retry: |
(void) interrupts_disable(); |
if (!spinlock_trylock(&asidlock)) { |
/* |
* Avoid deadlock with TLB shootdown. |
* We can enable interrupts here because |
* preemption is disabled. We should not be |
* holding any other lock. |
*/ |
(void) interrupts_enable(); |
DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD); |
goto retry; |
} |
preemption_enable(); |
/* |
* First, take care of the old address space. |
*/ |
if (old_as) { |
mutex_lock_active(&old_as->lock); |
ASSERT(old_as->cpu_refcount); |
if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { |
/* |
901,11 → 923,10 |
* list of inactive address spaces with assigned |
* ASID. |
*/ |
ASSERT(old_as->asid != ASID_INVALID); |
list_append(&old_as->inactive_as_with_asid_link, |
&inactive_as_with_asid_head); |
ASSERT(old_as->asid != ASID_INVALID); |
list_append(&old_as->inactive_as_with_asid_link, |
&inactive_as_with_asid_head); |
} |
mutex_unlock(&old_as->lock); |
/* |
* Perform architecture-specific tasks when the address space |
917,36 → 938,15 |
/* |
* Second, prepare the new address space. |
*/ |
mutex_lock_active(&new_as->lock); |
if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) { |
if (new_as->asid != ASID_INVALID) { |
if (new_as->asid != ASID_INVALID) |
list_remove(&new_as->inactive_as_with_asid_link); |
} else { |
/* |
* Defer call to asid_get() until new_as->lock is released. |
*/ |
needs_asid = true; |
} |
else |
new_as->asid = asid_get(); |
} |
#ifdef AS_PAGE_TABLE |
SET_PTL0_ADDRESS(new_as->genarch.page_table); |
#endif |
mutex_unlock(&new_as->lock); |
if (needs_asid) { |
/* |
* Allocation of new ASID was deferred |
* until now in order to avoid deadlock. |
*/ |
asid_t asid; |
asid = asid_get(); |
mutex_lock_active(&new_as->lock); |
new_as->asid = asid; |
mutex_unlock(&new_as->lock); |
} |
spinlock_unlock(&inactive_as_with_asid_lock); |
interrupts_restore(ipl); |
/* |
* Perform architecture-specific steps. |
953,6 → 953,8 |
* (e.g. write ASID to hardware register etc.) |
*/ |
as_install_arch(new_as); |
spinlock_unlock(&asidlock); |
AS = new_as; |
} |
/branches/fs/kernel/generic/src/mm/backend_phys.c |
---|
32,7 → 32,8 |
/** |
* @file |
* @brief Backend for address space areas backed by continuous physical memory. |
* @brief Backend for address space areas backed by continuous physical |
* memory. |
*/ |
#include <debug.h> |
62,7 → 63,8 |
* @param addr Faulting virtual address. |
* @param access Access mode that caused the fault (i.e. read/write/exec). |
* |
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced). |
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. |
* serviced). |
*/ |
int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) |
{ |
72,7 → 74,8 |
return AS_PF_FAULT; |
ASSERT(addr - area->base < area->backend_data.frames * FRAME_SIZE); |
page_mapping_insert(AS, addr, base + (addr - area->base), as_area_get_flags(area)); |
page_mapping_insert(AS, addr, base + (addr - area->base), |
as_area_get_flags(area)); |
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) |
panic("Could not insert used space.\n"); |
/branches/fs/kernel/generic/src/mm/frame.c |
---|
70,16 → 70,19 |
typedef struct { |
count_t refcount; /**< tracking of shared frames */ |
uint8_t buddy_order; /**< buddy system block order */ |
link_t buddy_link; /**< link to the next free block inside one order */ |
link_t buddy_link; /**< link to the next free block inside one |
order */ |
void *parent; /**< If allocated by slab, this points there */ |
} frame_t; |
typedef struct { |
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */ |
pfn_t base; /**< frame_no of the first frame in the frames array */ |
pfn_t base; /**< frame_no of the first frame in the frames |
array */ |
count_t count; /**< Size of zone */ |
frame_t *frames; /**< array of frame_t structures in this zone */ |
frame_t *frames; /**< array of frame_t structures in this |
zone */ |
count_t free_count; /**< number of free frame_t structures */ |
count_t busy_count; /**< number of busy frame_t structures */ |
157,8 → 160,8 |
for (i = 0; i < zones.count; i++) { |
/* Check for overflow */ |
z = zones.info[i]; |
if (overlaps(newzone->base,newzone->count, |
z->base, z->count)) { |
if (overlaps(newzone->base,newzone->count, z->base, |
z->count)) { |
printf("Zones overlap!\n"); |
return -1; |
} |
166,7 → 169,7 |
break; |
} |
/* Move other zones up */ |
for (j = i;j < zones.count; j++) |
for (j = i; j < zones.count; j++) |
zones.info[j + 1] = zones.info[j]; |
zones.info[i] = newzone; |
zones.count++; |
202,7 → 205,8 |
z = zones.info[i]; |
spinlock_lock(&z->lock); |
if (z->base <= frame && z->base + z->count > frame) { |
spinlock_unlock(&zones.lock); /* Unlock the global lock */ |
/* Unlock the global lock */ |
spinlock_unlock(&zones.lock); |
if (pzone) |
*pzone = i; |
return z; |
229,7 → 233,8 |
* Assume interrupts are disabled. |
* |
* @param order Size (2^order) of free space we are trying to find |
* @param pzone Pointer to preferred zone or NULL, on return contains zone number |
* @param pzone Pointer to preferred zone or NULL, on return contains zone |
* number |
*/ |
static zone_t * find_free_zone_and_lock(uint8_t order, unsigned int *pzone) |
{ |
273,10 → 278,10 |
* @param order - Order of the parent; must be different from this parameter! |
*/ |
static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child, |
uint8_t order) |
uint8_t order) |
{ |
frame_t * frame; |
zone_t * zone; |
frame_t *frame; |
zone_t *zone; |
index_t index; |
frame = list_get_instance(child, frame_t, buddy_link); |
293,8 → 298,8 |
static void zone_buddy_print_id(buddy_system_t *b, link_t *block) |
{ |
frame_t * frame; |
zone_t * zone; |
frame_t *frame; |
zone_t *zone; |
index_t index; |
frame = list_get_instance(block, frame_t, buddy_link); |
310,16 → 315,17 |
* |
* @return Buddy for given block if found |
*/ |
static link_t * zone_buddy_find_buddy(buddy_system_t *b, link_t * block) |
static link_t *zone_buddy_find_buddy(buddy_system_t *b, link_t *block) |
{ |
frame_t * frame; |
zone_t * zone; |
frame_t *frame; |
zone_t *zone; |
index_t index; |
bool is_left, is_right; |
frame = list_get_instance(block, frame_t, buddy_link); |
zone = (zone_t *) b->data; |
ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), frame->buddy_order)); |
ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), |
frame->buddy_order)); |
is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame); |
is_right = IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame); |
348,8 → 354,8 |
* |
* @return right block |
*/ |
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t * block) { |
frame_t * frame_l, * frame_r; |
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t *block) { |
frame_t *frame_l, *frame_r; |
frame_l = list_get_instance(block, frame_t, buddy_link); |
frame_r = (frame_l + (1 << (frame_l->buddy_order - 1))); |
365,8 → 371,8 |
* |
* @return Coalesced block (actually block that represents lower address) |
*/ |
static link_t * zone_buddy_coalesce(buddy_system_t *b, link_t * block_1, |
link_t * block_2) |
static link_t *zone_buddy_coalesce(buddy_system_t *b, link_t *block_1, |
link_t *block_2) |
{ |
frame_t *frame1, *frame2; |
382,8 → 388,9 |
* @param block Buddy system block |
* @param order Order to set |
*/ |
static void zone_buddy_set_order(buddy_system_t *b, link_t * block, uint8_t order) { |
frame_t * frame; |
static void zone_buddy_set_order(buddy_system_t *b, link_t *block, |
uint8_t order) { |
frame_t *frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->buddy_order = order; |
} |
395,8 → 402,8 |
* |
* @return Order of block |
*/ |
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t * block) { |
frame_t * frame; |
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t *block) { |
frame_t *frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
return frame->buddy_order; |
} |
420,8 → 427,8 |
* @param block Buddy system block |
* |
*/ |
static void zone_buddy_mark_available(buddy_system_t *b, link_t * block) { |
frame_t * frame; |
static void zone_buddy_mark_available(buddy_system_t *b, link_t *block) { |
frame_t *frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->refcount = 0; |
} |
520,8 → 527,8 |
frame = zone_get_frame(zone, frame_idx); |
if (frame->refcount) |
return; |
link = buddy_system_alloc_block(zone->buddy_system, |
&frame->buddy_link); |
link = buddy_system_alloc_block(zone->buddy_system, |
&frame->buddy_link); |
ASSERT(link); |
zone->free_count--; |
} |
545,12 → 552,12 |
pfn_t frame_idx; |
frame_t *frame; |
ASSERT(!overlaps(z1->base,z1->count,z2->base,z2->count)); |
ASSERT(!overlaps(z1->base, z1->count, z2->base, z2->count)); |
ASSERT(z1->base < z2->base); |
spinlock_initialize(&z->lock, "zone_lock"); |
z->base = z1->base; |
z->count = z2->base+z2->count - z1->base; |
z->count = z2->base + z2->count - z1->base; |
z->flags = z1->flags & z2->flags; |
z->free_count = z1->free_count + z2->free_count; |
558,12 → 565,12 |
max_order = fnzb(z->count); |
z->buddy_system = (buddy_system_t *)&z[1]; |
buddy_system_create(z->buddy_system, max_order, |
&zone_buddy_system_operations, |
(void *) z); |
z->buddy_system = (buddy_system_t *) &z[1]; |
buddy_system_create(z->buddy_system, max_order, |
&zone_buddy_system_operations, (void *) z); |
z->frames = (frame_t *)((uint8_t *) z->buddy_system + buddy_conf_size(max_order)); |
z->frames = (frame_t *)((uint8_t *) z->buddy_system + |
buddy_conf_size(max_order)); |
for (i = 0; i < z->count; i++) { |
/* This marks all frames busy */ |
frame_initialize(&z->frames[i]); |
603,7 → 610,7 |
} |
while (zone_can_alloc(z2, 0)) { |
frame_idx = zone_frame_alloc(z2, 0); |
frame = &z->frames[frame_idx + (z2->base-z1->base)]; |
frame = &z->frames[frame_idx + (z2->base - z1->base)]; |
frame->refcount = 0; |
buddy_system_free(z->buddy_system, &frame->buddy_link); |
} |
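A worked example of the merge arithmetic above, with made-up numbers: |
/* z1: base = 0x100, count = 0x080 -> frames 0x100..0x17f |
 * z2: base = 0x200, count = 0x100 -> frames 0x200..0x2ff |
 * merged: base = 0x100, |
 *         count = z2->base + z2->count - z1->base = 0x200 frames. |
 * The merged zone also covers the hole 0x180..0x1ff; those frames stay |
 * marked busy, because only the free frames of z1 and z2 are handed |
 * back to the new buddy system. */ |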
668,7 → 675,7 |
for (i = 0; i < (count_t) (1 << order); i++) { |
frame = &zone->frames[i + frame_idx]; |
frame->buddy_order = 0; |
if (! frame->refcount) |
if (!frame->refcount) |
frame->refcount = 1; |
ASSERT(frame->refcount == 1); |
} |
710,7 → 717,8 |
spinlock_lock(&zone1->lock); |
spinlock_lock(&zone2->lock); |
cframes = SIZE2FRAMES(zone_conf_size(zone2->base+zone2->count-zone1->base)); |
cframes = SIZE2FRAMES(zone_conf_size(zone2->base + zone2->count - |
zone1->base)); |
if (cframes == 1) |
order = 0; |
else |
803,7 → 811,8 |
/* Allocate frames _after_ the conframe */ |
/* Check sizes */ |
z->frames = (frame_t *)((uint8_t *) z->buddy_system + buddy_conf_size(max_order)); |
z->frames = (frame_t *)((uint8_t *) z->buddy_system + |
buddy_conf_size(max_order)); |
for (i = 0; i < count; i++) { |
frame_initialize(&z->frames[i]); |
} |
865,16 → 874,20 |
if (confframe >= start && confframe < start + count) { |
for (; confframe < start + count; confframe++) { |
addr = PFN2ADDR(confframe); |
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.base), config.kernel_size)) |
if (overlaps(addr, PFN2ADDR(confcount), |
KA2PA(config.base), config.kernel_size)) |
continue; |
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.stack_base), config.stack_size)) |
if (overlaps(addr, PFN2ADDR(confcount), |
KA2PA(config.stack_base), config.stack_size)) |
continue; |
bool overlap = false; |
count_t i; |
for (i = 0; i < init.cnt; i++) |
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(init.tasks[i].addr), init.tasks[i].size)) { |
if (overlaps(addr, PFN2ADDR(confcount), |
KA2PA(init.tasks[i].addr), |
init.tasks[i].size)) { |
overlap = true; |
break; |
} |
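The placement loop above leans on overlaps() to test interval intersection; a minimal sketch, assuming the conventional start-plus-size (exclusive end) semantics: |
/* Sketch of the assumed interval-overlap predicate: returns true iff |
 * [s1, s1 + sz1) and [s2, s2 + sz2) intersect. */ |
static inline int overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, |
    size_t sz2) |
{ |
	return (s1 < s2 + sz2) && (s2 < s1 + sz1); |
} |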
915,7 → 928,7 |
spinlock_unlock(&zone->lock); |
} |
void * frame_get_parent(pfn_t pfn, unsigned int hint) |
void *frame_get_parent(pfn_t pfn, unsigned int hint) |
{ |
zone_t *zone = find_zone_and_lock(pfn, &hint); |
void *res; |
1073,15 → 1086,21 |
/* Tell the architecture to create some memory */ |
frame_arch_init(); |
if (config.cpu_active == 1) { |
frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), SIZE2FRAMES(config.kernel_size)); |
frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)), SIZE2FRAMES(config.stack_size)); |
frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), |
SIZE2FRAMES(config.kernel_size)); |
frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)), |
SIZE2FRAMES(config.stack_size)); |
count_t i; |
for (i = 0; i < init.cnt; i++) |
frame_mark_unavailable(ADDR2PFN(KA2PA(init.tasks[i].addr)), SIZE2FRAMES(init.tasks[i].size)); |
for (i = 0; i < init.cnt; i++) { |
pfn_t pfn = ADDR2PFN(KA2PA(init.tasks[i].addr)); |
frame_mark_unavailable(pfn, |
SIZE2FRAMES(init.tasks[i].size)); |
} |
if (ballocs.size) |
frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)), SIZE2FRAMES(ballocs.size)); |
frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)), |
SIZE2FRAMES(ballocs.size)); |
/* Blacklist the first frame; allocating it would return frame zero, |
 * whose address is indistinguishable from NULL in some places */ |
1106,7 → 1125,8 |
for (i = 0; i < zones.count; i++) { |
zone = zones.info[i]; |
spinlock_lock(&zone->lock); |
printf("%-2d %12p %12zd %12zd\n", i, PFN2ADDR(zone->base), zone->free_count, zone->busy_count); |
printf("%-2d %12p %12zd %12zd\n", i, PFN2ADDR(zone->base), |
zone->free_count, zone->busy_count); |
spinlock_unlock(&zone->lock); |
} |
spinlock_unlock(&zones.lock); |
1138,10 → 1158,14 |
spinlock_lock(&zone->lock); |
printf("Memory zone information\n"); |
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2, PFN2ADDR(zone->base)); |
printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10); |
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10); |
printf("Available space: %zd frames (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10); |
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2, |
PFN2ADDR(zone->base)); |
printf("Zone size: %zd frames (%zdK)\n", zone->count, |
((zone->count) * FRAME_SIZE) >> 10); |
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, |
(zone->busy_count * FRAME_SIZE) >> 10); |
printf("Available space: %zd frames (%zdK)\n", zone->free_count, |
(zone->free_count * FRAME_SIZE) >> 10); |
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE); |
spinlock_unlock(&zone->lock); |
1152,3 → 1176,4 |
/** @} |
*/ |
/branches/fs/kernel/generic/src/mm/page.c |
---|
76,7 → 76,7 |
cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); |
for (i = 0; i < cnt; i++) |
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE); |
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE); |
} |
/branches/fs/kernel/generic/src/mm/backend_elf.c |
---|
71,7 → 71,8 |
* @param addr Faulting virtual address. |
* @param access Access mode that caused the fault (i.e. read/write/exec). |
* |
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced). |
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. |
* serviced). |
*/ |
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) |
{ |
80,11 → 81,13 |
btree_node_t *leaf; |
uintptr_t base, frame; |
index_t i; |
bool dirty = false; |
if (!as_area_check_access(area, access)) |
return AS_PF_FAULT; |
ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz)); |
ASSERT((addr >= entry->p_vaddr) && |
(addr < entry->p_vaddr + entry->p_memsz)); |
i = (addr - entry->p_vaddr) >> PAGE_WIDTH; |
base = (uintptr_t) (((void *) elf) + entry->p_offset); |
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base); |
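A worked example of the page-index arithmetic, with made-up numbers and 4 KiB pages: |
/* entry->p_vaddr = 0x1000, addr = 0x3a40: |
 * i = (0x3a40 - 0x1000) >> 12 = 2, so the fault lies in the third |
 * page of the segment, whose file-backed image starts at |
 * base + 2 * FRAME_SIZE. */ |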
107,7 → 110,8 |
*/ |
for (i = 0; i < leaf->keys; i++) { |
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) { |
if (leaf->key[i] == |
ALIGN_DOWN(addr, PAGE_SIZE)) { |
found = true; |
break; |
} |
115,8 → 119,10 |
} |
if (frame || found) { |
frame_reference_add(ADDR2PFN(frame)); |
page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); |
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) |
page_mapping_insert(AS, addr, frame, |
as_area_get_flags(area)); |
if (!used_space_insert(area, |
ALIGN_DOWN(addr, PAGE_SIZE), 1)) |
panic("Could not insert used space.\n"); |
mutex_unlock(&area->sh_info->lock); |
return AS_PF_OK; |
124,10 → 130,12 |
} |
/* |
* The area is either not shared or the pagemap does not contain the mapping. |
* The area is either not shared or the pagemap does not contain the |
* mapping. |
*/ |
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) { |
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < |
entry->p_vaddr + entry->p_filesz) { |
/* |
* Initialized portion of the segment. The memory is backed |
* directly by the content of the ELF image. Pages are |
138,18 → 146,22 |
*/ |
if (entry->p_flags & PF_W) { |
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); |
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE); |
memcpy((void *) PA2KA(frame), |
(void *) (base + i * FRAME_SIZE), FRAME_SIZE); |
dirty = true; |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
btree_insert(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
} else { |
frame = KA2PA(base + i*FRAME_SIZE); |
} |
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { |
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= |
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { |
/* |
* This is the uninitialized portion of the segment. |
* It is not physically present in the ELF image. |
158,11 → 170,13 |
*/ |
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); |
memsetb(PA2KA(frame), FRAME_SIZE, 0); |
dirty = true; |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
btree_insert(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
} else { |
175,12 → 189,15 |
size = entry->p_filesz - (i << PAGE_WIDTH); |
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); |
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0); |
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size); |
memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE), |
size); |
dirty = true; |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
btree_insert(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
} |
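The three branches above partition each segment page by p_filesz and p_memsz; a worked example with made-up numbers (PAGE_SIZE = 0x1000): |
/* p_vaddr = 0x1000, p_filesz = 0x1800, p_memsz = 0x3000: |
 * page 0x1000: 0x1000 + 0x1000 < 0x2800 -> fully backed by the image |
 * page 0x2000: neither test holds -> mixed case; size = 0x1800 - 0x1000 |
 *              = 0x800 bytes come from the image, the rest is zeroed |
 * page 0x3000: 0x3000 >= ALIGN_UP(0x2800, 0x1000) -> anonymous, zeroed |
 */ |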
211,31 → 228,28 |
uintptr_t base; |
index_t i; |
ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz)); |
ASSERT((page >= entry->p_vaddr) && |
(page < entry->p_vaddr + entry->p_memsz)); |
i = (page - entry->p_vaddr) >> PAGE_WIDTH; |
base = (uintptr_t) (((void *) elf) + entry->p_offset); |
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base); |
if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { |
if (page + PAGE_SIZE < |
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { |
if (entry->p_flags & PF_W) { |
/* |
* Free the frame with the copy of writable segment data. |
* Free the frame with the copy of writable segment |
* data. |
*/ |
frame_free(frame); |
#ifdef CONFIG_VIRT_IDX_DCACHE |
dcache_flush_frame(page, frame); |
#endif |
} |
} else { |
/* |
* The frame is either anonymous memory or the mixed case (i.e. lower |
* part is backed by the ELF image and the upper is anonymous). |
* In any case, a frame needs to be freed. |
*/ |
frame_free(frame); |
#ifdef CONFIG_VIRT_IDX_DCACHE |
dcache_flush_frame(page, frame); |
#endif |
* The frame is either anonymous memory or the mixed case (i.e. |
* lower part is backed by the ELF image and the upper is |
* anonymous). In any case, a frame needs to be freed. |
*/ |
frame_free(frame); |
} |
} |
260,10 → 274,12 |
* Find the node in which to start linear search. |
*/ |
if (area->flags & AS_AREA_WRITE) { |
node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link); |
node = list_get_instance(area->used_space.leaf_head.next, |
btree_node_t, leaf_link); |
} else { |
(void) btree_search(&area->sh_info->pagemap, start_anon, &leaf); |
node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap, leaf); |
node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap, |
leaf); |
if (!node) |
node = leaf; |
} |
272,7 → 288,8 |
* Copy used anonymous portions of the area to sh_info's page map. |
*/ |
mutex_lock(&area->sh_info->lock); |
for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; cur = cur->next) { |
for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; |
cur = cur->next) { |
int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
294,19 → 311,26 |
pte_t *pte; |
/* |
* Skip read-only pages that are backed by the ELF image. |
* Skip read-only pages that are backed by the |
* ELF image. |
*/ |
if (!(area->flags & AS_AREA_WRITE)) |
if (base + (j + 1)*PAGE_SIZE <= start_anon) |
if (base + (j + 1) * PAGE_SIZE <= |
start_anon) |
continue; |
page_table_lock(area->as, false); |
pte = page_mapping_find(area->as, base + j*PAGE_SIZE); |
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); |
btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base, |
pte = page_mapping_find(area->as, |
base + j * PAGE_SIZE); |
ASSERT(pte && PTE_VALID(pte) && |
PTE_PRESENT(pte)); |
btree_insert(&area->sh_info->pagemap, |
(base + j * PAGE_SIZE) - area->base, |
(void *) PTE_GET_FRAME(pte), NULL); |
page_table_unlock(area->as, false); |
frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte))); |
pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte)); |
frame_reference_add(pfn); |
} |
} |
/branches/fs/kernel/generic/src/syscall/syscall.c |
---|
100,7 → 100,7 |
if (id < SYSCALL_END) |
rc = syscall_table[id](a1, a2, a3, a4); |
else { |
klog_printf("TASK %lld: Unknown syscall id %d",TASK->taskid,id); |
klog_printf("TASK %llu: Unknown syscall id %d",TASK->taskid,id); |
task_kill(TASK->taskid); |
thread_exit(); |
} |
118,6 → 118,7 |
/* Thread and task related syscalls. */ |
(syshandler_t) sys_thread_create, |
(syshandler_t) sys_thread_exit, |
(syshandler_t) sys_thread_get_id, |
(syshandler_t) sys_task_get_id, |
/* Synchronization related syscalls. */ |
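The sys_thread_get_id handler slotted into the table above pairs with the prototype added in thread.h; a minimal sketch of what such a handler can look like (an assumption, not the actual implementation), reusing copy_to_uspace() as elsewhere in this changeset: |
unative_t sys_thread_get_id(thread_id_t *uspace_thread_id) |
{ |
	/* No locking needed: tid is constant for the lifetime of the |
	 * thread and the calling thread cannot disappear under us. */ |
	return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid, |
	    sizeof(THREAD->tid)); |
} |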
/branches/fs/kernel/generic/src/ipc/ipcrsc.c |
---|
34,8 → 34,8 |
/* IPC resource management |
* |
* The goal of this source code is to properly manage IPC resources |
* and allow straight and clean clean-up procedure upon task termination. |
* The goal of this source code is to properly manage IPC resources and allow |
* a straightforward and clean clean-up procedure upon task termination. |
* |
* The pattern of usage of the resources is: |
* - allocate empty phone slot, connect | deallocate slot |
47,24 → 47,24 |
* |
* Locking strategy |
* |
* - To use a phone, disconnect a phone etc., the phone must be |
* first locked and then checked that it is connected |
* - To connect an allocated phone it need not be locked (assigning |
* pointer is atomic on all platforms) |
* - To use a phone, disconnect a phone etc., the phone must first be locked |
* and then checked that it is connected |
* - To connect an allocated phone, it need not be locked (assigning a pointer |
* is atomic on all platforms) |
* |
* - To find an empty phone slot, the TASK must be locked |
* - To answer a message, the answerbox must be locked |
* - The locking of phone and answerbox is done at the ipc_ level. |
* It is perfectly correct to pass unconnected phone to these functions |
* and proper reply will be generated. |
* It is perfectly correct to pass an unconnected phone to these functions; |
* a proper reply will be generated. |
* |
* Locking order |
* |
* - first phone, then answerbox |
* + Easy locking on calls |
* - Very hard traversing list of phones when disconnecting because |
* the phones may disconnect during traversal of list of connected phones. |
* The only possibility is try_lock with restart of list traversal. |
* - Very hard traversing list of phones when disconnecting because the phones |
* may disconnect during traversal of list of connected phones. The only |
* possibility is try_lock with restart of list traversal. |
* |
* Since destroying is less frequent, this approach is taken. |
* |
71,24 → 71,23 |
* Phone call |
* |
* *** Connect_me_to *** |
* The caller sends IPC_M_CONNECT_ME_TO to an answerbox. The server |
* receives 'phoneid' of the connecting phone as an ARG3. If it answers |
* with RETVAL=0, the phonecall is accepted, otherwise it is refused. |
* The caller sends IPC_M_CONNECT_ME_TO to an answerbox. The server receives |
* 'phoneid' of the connecting phone as an ARG3. If it answers with RETVAL=0, |
* the phonecall is accepted, otherwise it is refused. |
* |
* *** Connect_to_me *** |
* The caller sends IPC_M_CONNECT_TO_ME, with special |
* The server receives an automatically |
* opened phoneid. If it accepts (RETVAL=0), it can use the phoneid |
* immediately. |
* Possible race condition can arise, when the client receives messages |
* from new connection before getting response for connect_to_me message. |
* Userspace should implement handshake protocol that would control it. |
* The caller sends IPC_M_CONNECT_TO_ME. |
* The server receives an automatically opened phoneid. If it accepts |
* (RETVAL=0), it can use the phoneid immediately. |
* A race condition can arise when the client receives messages from the new |
* connection before getting the response to the connect_to_me message. |
* Userspace should implement a handshake protocol to control this. |
* |
* Phone hangup |
* |
* *** The caller hangs up (sys_ipc_hangup) *** |
* - The phone is disconnected (no more messages can be sent over this phone), |
* all in-progress messages are correctly handled. The anwerbox receives |
* all in-progress messages are correctly handled. The answerbox receives |
* IPC_M_PHONE_HUNGUP call from the phone that hung up. When all async |
* calls are answered, the phone is deallocated. |
* |
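To illustrate the connect_me_to protocol described above, a hypothetical server-side acceptance loop (userspace; ipc_wait_for_call() and ipc_answer_fast() are assumed wrapper names): |
ipc_call_t call; |
ipc_callid_t callid; |
while (1) { |
	callid = ipc_wait_for_call(&call); |
	if (IPC_GET_METHOD(call) == IPC_M_CONNECT_ME_TO) { |
		/* ARG3 carries the identification of the new phone; |
		 * answering with RETVAL == 0 accepts the connection. */ |
		ipc_answer_fast(callid, 0, 0, 0); |
	} else { |
		/* Refuse anything else on this answerbox. */ |
		ipc_answer_fast(callid, ELIMIT, 0, 0); |
	} |
} |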
/branches/fs/kernel/generic/src/ipc/sysipc.c |
---|
49,12 → 49,12 |
#include <mm/as.h> |
#include <print.h> |
#define GET_CHECK_PHONE(phone,phoneid,err) { \ |
#define GET_CHECK_PHONE(phone, phoneid, err) { \ |
if (phoneid >= IPC_MAX_PHONES) { err; } \ |
phone = &TASK->phones[phoneid]; \ |
} |
#define STRUCT_TO_USPACE(dst,src) copy_to_uspace(dst,src,sizeof(*(src))) |
#define STRUCT_TO_USPACE(dst, src) copy_to_uspace(dst, src, sizeof(*(src))) |
/** Return true if the method is a system method */ |
static inline int is_system_method(unative_t method) |
71,8 → 71,8 |
*/ |
static inline int is_forwardable(unative_t method) |
{ |
if (method == IPC_M_PHONE_HUNGUP || method == IPC_M_AS_AREA_SEND \ |
|| method == IPC_M_AS_AREA_RECV) |
if (method == IPC_M_PHONE_HUNGUP || method == IPC_M_AS_AREA_SEND || |
method == IPC_M_AS_AREA_RECV) |
return 0; /* This message is meant only for the receiver */ |
return 1; |
} |
130,18 → 130,20 |
phone_dealloc(phoneid); |
} else { |
/* The connection was accepted */ |
phone_connect(phoneid,&answer->sender->answerbox); |
phone_connect(phoneid, &answer->sender->answerbox); |
/* Set 'phone identification' as arg3 of response */ |
IPC_SET_ARG3(answer->data, (unative_t)&TASK->phones[phoneid]); |
IPC_SET_ARG3(answer->data, |
(unative_t) &TASK->phones[phoneid]); |
} |
} else if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECT_ME_TO) { |
/* If the users accepted call, connect */ |
if (!IPC_GET_RETVAL(answer->data)) { |
ipc_phone_connect((phone_t *)IPC_GET_ARG3(*olddata), |
&TASK->answerbox); |
ipc_phone_connect((phone_t *) IPC_GET_ARG3(*olddata), |
&TASK->answerbox); |
} |
} else if (IPC_GET_METHOD(*olddata) == IPC_M_AS_AREA_SEND) { |
if (!IPC_GET_RETVAL(answer->data)) { /* Accepted, handle as_area receipt */ |
if (!IPC_GET_RETVAL(answer->data)) { |
/* Accepted, handle as_area receipt */ |
ipl_t ipl; |
int rc; |
as_t *as; |
152,8 → 154,9 |
spinlock_unlock(&answer->sender->lock); |
interrupts_restore(ipl); |
rc = as_area_share(as, IPC_GET_ARG1(*olddata), IPC_GET_ARG2(*olddata), |
AS, IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata)); |
rc = as_area_share(as, IPC_GET_ARG1(*olddata), |
IPC_GET_ARG2(*olddata), AS, |
IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata)); |
IPC_SET_RETVAL(answer->data, rc); |
return rc; |
} |
169,8 → 172,9 |
spinlock_unlock(&answer->sender->lock); |
interrupts_restore(ipl); |
rc = as_area_share(AS, IPC_GET_ARG1(answer->data), IPC_GET_ARG2(*olddata), |
as, IPC_GET_ARG1(*olddata), IPC_GET_ARG2(answer->data)); |
rc = as_area_share(AS, IPC_GET_ARG1(answer->data), |
IPC_GET_ARG2(*olddata), as, IPC_GET_ARG1(*olddata), |
IPC_GET_ARG2(answer->data)); |
IPC_SET_RETVAL(answer->data, rc); |
} |
} |
192,7 → 196,7 |
if (newphid < 0) |
return ELIMIT; |
/* Set arg3 for server */ |
IPC_SET_ARG3(call->data, (unative_t)&TASK->phones[newphid]); |
IPC_SET_ARG3(call->data, (unative_t) &TASK->phones[newphid]); |
call->flags |= IPC_CALL_CONN_ME_TO; |
call->priv = newphid; |
break; |
217,8 → 221,8 |
/** Do basic kernel processing of received call answer */ |
static void process_answer(call_t *call) |
{ |
if (IPC_GET_RETVAL(call->data) == EHANGUP && \ |
call->flags & IPC_CALL_FORWARDED) |
if (IPC_GET_RETVAL(call->data) == EHANGUP && |
(call->flags & IPC_CALL_FORWARDED)) |
IPC_SET_RETVAL(call->data, EFORWARD); |
if (call->flags & IPC_CALL_CONN_ME_TO) { |
233,7 → 237,7 |
* |
* @return 0 if the call should be passed to userspace, 1 to ignore the call |
*/ |
static int process_request(answerbox_t *box,call_t *call) |
static int process_request(answerbox_t *box, call_t *call) |
{ |
int phoneid; |
254,8 → 258,8 |
* @return Call identification; returns -1 on fatal error, |
* -2 on 'Too many async requests, handle answers first'. |
*/ |
unative_t sys_ipc_call_sync_fast(unative_t phoneid, unative_t method, |
unative_t arg1, ipc_data_t *data) |
{ |
call_t call; |
phone_t *phone; |
278,8 → 282,8 |
} |
/** Synchronous IPC call that allows sending a whole message */ |
unative_t sys_ipc_call_sync(unative_t phoneid, ipc_data_t *question, |
ipc_data_t *reply) |
{ |
call_t call; |
phone_t *phone; |
287,7 → 291,8 |
int rc; |
ipc_call_static_init(&call); |
rc = copy_from_uspace(&call.data.args, &question->args, sizeof(call.data.args)); |
rc = copy_from_uspace(&call.data.args, &question->args, |
sizeof(call.data.args)); |
if (rc != 0) |
return (unative_t) rc; |
324,8 → 329,8 |
* @return Call identification; returns -1 on fatal error, |
* -2 on 'Too many async requests, handle answers first'. |
*/ |
unative_t sys_ipc_call_async_fast(unative_t phoneid, unative_t method, |
unative_t arg1, unative_t arg2) |
{ |
call_t *call; |
phone_t *phone; |
342,7 → 347,7 |
IPC_SET_ARG2(call->data, arg2); |
IPC_SET_ARG3(call->data, 0); |
if (!(res=request_preprocess(call))) |
if (!(res = request_preprocess(call))) |
ipc_call(phone, call); |
else |
ipc_backsend_err(phone, call, res); |
367,12 → 372,13 |
GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL); |
call = ipc_call_alloc(0); |
rc = copy_from_uspace(&call->data.args, &data->args, sizeof(call->data.args)); |
rc = copy_from_uspace(&call->data.args, &data->args, |
sizeof(call->data.args)); |
if (rc != 0) { |
ipc_call_free(call); |
return (unative_t) rc; |
} |
if (!(res=request_preprocess(call))) |
if (!(res = request_preprocess(call))) |
ipc_call(phone, call); |
else |
ipc_backsend_err(phone, call, res); |
388,7 → 394,7 |
* arg3 is not rewritten for certain system IPC |
*/ |
unative_t sys_ipc_forward_fast(unative_t callid, unative_t phoneid, |
unative_t method, unative_t arg1) |
{ |
call_t *call; |
phone_t *phone; |
429,8 → 435,8 |
} |
/** Send IPC answer */ |
unative_t sys_ipc_answer_fast(unative_t callid, unative_t retval, |
unative_t arg1, unative_t arg2) |
{ |
call_t *call; |
ipc_data_t saved_data; |
480,7 → 486,7 |
saveddata = 1; |
} |
rc = copy_from_uspace(&call->data.args, &data->args, |
sizeof(call->data.args)); |
if (rc != 0) |
return rc; |
508,9 → 514,10 |
/** Wait for an incoming IPC call or answer |
* |
* @param calldata Pointer to buffer where the call/answer data is stored |
* @param usec Timeout. See waitq_sleep_timeout() for explanation. |
* @param flags Select mode of sleep operation. See waitq_sleep_timeout() for explanation. |
* @param calldata Pointer to buffer where the call/answer data is stored. |
* @param usec Timeout. See waitq_sleep_timeout() for explanation. |
* @param flags Select mode of sleep operation. See waitq_sleep_timeout() |
* for explanation. |
* |
* @return Callid; if callid & 1, then the call is an answer |
*/ |
519,7 → 526,8 |
call_t *call; |
restart: |
call = ipc_wait_for_call(&TASK->answerbox, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE); |
call = ipc_wait_for_call(&TASK->answerbox, usec, |
flags | SYNCH_FLAGS_INTERRUPTIBLE); |
if (!call) |
return 0; |
574,7 → 582,8 |
* |
* @return EPERM or a return code returned by ipc_irq_register(). |
*/ |
unative_t sys_ipc_register_irq(inr_t inr, devno_t devno, unative_t method, irq_code_t *ucode) |
unative_t sys_ipc_register_irq(inr_t inr, devno_t devno, unative_t method, |
irq_code_t *ucode) |
{ |
if (!(cap_get(TASK) & CAP_IRQ_REG)) |
return EPERM; |
/branches/fs/kernel/generic/src/ipc/ipc.c |
---|
163,7 → 163,7 |
spinlock_lock(&callerbox->lock); |
list_append(&call->link, &callerbox->answers); |
spinlock_unlock(&callerbox->lock); |
waitq_wakeup(&callerbox->wq, 0); |
waitq_wakeup(&callerbox->wq, WAKEUP_FIRST); |
} |
/** Answer a message that is in the callee queue |
205,7 → 205,7 |
spinlock_lock(&box->lock); |
list_append(&call->link, &box->calls); |
spinlock_unlock(&box->lock); |
waitq_wakeup(&box->wq, 0); |
waitq_wakeup(&box->wq, WAKEUP_FIRST); |
} |
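Replacing the bare 0 with WAKEUP_FIRST makes the wakeup mode explicit; the assumed shape of the mode type (a sketch; the real definition lives in waitq.h): |
/* Assumed wakeup modes for waitq_wakeup() (sketch): */ |
typedef enum { |
	WAKEUP_FIRST = 0,	/* wake up at most one sleeping thread */ |
	WAKEUP_ALL		/* wake up all sleeping threads */ |
} wakeup_mode_t; |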
/** Send an asynchronous request using a phone to an answerbox |
374,6 → 374,7 |
int i; |
call_t *call; |
phone_t *phone; |
DEADLOCK_PROBE_INIT(p_phonelck); |
/* Disconnect all our phones ('ipc_phone_hangup') */ |
for (i = 0; i < IPC_MAX_PHONES; i++) |
387,9 → 388,10 |
spinlock_lock(&TASK->answerbox.lock); |
while (!list_empty(&TASK->answerbox.connected_phones)) { |
phone = list_get_instance(TASK->answerbox.connected_phones.next, |
phone_t, link); |
if (!spinlock_trylock(&phone->lock)) { |
spinlock_unlock(&TASK->answerbox.lock); |
DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD); |
goto restart_phones; |
} |
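The DEADLOCK_PROBE_INIT()/DEADLOCK_PROBE() pair added here instruments the trylock-and-restart pattern; a plausible sketch of such macros (the definitions below are assumptions): |
/* Count failed lock acquisitions and complain once a threshold is |
 * crossed, then reset the counter. */ |
#define DEADLOCK_PROBE_INIT(pname)	count_t pname = 0 |
#define DEADLOCK_PROBE(pname, value) \ |
	if ((pname)++ > (value)) { \ |
		(pname) = 0; \ |
		printf("Deadlock probe %s: exceeded threshold %u\n", \ |
		    #pname, (unsigned) (value)); \ |
	} |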
500,7 → 502,7 |
printf("ABOX - CALLS:\n"); |
for (tmp = task->answerbox.calls.next; tmp != &task->answerbox.calls; |
    tmp = tmp->next) { |
call = list_get_instance(tmp, call_t, link); |
printf("Callid: %p Srctask:%lld M:%d A1:%d A2:%d A3:%d Flags:%x\n",call, |
printf("Callid: %p Srctask:%llu M:%d A1:%d A2:%d A3:%d Flags:%x\n",call, |
call->sender->taskid, IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data), |
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), call->flags); |
} |
510,7 → 512,7 |
tmp != &task->answerbox.dispatched_calls; |
tmp = tmp->next) { |
call = list_get_instance(tmp, call_t, link); |
printf("Callid: %p Srctask:%lld M:%d A1:%d A2:%d A3:%d Flags:%x\n",call, |
printf("Callid: %p Srctask:%llu M:%d A1:%d A2:%d A3:%d Flags:%x\n",call, |
call->sender->taskid, IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data), |
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), call->flags); |
} |
/branches/fs/kernel/generic/src/ipc/irq.c |
---|
336,6 → 336,7 |
while (box->irq_head.next != &box->irq_head) { |
link_t *cur = box->irq_head.next; |
irq_t *irq; |
DEADLOCK_PROBE_INIT(p_irqlock); |
irq = list_get_instance(cur, irq_t, notif_cfg.link); |
if (!spinlock_trylock(&irq->lock)) { |
344,6 → 345,7 |
*/ |
spinlock_unlock(&box->irq_lock); |
interrupts_restore(ipl); |
DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD); |
goto loop; |
} |