/branches/rcu/kernel/generic/include/print.h |
---|
40,7 → 40,7 |
#include <arch/arg.h> |
/* We need this address in spinlock to avoid deadlock in deadlock detection */ |
SPINLOCK_EXTERN(printflock); |
SPINLOCK_EXTERN(printf_lock); |
#define EOF (-1) |
/branches/rcu/kernel/generic/include/time/clock.h |
---|
35,8 → 35,19 |
#ifndef KERN_CLOCK_H_ |
#define KERN_CLOCK_H_ |
#include <arch/types.h> |
#define HZ 100 |
/** Uptime structure */ |
typedef struct { |
unative_t seconds1; |
unative_t useconds; |
unative_t seconds2; |
} uptime_t; |
extern uptime_t *uptime; |
extern void clock(void); |
extern void clock_counter_init(void); |
/branches/rcu/kernel/generic/include/proc/task.h |
---|
90,10 → 90,10 |
 * Active asynchronous messages. It is used for limiting uspace to |
 * a certain extent. |
*/ |
atomic_t active_calls; |
atomic_t active_calls; |
/** Architecture specific task data. */ |
task_arch_t arch; |
task_arch_t arch; |
/** |
* Serializes access to the B+tree of task's futexes. This mutex is |
111,6 → 111,7 |
extern btree_t tasks_btree; |
extern void task_init(void); |
extern void task_done(void); |
extern task_t *task_create(as_t *as, char *name); |
extern void task_destroy(task_t *t); |
extern task_t *task_run_program(void *program_addr, char *name); |
/branches/rcu/kernel/generic/include/proc/thread.h |
---|
53,7 → 53,11 |
/* Thread flags */ |
/** Thread cannot be migrated to another CPU. */ |
/** Thread cannot be migrated to another CPU. |
* |
* When using this flag, the caller must set cpu in the thread_t |
* structure manually before calling thread_ready (even on uniprocessor). |
*/ |
#define THREAD_FLAG_WIRED (1 << 0) |
/** Thread was migrated to another CPU and has not run yet. */ |
#define THREAD_FLAG_STOLEN (1 << 1) |
193,7 → 197,7 |
/** Thread's priority. Implemented as index to CPU->rq */ |
int priority; |
/** Thread ID. */ |
uint32_t tid; |
thread_id_t tid; |
/** Architecture-specific data. */ |
thread_arch_t arch; |
248,8 → 252,9 |
extern slab_cache_t *fpu_context_slab; |
/* Thread syscall prototypes. */ |
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name); |
unative_t sys_thread_exit(int uspace_status); |
extern unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name, thread_id_t *uspace_thread_id); |
extern unative_t sys_thread_exit(int uspace_status); |
extern unative_t sys_thread_get_id(thread_id_t *uspace_thread_id); |
#endif |
/branches/rcu/kernel/generic/include/interrupt.h |
---|
48,7 → 48,7 |
#define fault_if_from_uspace(istate, cmd, ...) \ |
{ \ |
if (istate_from_uspace(istate)) { \ |
klog_printf("Task %lld killed due to an exception at %p.", TASK->taskid, istate_get_pc(istate)); \ |
klog_printf("Task %llu killed due to an exception at %p.", TASK->taskid, istate_get_pc(istate)); \ |
klog_printf(" " cmd, ##__VA_ARGS__); \ |
task_kill(TASK->taskid); \ |
thread_exit(); \ |
/branches/rcu/kernel/generic/include/synch/mutex.h |
---|
44,13 → 44,11 |
} mutex_t; |
#define mutex_lock(mtx) \ |
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE) |
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE) |
#define mutex_trylock(mtx) \ |
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING) |
#define mutex_lock_timeout(mtx,usec) \ |
_mutex_lock_timeout((mtx),(usec),SYNCH_FLAGS_NON_BLOCKING) |
#define mutex_lock_active(mtx) \ |
while (mutex_trylock((mtx)) != ESYNCH_OK_ATOMIC) |
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING) |
#define mutex_lock_timeout(mtx, usec) \ |
_mutex_lock_timeout((mtx), (usec), SYNCH_FLAGS_NON_BLOCKING) |
extern void mutex_initialize(mutex_t *mtx); |
extern int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags); |
/branches/rcu/kernel/generic/include/synch/spinlock.h |
---|
101,8 → 101,26 |
preemption_enable(); |
} |
#ifdef CONFIG_DEBUG_SPINLOCK |
extern int printf(const char *, ...); |
#define DEADLOCK_THRESHOLD 100000000 |
#define DEADLOCK_PROBE_INIT(pname) count_t pname = 0 |
#define DEADLOCK_PROBE(pname, value) \ |
if ((pname)++ > (value)) { \ |
(pname) = 0; \ |
printf("Deadlock probe %s: exceeded threshold %d\n", \ |
"cpu%d: function=%s, line=%d\n", \ |
#pname, (value), CPU->id, __FUNCTION__, __LINE__); \ |
} |
#else |
#define DEADLOCK_PROBE_INIT(pname) |
#define DEADLOCK_PROBE(pname, value) |
#endif |
#else |
/* On UP systems, spinlocks are effectively left out. */ |
#define SPINLOCK_DECLARE(name) |
#define SPINLOCK_EXTERN(name) |
113,6 → 131,9 |
#define spinlock_trylock(x) (preemption_disable(), 1) |
#define spinlock_unlock(x) preemption_enable() |
#define DEADLOCK_PROBE_INIT(pname) |
#define DEADLOCK_PROBE(pname, value) |
#endif |
#endif |
/branches/rcu/kernel/generic/include/ddi/irq.h |
---|
121,6 → 121,14 |
* this lock must not be taken first. |
*/ |
SPINLOCK_DECLARE(lock); |
/** Send EOI before processing the interrupt. |
* This is essential for timer interrupt which |
* has to be acknowledged before doing preemption |
* to make sure another timer interrupt will |
* be eventually generated. |
*/ |
bool preack; |
/** Unique device number. -1 if not yet assigned. */ |
devno_t devno; |
127,7 → 135,7 |
/** Actual IRQ number. -1 if not yet assigned. */ |
inr_t inr; |
/** Trigger level of the IRQ.*/ |
/** Trigger level of the IRQ. */ |
irq_trigger_t trigger; |
/** Claim ownership of the IRQ. */ |
irq_ownership_t (* claim)(void); |
/branches/rcu/kernel/generic/include/printf/printf_core.h |
---|
47,7 → 47,7 |
}; |
int printf_core(const char *fmt, struct printf_spec *ps ,va_list ap); |
int printf_core(const char *fmt, struct printf_spec *ps, va_list ap); |
#endif |
/branches/rcu/kernel/generic/include/arch.h |
---|
74,8 → 74,12 |
extern void arch_post_cpu_init(void); |
extern void arch_pre_smp_init(void); |
extern void arch_post_smp_init(void); |
extern void calibrate_delay_loop(void); |
extern void reboot(void); |
extern void arch_reboot(void); |
#endif |
/** @} |
/branches/rcu/kernel/generic/include/mm/as.h |
---|
101,11 → 101,11 |
*/ |
asid_t asid; |
/** Number of references (i.e tasks that reference this as). */ |
atomic_t refcount; |
mutex_t lock; |
/** Number of references (i.e tasks that reference this as). */ |
count_t refcount; |
/** B+tree of address space areas. */ |
btree_t as_area_btree; |
147,11 → 147,11 |
*/ |
asid_t asid; |
/** Number of references (i.e tasks that reference this as). */ |
atomic_t refcount; |
mutex_t lock; |
/** Number of references (i.e tasks that reference this as). */ |
count_t refcount; |
/** B+tree of address space areas. */ |
btree_t as_area_btree; |
/branches/rcu/kernel/generic/include/syscall/syscall.h |
---|
40,6 → 40,7 |
SYS_TLS_SET = 1, /* Hardcoded in AMD64, IA32 uspace - psthread.S */ |
SYS_THREAD_CREATE, |
SYS_THREAD_EXIT, |
SYS_THREAD_GET_ID, |
SYS_TASK_GET_ID, |
SYS_FUTEX_SLEEP, |
SYS_FUTEX_WAKEUP, |
/branches/rcu/kernel/generic/src/synch/spinlock.c |
---|
73,7 → 73,6 |
* @param sl Pointer to spinlock_t structure. |
*/ |
#ifdef CONFIG_DEBUG_SPINLOCK |
#define DEADLOCK_THRESHOLD 100000000 |
void spinlock_lock_debug(spinlock_t *sl) |
{ |
count_t i = 0; |
84,7 → 83,7 |
while (test_and_set(&sl->val)) { |
/* |
* We need to be careful about printflock and fb_lock. |
* We need to be careful about printf_lock and fb_lock. |
* Both of them are used to report deadlocks via |
* printf() and fb_putchar(). |
* |
94,13 → 93,13 |
* However, we encountered false positives caused by very |
* slow VESA framebuffer interaction (especially when |
* run in a simulator) that caused problems with both |
* printflock and fb_lock. |
* printf_lock and fb_lock. |
* |
* Possible deadlocks on both printflock and fb_lock |
* Possible deadlocks on both printf_lock and fb_lock |
* are therefore not reported as they would cause an |
* infinite recursion. |
*/ |
if (sl == &printflock) |
if (sl == &printf_lock) |
continue; |
#ifdef CONFIG_FB |
if (sl == &fb_lock) |
/branches/rcu/kernel/generic/src/main/kinit.c |
---|
118,7 → 118,7 |
#ifdef CONFIG_SMP |
if (config.cpu_count > 1) { |
unsigned int i; |
count_t i; |
/* |
* For each CPU, create its load balancing thread. |
/branches/rcu/kernel/generic/src/main/main.c |
---|
82,6 → 82,7 |
#include <smp/smp.h> |
#include <ddi/ddi.h> |
#include <proc/tasklet.h> |
#include <synch/rcu.h> |
/** Global configuration structure. */ |
config_t config; |
275,6 → 276,8 |
tasklet_run_tasklet_thread(k); |
rcu_init(); |
/* |
* This call to scheduler() will return to kinit, |
* starting the thread of kernel threads. |
/branches/rcu/kernel/generic/src/time/timeout.c |
---|
45,7 → 45,6 |
#include <arch/asm.h> |
#include <arch.h> |
/** Initialize timeouts |
* |
* Initialize kernel timeouts. |
175,6 → 174,7 |
timeout_t *hlp; |
link_t *l; |
ipl_t ipl; |
DEADLOCK_PROBE_INIT(p_tolock); |
grab_locks: |
ipl = interrupts_disable(); |
186,7 → 186,8 |
} |
if (!spinlock_trylock(&t->cpu->timeoutlock)) { |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
interrupts_restore(ipl); |
DEADLOCK_PROBE(p_tolock, DEADLOCK_THRESHOLD); |
goto grab_locks; |
} |
/branches/rcu/kernel/generic/src/time/clock.c |
---|
41,7 → 41,6 |
#include <time/clock.h> |
#include <time/timeout.h> |
#include <arch/types.h> |
#include <config.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
57,16 → 56,12 |
#include <mm/frame.h> |
#include <ddi/ddi.h> |
/** Physical memory area of the real time clock. */ |
/* Pointer to variable with uptime */ |
uptime_t *uptime; |
/** Physical memory area of the real time clock */ |
static parea_t clock_parea; |
/* Pointers to public variables with time */ |
struct ptime { |
unative_t seconds1; |
unative_t useconds; |
unative_t seconds2; |
}; |
struct ptime *public_time; |
/* Variable holding the fraction of a second, so that we can update |
* seconds correctly |
*/ |
86,15 → 81,14 |
if (!faddr) |
panic("Cannot allocate page for clock"); |
public_time = (struct ptime *) PA2KA(faddr); |
uptime = (uptime_t *) PA2KA(faddr); |
uptime->seconds1 = 0; |
uptime->seconds2 = 0; |
uptime->useconds = 0; |
/* TODO: We would need some arch dependent settings here */ |
public_time->seconds1 = 0; |
public_time->seconds2 = 0; |
public_time->useconds = 0; |
clock_parea.pbase = (uintptr_t) faddr; |
clock_parea.vbase = (uintptr_t) public_time; |
clock_parea.vbase = (uintptr_t) uptime; |
clock_parea.frames = 1; |
clock_parea.cacheable = true; |
ddi_parea_register(&clock_parea); |
116,16 → 110,16 |
static void clock_update_counters(void) |
{ |
if (CPU->id == 0) { |
secfrag += 1000000/HZ; |
secfrag += 1000000 / HZ; |
if (secfrag >= 1000000) { |
secfrag -= 1000000; |
public_time->seconds1++; |
uptime->seconds1++; |
write_barrier(); |
public_time->useconds = secfrag; |
uptime->useconds = secfrag; |
write_barrier(); |
public_time->seconds2 = public_time->seconds1; |
uptime->seconds2 = uptime->seconds1; |
} else |
public_time->useconds += 1000000/HZ; |
uptime->useconds += 1000000 / HZ; |
} |
} |
/branches/rcu/kernel/generic/src/ddi/irq.c |
---|
138,6 → 138,7 |
{ |
link_initialize(&irq->link); |
spinlock_initialize(&irq->lock, "irq.lock"); |
irq->preack = false; |
irq->inr = -1; |
irq->devno = -1; |
irq->trigger = (irq_trigger_t) 0; |
/branches/rcu/kernel/generic/src/printf/snprintf.c |
---|
50,4 → 50,3 |
/** @} |
*/ |
/branches/rcu/kernel/generic/src/printf/vprintf.c |
---|
35,7 → 35,11 |
#include <print.h> |
#include <printf/printf_core.h> |
#include <putchar.h> |
#include <synch/spinlock.h> |
#include <arch/asm.h> |
SPINLOCK_INITIALIZE(printf_lock); /**< vprintf spinlock */ |
static int vprintf_write(const char *str, size_t count, void *unused) |
{ |
size_t i; |
55,8 → 59,16 |
int vprintf(const char *fmt, va_list ap) |
{ |
struct printf_spec ps = {(int(*)(void *, size_t, void *)) vprintf_write, NULL}; |
return printf_core(fmt, &ps, ap); |
int irqpri = interrupts_disable(); |
spinlock_lock(&printf_lock); |
int ret = printf_core(fmt, &ps, ap); |
spinlock_unlock(&printf_lock); |
interrupts_restore(irqpri); |
return ret; |
} |
/** @} |
/branches/rcu/kernel/generic/src/printf/vsnprintf.c |
---|
42,8 → 42,6 |
char *string; /* destination string */ |
}; |
int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data); |
/** Write string to given buffer. |
 * Write at most data->size characters including trailing zero. According to C99, snprintf() has to return the number |
* of characters that would have been written if enough space had been available. Hence the return value is not |
54,7 → 52,7 |
* @param data structure with destination string, counter of used space and total string size. |
* @return number of characters to print (not characters really printed!) |
*/ |
int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data) |
static int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data) |
{ |
size_t i; |
i = data->size - data->len; |
/branches/rcu/kernel/generic/src/printf/printf_core.c |
---|
38,14 → 38,9 |
#include <printf/printf_core.h> |
#include <putchar.h> |
#include <print.h> |
#include <synch/spinlock.h> |
#include <arch/arg.h> |
#include <arch/asm.h> |
#include <arch.h> |
SPINLOCK_INITIALIZE(printflock); /**< printf spinlock */ |
#define __PRINTF_FLAG_PREFIX 0x00000001 /**< show prefixes 0x or 0*/ |
#define __PRINTF_FLAG_SIGNED 0x00000002 /**< signed / unsigned number */ |
#define __PRINTF_FLAG_ZEROPADDED 0x00000004 /**< print leading zeroes */ |
458,7 → 453,6 |
*/ |
int printf_core(const char *fmt, struct printf_spec *ps, va_list ap) |
{ |
int irqpri; |
int i = 0, j = 0; /**< i is index of currently processed char from fmt, j is index to the first not printed nonformating character */ |
int end; |
int counter; /**< counter of printed characters */ |
472,10 → 466,7 |
uint64_t flags; |
counter = 0; |
irqpri = interrupts_disable(); |
spinlock_lock(&printflock); |
while ((c = fmt[i])) { |
/* control character */ |
if (c == '%' ) { |
712,8 → 703,6 |
} |
out: |
spinlock_unlock(&printflock); |
interrupts_restore(irqpri); |
return counter; |
} |
/branches/rcu/kernel/generic/src/printf/vsprintf.c |
---|
36,7 → 36,7 |
int vsprintf(char *str, const char *fmt, va_list ap) |
{ |
return vsnprintf(str, (size_t)-1, fmt, ap); |
return vsnprintf(str, (size_t) - 1, fmt, ap); |
} |
/** @} |
/branches/rcu/kernel/generic/src/console/cmd.c |
---|
48,6 → 48,7 |
#include <arch/types.h> |
#include <adt/list.h> |
#include <arch.h> |
#include <config.h> |
#include <func.h> |
#include <macros.h> |
#include <debug.h> |
79,10 → 80,26 |
static cmd_info_t exit_info = { |
.name = "exit", |
.description = "Exit kconsole", |
.description = "Exit kconsole.", |
.argc = 0 |
}; |
static int cmd_reboot(cmd_arg_t *argv); |
static cmd_info_t reboot_info = { |
.name = "reboot", |
.description = "Reboot.", |
.func = cmd_reboot, |
.argc = 0 |
}; |
static int cmd_uptime(cmd_arg_t *argv); |
static cmd_info_t uptime_info = { |
.name = "uptime", |
.description = "Print uptime information.", |
.func = cmd_uptime, |
.argc = 0 |
}; |
static int cmd_continue(cmd_arg_t *argv); |
static cmd_info_t continue_info = { |
.name = "continue", |
192,10 → 209,10 |
}; |
/* Data and methods for 'call0' command. */ |
static char call0_buf[MAX_CMDLINE+1]; |
static char carg1_buf[MAX_CMDLINE+1]; |
static char carg2_buf[MAX_CMDLINE+1]; |
static char carg3_buf[MAX_CMDLINE+1]; |
static char call0_buf[MAX_CMDLINE + 1]; |
static char carg1_buf[MAX_CMDLINE + 1]; |
static char carg2_buf[MAX_CMDLINE + 1]; |
static char carg3_buf[MAX_CMDLINE + 1]; |
static int cmd_call0(cmd_arg_t *argv); |
static cmd_arg_t call0_argv = { |
211,6 → 228,21 |
.argv = &call0_argv |
}; |
/* Data and methods for 'mcall0' command. */ |
static int cmd_mcall0(cmd_arg_t *argv); |
static cmd_arg_t mcall0_argv = { |
.type = ARG_TYPE_STRING, |
.buffer = call0_buf, |
.len = sizeof(call0_buf) |
}; |
static cmd_info_t mcall0_info = { |
.name = "mcall0", |
.description = "mcall0 <function> -> call function() on each CPU.", |
.func = cmd_mcall0, |
.argc = 1, |
.argv = &mcall0_argv |
}; |
/* Data and methods for 'call1' command. */ |
static int cmd_call1(cmd_arg_t *argv); |
static cmd_arg_t call1_argv[] = { |
406,6 → 438,7 |
static cmd_info_t *basic_commands[] = { |
&call0_info, |
&mcall0_info, |
&call1_info, |
&call2_info, |
&call3_info, |
413,6 → 446,8 |
&cpus_info, |
&desc_info, |
&exit_info, |
&reboot_info, |
&uptime_info, |
&halt_info, |
&help_info, |
&ipc_task_info, |
488,6 → 523,41 |
return 1; |
} |
/** Reboot the system. |
* |
* @param argv Argument vector. |
* |
* @return 0 on failure, 1 on success. |
*/ |
int cmd_reboot(cmd_arg_t *argv) |
{ |
reboot(); |
/* Not reached */ |
return 1; |
} |
/** Print system uptime information. |
* |
* @param argv Argument vector. |
* |
* @return 0 on failure, 1 on success. |
*/ |
int cmd_uptime(cmd_arg_t *argv) |
{ |
ASSERT(uptime); |
/* This doesn't have to be very accurate */ |
unative_t sec = uptime->seconds1; |
printf("Up %u days, %u hours, %u minutes, %u seconds\n", |
sec / 86400, (sec % 86400) / 3600, (sec % 3600) / 60, sec % 60); |
return 1; |
} |
/** Describe specified command. |
* |
* @param argv Argument vector. |
540,7 → 610,7 |
struct { |
unative_t f; |
unative_t gp; |
}fptr; |
} fptr; |
#endif |
symaddr = get_symbol_addr((char *) argv->buffer); |
551,7 → 621,7 |
printf("Duplicate symbol, be more specific.\n"); |
} else { |
symbol = get_symtab_entry(symaddr); |
printf("Calling f(): %.*p: %s\n", sizeof(uintptr_t) * 2, symaddr, symbol); |
printf("Calling %s() (%.*p)\n", symbol, sizeof(uintptr_t) * 2, symaddr); |
#ifdef ia64 |
fptr.f = symaddr; |
fptr.gp = ((unative_t *)cmd_call2)[1]; |
565,6 → 635,32 |
return 1; |
} |
/** Call function with zero parameters on each CPU */ |
int cmd_mcall0(cmd_arg_t *argv) |
{ |
/* |
* For each CPU, create a thread which will |
* call the function. |
*/ |
count_t i; |
for (i = 0; i < config.cpu_count; i++) { |
thread_t *t; |
if ((t = thread_create((void (*)(void *)) cmd_call0, (void *) argv, TASK, THREAD_FLAG_WIRED, "call0", false))) { |
spinlock_lock(&t->lock); |
t->cpu = &cpus[i]; |
spinlock_unlock(&t->lock); |
printf("cpu%u: ", i); |
thread_ready(t); |
thread_join(t); |
thread_detach(t); |
} else |
printf("Unable to create thread for cpu%u\n", i); |
} |
return 1; |
} |
/** Call function with one parameter */ |
int cmd_call1(cmd_arg_t *argv) |
{ |
713,7 → 809,7 |
/** Write 4 byte value to address */ |
int cmd_set4(cmd_arg_t *argv) |
{ |
uint32_t *addr ; |
uint32_t *addr; |
uint32_t arg1 = argv[1].intval; |
bool pointer = false; |
/branches/rcu/kernel/generic/src/proc/scheduler.c |
---|
61,6 → 61,7 |
#include <cpu.h> |
#include <print.h> |
#include <debug.h> |
#include <synch/rcu.h> |
static void before_task_runs(void); |
static void before_thread_runs(void); |
115,6 → 116,7 |
void after_thread_ran(void) |
{ |
after_thread_ran_arch(); |
rcu_run_callbacks(); |
} |
#ifdef CONFIG_FPU_LAZY |
207,7 → 209,7 |
interrupts_disable(); |
for (i = 0; i<RQ_COUNT; i++) { |
for (i = 0; i < RQ_COUNT; i++) { |
r = &CPU->rq[i]; |
spinlock_lock(&r->lock); |
if (r->n == 0) { |
377,7 → 379,8 |
void scheduler_separated_stack(void) |
{ |
int priority; |
DEADLOCK_PROBE_INIT(p_joinwq); |
ASSERT(CPU != NULL); |
if (THREAD) { |
406,6 → 409,8 |
spinlock_unlock(&THREAD->lock); |
delay(10); |
spinlock_lock(&THREAD->lock); |
DEADLOCK_PROBE(p_joinwq, |
DEADLOCK_THRESHOLD); |
goto repeat; |
} |
_waitq_wakeup_unsafe(&THREAD->join_wq, false); |
447,8 → 452,8 |
/* |
* Entering state is unexpected. |
*/ |
panic("tid%d: unexpected state %s\n", THREAD->tid, |
thread_states[THREAD->state]); |
panic("tid%llu: unexpected state %s\n", THREAD->tid, |
thread_states[THREAD->state]); |
break; |
} |
500,7 → 505,7 |
THREAD->state = Running; |
#ifdef SCHEDULER_VERBOSE |
printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n", |
printf("cpu%d: tid %llu (priority=%d, ticks=%llu, nrdy=%ld)\n", |
CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, |
atomic_get(&CPU->nrdy)); |
#endif |
568,7 → 573,7 |
* Searching least priority queues on all CPU's first and most priority |
* queues on all CPU's last. |
*/ |
for (j= RQ_COUNT - 1; j >= 0; j--) { |
for (j = RQ_COUNT - 1; j >= 0; j--) { |
for (i = 0; i < config.cpu_active; i++) { |
link_t *l; |
runq_t *r; |
609,8 → 614,8 |
*/ |
spinlock_lock(&t->lock); |
if ((!(t->flags & (THREAD_FLAG_WIRED | |
THREAD_FLAG_STOLEN))) && |
(!(t->fpu_context_engaged)) ) { |
THREAD_FLAG_STOLEN))) && |
(!(t->fpu_context_engaged))) { |
/* |
* Remove t from r. |
*/ |
636,7 → 641,7 |
*/ |
spinlock_lock(&t->lock); |
#ifdef KCPULB_VERBOSE |
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, " |
printf("kcpulb%d: TID %llu -> cpu%d, nrdy=%ld, " |
"avg=%nd\n", CPU->id, t->tid, CPU->id, |
atomic_get(&CPU->nrdy), |
atomic_get(&nrdy) / config.cpu_active); |
719,7 → 724,7 |
for (cur = r->rq_head.next; cur != &r->rq_head; |
cur = cur->next) { |
t = list_get_instance(cur, thread_t, rq_link); |
printf("%d(%s) ", t->tid, |
printf("%llu(%s) ", t->tid, |
thread_states[t->state]); |
} |
printf("\n"); |
/branches/rcu/kernel/generic/src/proc/task.c |
---|
41,6 → 41,7 |
#include <proc/uarg.h> |
#include <mm/as.h> |
#include <mm/slab.h> |
#include <atomic.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <arch.h> |
92,6 → 93,49 |
btree_create(&tasks_btree); |
} |
/** Kill all tasks except the current task. |
* |
*/ |
void task_done(void) |
{ |
task_t *t; |
do { /* Repeat until there are any tasks except TASK */ |
/* Messing with task structures, avoid deadlock */ |
ipl_t ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
t = NULL; |
link_t *cur; |
for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head; cur = cur->next) { |
btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link); |
unsigned int i; |
for (i = 0; i < node->keys; i++) { |
if ((task_t *) node->value[i] != TASK) { |
t = (task_t *) node->value[i]; |
break; |
} |
} |
} |
if (t != NULL) { |
task_id_t id = t->taskid; |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
#ifdef CONFIG_DEBUG |
printf("Killing task %llu\n", id); |
#endif |
task_kill(id); |
} else { |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
} |
} while (t != NULL); |
} |
/** Create new task |
* |
140,11 → 184,8 |
/* |
* Increment address space reference count. |
* TODO: Reconsider the locking scheme. |
*/ |
mutex_lock(&as->lock); |
as->refcount++; |
mutex_unlock(&as->lock); |
atomic_inc(&as->refcount); |
spinlock_lock(&tasks_lock); |
166,15 → 207,8 |
task_destroy_arch(t); |
btree_destroy(&t->futexes); |
mutex_lock_active(&t->as->lock); |
if (--t->as->refcount == 0) { |
mutex_unlock(&t->as->lock); |
if (atomic_predec(&t->as->refcount) == 0) |
as_destroy(t->as); |
/* |
* t->as is destroyed. |
*/ |
} else |
mutex_unlock(&t->as->lock); |
free(t); |
TASK = NULL; |
382,7 → 416,7 |
link_t *cur; |
ipl_t ipl; |
/* Messing with thread structures, avoid deadlock */ |
/* Messing with task structures, avoid deadlock */ |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
408,7 → 442,7 |
char suffix; |
order(task_get_accounting(t), &cycles, &suffix); |
printf("%-6lld %-10s %-3ld %#10zx %#10zx %9llu%c %7zd " |
printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd " |
"%6zd", t->taskid, t->name, t->context, t, t->as, |
cycles, suffix, t->refcount, |
atomic_get(&t->active_calls)); |
495,7 → 529,7 |
ipc_cleanup(); |
futex_cleanup(); |
klog_printf("Cleanup of task %lld completed.", TASK->taskid); |
klog_printf("Cleanup of task %llu completed.", TASK->taskid); |
} |
/** Kernel thread used to kill the userspace task when its main thread exits. |
/branches/rcu/kernel/generic/src/proc/thread.c |
---|
94,7 → 94,7 |
btree_t threads_btree; |
SPINLOCK_INITIALIZE(tidlock); |
uint32_t last_tid = 0; |
thread_id_t last_tid = 0; |
static slab_cache_t *thread_slab; |
#ifdef ARCH_HAS_FPU |
238,6 → 238,7 |
cpu = CPU; |
if (t->flags & THREAD_FLAG_WIRED) { |
ASSERT(t->cpu != NULL); |
cpu = t->cpu; |
} |
t->state = Ready; |
496,7 → 497,7 |
ipl_t ipl; |
/* |
* Since the thread is expected to not be already detached, |
* Since the thread is expected not to be already detached, |
* pointer to it must be still valid. |
*/ |
ipl = interrupts_disable(); |
580,7 → 581,7 |
char suffix; |
order(t->cycles, &cycles, &suffix); |
printf("%-6zd %-10s %#10zx %-8s %#10zx %-3ld %#10zx " |
printf("%-6llu %-10s %#10zx %-8s %#10zx %-3ld %#10zx " |
"%#10zx %9llu%c ", t->tid, t->name, t, |
thread_states[t->state], t->task, t->task->context, |
t->thread_code, t->kstack, cycles, suffix); |
636,12 → 637,11 |
/** Process syscall to create new thread. |
* |
*/ |
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name) |
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name, thread_id_t *uspace_thread_id) |
{ |
thread_t *t; |
char namebuf[THREAD_NAME_BUFLEN]; |
uspace_arg_t *kernel_uarg; |
uint32_t tid; |
int rc; |
rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN); |
658,12 → 658,14 |
t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf, |
false); |
if (t) { |
tid = t->tid; |
thread_ready(t); |
return (unative_t) tid; |
} else { |
if (uspace_thread_id != NULL) |
return (unative_t) copy_to_uspace(uspace_thread_id, &t->tid, |
sizeof(t->tid)); |
else |
return 0; |
} else |
free(kernel_uarg); |
} |
return (unative_t) ENOMEM; |
} |
678,6 → 680,22 |
return 0; |
} |
/** Syscall for getting TID. |
* |
* @param uspace_thread_id Userspace address of 8-byte buffer where to store |
* current thread ID. |
* |
* @return 0 on success or an error code from @ref errno.h. |
*/ |
unative_t sys_thread_get_id(thread_id_t *uspace_thread_id) |
{ |
/* |
* No need to acquire lock on THREAD because tid |
* remains constant for the lifespan of the thread. |
*/ |
return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid, |
sizeof(THREAD->tid)); |
} |
/** @} |
*/ |
/branches/rcu/kernel/generic/src/lib/memstr.c |
---|
130,7 → 130,8 |
{ |
char *orig = dest; |
while ((*(dest++) = *(src++))); |
while ((*(dest++) = *(src++))) |
; |
return orig; |
} |
/branches/rcu/kernel/generic/src/lib/func.c |
---|
223,20 → 223,23 |
void order(const uint64_t val, uint64_t *rv, char *suffix) |
{ |
if (val > 1000000000000000000LL) { |
*rv = val / 1000000000000000LL; |
if (val > 10000000000000000000ULL) { |
*rv = val / 1000000000000000000ULL; |
*suffix = 'Z'; |
} else if (val > 1000000000000000000ULL) { |
*rv = val / 1000000000000000ULL; |
*suffix = 'E'; |
} else if (val > 1000000000000000LL) { |
*rv = val / 1000000000000LL; |
} else if (val > 1000000000000000ULL) { |
*rv = val / 1000000000000ULL; |
*suffix = 'T'; |
} else if (val > 1000000000000LL) { |
*rv = val / 1000000000LL; |
} else if (val > 1000000000000ULL) { |
*rv = val / 1000000000ULL; |
*suffix = 'G'; |
} else if (val > 1000000000LL) { |
*rv = val / 1000000LL; |
} else if (val > 1000000000ULL) { |
*rv = val / 1000000ULL; |
*suffix = 'M'; |
} else if (val > 1000000LL) { |
*rv = val / 1000LL; |
} else if (val > 1000000ULL) { |
*rv = val / 1000ULL; |
*suffix = 'k'; |
} else { |
*rv = val; |
/branches/rcu/kernel/generic/src/lib/objc.c |
---|
49,7 → 49,7 |
return class_create_instance(self); |
} |
- (id) free |
- (id) dispose |
{ |
return object_dispose(self); |
} |
/branches/rcu/kernel/generic/src/adt/btree.c |
---|
970,7 → 970,7 |
printf("("); |
for (i = 0; i < node->keys; i++) { |
printf("%lld%s", node->key[i], i < node->keys - 1 ? "," : ""); |
printf("%llu%s", node->key[i], i < node->keys - 1 ? "," : ""); |
if (node->depth && node->subtree[i]) { |
list_append(&node->subtree[i]->bfs_link, &head); |
} |
992,7 → 992,7 |
printf("("); |
for (i = 0; i < node->keys; i++) |
printf("%lld%s", node->key[i], i < node->keys - 1 ? "," : ""); |
printf("%llu%s", node->key[i], i < node->keys - 1 ? "," : ""); |
printf(")"); |
} |
printf("\n"); |
/branches/rcu/kernel/generic/src/mm/as.c |
---|
57,6 → 57,7 |
#include <genarch/mm/page_ht.h> |
#include <mm/asid.h> |
#include <arch/mm/asid.h> |
#include <preemption.h> |
#include <synch/spinlock.h> |
#include <synch/mutex.h> |
#include <adt/list.h> |
181,7 → 182,7 |
else |
as->asid = ASID_INVALID; |
as->refcount = 0; |
atomic_set(&as->refcount, 0); |
as->cpu_refcount = 0; |
#ifdef AS_PAGE_TABLE |
as->genarch.page_table = page_table_create(flags); |
196,13 → 197,16 |
* |
* When there are no tasks referencing this address space (i.e. its refcount is |
* zero), the address space can be destroyed. |
* |
* We know that we don't hold any spinlock. |
*/ |
void as_destroy(as_t *as) |
{ |
ipl_t ipl; |
bool cond; |
DEADLOCK_PROBE_INIT(p_asidlock); |
ASSERT(as->refcount == 0); |
ASSERT(atomic_get(&as->refcount) == 0); |
/* |
* Since there is no reference to this area, |
209,8 → 213,23 |
* it is safe not to lock its mutex. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&asidlock); |
/* |
* We need to avoid deadlock between TLB shootdown and asidlock. |
* We therefore try to take asid conditionally and if we don't succeed, |
* we enable interrupts and try again. This is done while preemption is |
* disabled to prevent nested context switches. We also depend on the |
* fact that so far no spinlocks are held. |
*/ |
preemption_disable(); |
ipl = interrupts_read(); |
retry: |
interrupts_disable(); |
if (!spinlock_trylock(&asidlock)) { |
interrupts_enable(); |
DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD); |
goto retry; |
} |
preemption_enable(); /* Interrupts disabled, enable preemption */ |
if (as->asid != ASID_INVALID && as != AS_KERNEL) { |
if (as != AS && as->cpu_refcount == 0) |
list_remove(&as->inactive_as_with_asid_link); |
472,15 → 491,16 |
/* |
* Finish TLB shootdown sequence. |
*/ |
tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, |
area->pages - pages); |
tlb_shootdown_finalize(); |
/* |
* Invalidate software translation caches (e.g. TSB on sparc64). |
*/ |
as_invalidate_translation_cache(as, area->base + |
pages * PAGE_SIZE, area->pages - pages); |
tlb_shootdown_finalize(); |
} else { |
/* |
* Growing the area. |
568,14 → 588,14 |
/* |
* Finish TLB shootdown sequence. |
*/ |
tlb_invalidate_pages(as->asid, area->base, area->pages); |
tlb_shootdown_finalize(); |
/* |
* Invalidate potential software translation caches (e.g. TSB on |
* sparc64). |
*/ |
as_invalidate_translation_cache(as, area->base, area->pages); |
tlb_shootdown_finalize(); |
btree_destroy(&area->used_space); |
867,12 → 887,29 |
* scheduling. Sleeping here would lead to deadlock on wakeup. Another |
* thing which is forbidden in this context is locking the address space. |
* |
 * When this function is entered, no spinlocks may be held. |
* |
* @param old Old address space or NULL. |
* @param new New address space. |
*/ |
void as_switch(as_t *old_as, as_t *new_as) |
{ |
spinlock_lock(&asidlock); |
DEADLOCK_PROBE_INIT(p_asidlock); |
preemption_disable(); |
retry: |
(void) interrupts_disable(); |
if (!spinlock_trylock(&asidlock)) { |
/* |
* Avoid deadlock with TLB shootdown. |
* We can enable interrupts here because |
* preemption is disabled. We should not be |
* holding any other lock. |
*/ |
(void) interrupts_enable(); |
DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD); |
goto retry; |
} |
preemption_enable(); |
/* |
* First, take care of the old address space. |
/branches/rcu/kernel/generic/src/mm/page.c |
---|
76,7 → 76,7 |
cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); |
for (i = 0; i < cnt; i++) |
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE); |
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE); |
} |
/branches/rcu/kernel/generic/src/syscall/syscall.c |
---|
100,7 → 100,7 |
if (id < SYSCALL_END) |
rc = syscall_table[id](a1, a2, a3, a4); |
else { |
klog_printf("TASK %lld: Unknown syscall id %d",TASK->taskid,id); |
klog_printf("TASK %llu: Unknown syscall id %d",TASK->taskid,id); |
task_kill(TASK->taskid); |
thread_exit(); |
} |
118,6 → 118,7 |
/* Thread and task related syscalls. */ |
(syshandler_t) sys_thread_create, |
(syshandler_t) sys_thread_exit, |
(syshandler_t) sys_thread_get_id, |
(syshandler_t) sys_task_get_id, |
/* Synchronization related syscalls. */ |
/branches/rcu/kernel/generic/src/ipc/ipc.c |
---|
374,6 → 374,7 |
int i; |
call_t *call; |
phone_t *phone; |
DEADLOCK_PROBE_INIT(p_phonelck); |
/* Disconnect all our phones ('ipc_phone_hangup') */ |
for (i=0;i < IPC_MAX_PHONES; i++) |
387,9 → 388,10 |
spinlock_lock(&TASK->answerbox.lock); |
while (!list_empty(&TASK->answerbox.connected_phones)) { |
phone = list_get_instance(TASK->answerbox.connected_phones.next, |
phone_t, link); |
phone_t, link); |
if (! spinlock_trylock(&phone->lock)) { |
spinlock_unlock(&TASK->answerbox.lock); |
DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD); |
goto restart_phones; |
} |
500,7 → 502,7 |
printf("ABOX - CALLS:\n"); |
for (tmp=task->answerbox.calls.next; tmp != &task->answerbox.calls;tmp = tmp->next) { |
call = list_get_instance(tmp, call_t, link); |
printf("Callid: %p Srctask:%lld M:%d A1:%d A2:%d A3:%d Flags:%x\n",call, |
printf("Callid: %p Srctask:%llu M:%d A1:%d A2:%d A3:%d Flags:%x\n",call, |
call->sender->taskid, IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data), |
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), call->flags); |
} |
510,7 → 512,7 |
tmp != &task->answerbox.dispatched_calls; |
tmp = tmp->next) { |
call = list_get_instance(tmp, call_t, link); |
printf("Callid: %p Srctask:%lld M:%d A1:%d A2:%d A3:%d Flags:%x\n",call, |
printf("Callid: %p Srctask:%llu M:%d A1:%d A2:%d A3:%d Flags:%x\n",call, |
call->sender->taskid, IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data), |
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), call->flags); |
} |
/branches/rcu/kernel/generic/src/ipc/irq.c |
---|
336,6 → 336,7 |
while (box->irq_head.next != &box->irq_head) { |
link_t *cur = box->irq_head.next; |
irq_t *irq; |
DEADLOCK_PROBE_INIT(p_irqlock); |
irq = list_get_instance(cur, irq_t, notif_cfg.link); |
if (!spinlock_trylock(&irq->lock)) { |
344,6 → 345,7 |
*/ |
spinlock_unlock(&box->irq_lock); |
interrupts_restore(ipl); |
DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD); |
goto loop; |
} |