/kernel/trunk/generic/src/main/main.c |
---|
74,6 → 74,7 |
#include <ipc/ipc.h> |
#include <macros.h> |
#include <adt/btree.h> |
#include <console/klog.h> |
#ifdef CONFIG_SMP |
#include <arch/smp/apic.h> |
218,6 → 219,7 |
task_init(); |
thread_init(); |
futex_init(); |
klog_init(); |
for (i = 0; i < init.cnt; i++) |
printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, sizeof(__address) * 2, init.tasks[i].addr, i, init.tasks[i].size); |
/kernel/trunk/generic/src/interrupt/interrupt.c |
---|
90,6 → 90,7 |
/** Default 'null' exception handler
 *
 * Installed for exception vectors that have no dedicated handler.
 * NOTE(review): fault_if_from_uspace() presumably terminates the
 * offending task when the fault originated in userspace and returns
 * otherwise — confirm against its definition.
 */
static void exc_undef(int n, istate_t *istate)
{
	fault_if_from_uspace(istate, "Unhandled exception %d.", n);
	/* Fault came from kernel context - unrecoverable. */
	panic("Unhandled exception %d.", n);
}
/kernel/trunk/generic/src/console/klog.c |
---|
0,0 → 1,102 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/frame.h> |
#include <sysinfo/sysinfo.h> |
#include <console/klog.h> |
#include <print.h> |
#include <ipc/irq.h> |
/* Order of frame to be allocated for klog communication */
#define KLOG_ORDER 0

/* Kernel-space mapping of the log buffer shared with userspace */
static char *klog;
/* Total capacity of the klog buffer, in bytes */
static int klogsize;
/* Current write offset into the buffer (wraps to 0) */
static int klogpos;

/* Serializes all writes to klog/klogpos */
SPINLOCK_INITIALIZE(klog_lock);
/** Initialize kernel loggin facility |
* |
* Allocate pages that are to be shared if uspace for console data |
*/ |
void klog_init(void) |
{ |
void *faddr; |
faddr = (void *)PFN2ADDR(frame_alloc(KLOG_ORDER, FRAME_ATOMIC)); |
if (!faddr) |
panic("Cannot allocate page for klog"); |
klog = (char *)PA2KA(faddr); |
sysinfo_set_item_val("klog.faddr", NULL, (__native)faddr); |
sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER); |
klogsize = PAGE_SIZE << KLOG_ORDER; |
klogpos = 0; |
} |
static void klog_vprintf(const char *fmt, va_list args) |
{ |
int ret; |
va_list atst; |
va_copy(atst, args); |
spinlock_lock(&klog_lock); |
ret = vsnprintf(klog+klogpos, klogsize-klogpos, fmt, atst); |
// Workaround around bad return value from vsnprintf |
if (ret+klogpos < klogsize) |
ret = 100; |
if (ret == klogsize-klogpos) { |
klogpos = 0; |
ret = vsnprintf(klog+klogpos, klogsize-klogpos, fmt, args); |
ret = 100; |
if (ret == klogsize) |
goto out; |
} |
ipc_irq_send_msg(IPC_IRQ_KLOG, klogpos, ret); |
klogpos += ret; |
if (klogpos >= klogsize) |
klogpos = 0; |
out: |
spinlock_unlock(&klog_lock); |
va_end(atst); |
} |
/** Write a printf-style message to the kernel-to-userspace log
 *
 * Thin varargs front-end for klog_vprintf().
 */
void klog_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	klog_vprintf(fmt, ap);
	va_end(ap);
}
/kernel/trunk/generic/src/proc/thread.c |
---|
388,6 → 388,10 |
THREAD->state = Exiting; |
spinlock_unlock(&THREAD->lock); |
scheduler(); |
/* Not reached */ |
while (1) |
; |
} |
/kernel/trunk/generic/src/ipc/sysipc.c |
---|
553,12 → 553,12 |
} |
/** Connect irq handler to task */ |
__native sys_ipc_register_irq(__native irq, irq_code_t *ucode) |
__native sys_ipc_register_irq(int irq, irq_code_t *ucode) |
{ |
if (!(cap_get(TASK) & CAP_IRQ_REG)) |
return EPERM; |
if (irq >= IRQ_COUNT) |
if (irq >= IRQ_COUNT || irq <= -IPC_IRQ_RESERVED_VIRTUAL) |
return (__native) ELIMIT; |
irq_ipc_bind_arch(irq); |
567,12 → 567,12 |
} |
/* Disconnect irq handler from task */ |
__native sys_ipc_unregister_irq(__native irq) |
__native sys_ipc_unregister_irq(int irq) |
{ |
if (!(cap_get(TASK) & CAP_IRQ_REG)) |
return EPERM; |
if (irq >= IRQ_COUNT) |
if (irq >= IRQ_COUNT || irq <= -IPC_IRQ_RESERVED_VIRTUAL) |
return (__native) ELIMIT; |
ipc_irq_unregister(&TASK->answerbox, irq); |
/kernel/trunk/generic/src/ipc/ipc.c |
---|
335,10 → 335,7 |
/* Append request to dispatch queue */ |
list_append(&request->link, &box->dispatched_calls); |
} else { |
/* This can happen regularly after ipc_cleanup, remove |
* the warning in the future when the IPC is |
* more debugged */ |
printf("WARNING: Spurious IPC wakeup.\n"); |
/* This can happen regularly after ipc_cleanup */ |
spinlock_unlock(&box->lock); |
goto restart; |
} |
/kernel/trunk/generic/src/ipc/irq.c |
---|
156,16 → 156,17 |
void ipc_irq_unregister(answerbox_t *box, int irq) |
{ |
ipl_t ipl; |
int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
ipl = interrupts_disable(); |
spinlock_lock(&irq_conns[irq].lock); |
if (irq_conns[irq].box == box) { |
irq_conns[irq].box = NULL; |
code_free(irq_conns[irq].code); |
irq_conns[irq].code = NULL; |
spinlock_lock(&irq_conns[mq].lock); |
if (irq_conns[mq].box == box) { |
irq_conns[mq].box = NULL; |
code_free(irq_conns[mq].code); |
irq_conns[mq].code = NULL; |
} |
spinlock_unlock(&irq_conns[irq].lock); |
spinlock_unlock(&irq_conns[mq].lock); |
interrupts_restore(ipl); |
} |
174,6 → 175,7 |
{ |
ipl_t ipl; |
irq_code_t *code; |
int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
ASSERT(irq_conns); |
185,23 → 187,62 |
code = NULL; |
ipl = interrupts_disable(); |
spinlock_lock(&irq_conns[irq].lock); |
spinlock_lock(&irq_conns[mq].lock); |
if (irq_conns[irq].box) { |
spinlock_unlock(&irq_conns[irq].lock); |
if (irq_conns[mq].box) { |
spinlock_unlock(&irq_conns[mq].lock); |
interrupts_restore(ipl); |
code_free(code); |
return EEXISTS; |
} |
irq_conns[irq].box = box; |
irq_conns[irq].code = code; |
atomic_set(&irq_conns[irq].counter, 0); |
spinlock_unlock(&irq_conns[irq].lock); |
irq_conns[mq].box = box; |
irq_conns[mq].code = code; |
atomic_set(&irq_conns[mq].counter, 0); |
spinlock_unlock(&irq_conns[mq].lock); |
interrupts_restore(ipl); |
return 0; |
} |
/** Add call to proper answerbox queue |
* |
* Assume irq_conns[mq].lock is locked */ |
static void send_call(int mq, call_t *call) |
{ |
spinlock_lock(&irq_conns[mq].box->irq_lock); |
list_append(&call->link, &irq_conns[mq].box->irq_notifs); |
spinlock_unlock(&irq_conns[mq].box->irq_lock); |
waitq_wakeup(&irq_conns[mq].box->wq, 0); |
} |
/** Send notification message |
* |
*/ |
void ipc_irq_send_msg(int irq, __native a2, __native a3) |
{ |
call_t *call; |
int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
spinlock_lock(&irq_conns[mq].lock); |
if (irq_conns[mq].box) { |
call = ipc_call_alloc(FRAME_ATOMIC); |
if (!call) { |
spinlock_unlock(&irq_conns[mq].lock); |
return; |
} |
call->flags |= IPC_CALL_NOTIF; |
IPC_SET_METHOD(call->data, IPC_M_INTERRUPT); |
IPC_SET_ARG1(call->data, irq); |
IPC_SET_ARG2(call->data, a2); |
IPC_SET_ARG3(call->data, a3); |
send_call(mq, call); |
} |
spinlock_unlock(&irq_conns[mq].lock); |
} |
/** Notify process that an irq had happend |
* |
* We expect interrupts to be disabled |
209,40 → 250,42 |
void ipc_irq_send_notif(int irq) |
{ |
call_t *call; |
int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
ASSERT(irq_conns); |
spinlock_lock(&irq_conns[irq].lock); |
spinlock_lock(&irq_conns[mq].lock); |
if (irq_conns[irq].box) { |
if (irq_conns[mq].box) { |
call = ipc_call_alloc(FRAME_ATOMIC); |
if (!call) { |
spinlock_unlock(&irq_conns[irq].lock); |
spinlock_unlock(&irq_conns[mq].lock); |
return; |
} |
call->flags |= IPC_CALL_NOTIF; |
IPC_SET_METHOD(call->data, IPC_M_INTERRUPT); |
IPC_SET_ARG1(call->data, irq); |
IPC_SET_ARG3(call->data, atomic_preinc(&irq_conns[irq].counter)); |
IPC_SET_ARG3(call->data, atomic_preinc(&irq_conns[mq].counter)); |
/* Execute code to handle irq */ |
code_execute(call, irq_conns[irq].code); |
code_execute(call, irq_conns[mq].code); |
spinlock_lock(&irq_conns[irq].box->irq_lock); |
list_append(&call->link, &irq_conns[irq].box->irq_notifs); |
spinlock_unlock(&irq_conns[irq].box->irq_lock); |
waitq_wakeup(&irq_conns[irq].box->wq, 0); |
send_call(mq, call); |
} |
spinlock_unlock(&irq_conns[irq].lock); |
spinlock_unlock(&irq_conns[mq].lock); |
} |
/** Initialize table of interrupt handlers */ |
/** Initialize table of interrupt handlers |
* |
* @param irqcount Count of required hardware IRQs to be supported |
*/ |
void ipc_irq_make_table(int irqcount) |
{ |
int i; |
irqcount += IPC_IRQ_RESERVED_VIRTUAL; |
irq_conns_size = irqcount; |
irq_conns = malloc(irqcount * (sizeof(*irq_conns)), 0); |
for (i=0; i < irqcount; i++) { |