Subversion Repositories HelenOS

Compare Revisions

Rev 4342 → Rev 4343

/branches/dynload/kernel/generic/src/time/clock.c
88,9 → 88,7
uptime->useconds = 0;
 
clock_parea.pbase = (uintptr_t) faddr;
clock_parea.vbase = (uintptr_t) uptime;
clock_parea.frames = 1;
clock_parea.cacheable = true;
ddi_parea_register(&clock_parea);
 
/*
/branches/dynload/kernel/generic/src/ddi/ddi.c
47,7 → 47,7
#include <mm/as.h>
#include <synch/spinlock.h>
#include <syscall/copy.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <arch.h>
#include <align.h>
#include <errno.h>
55,13 → 55,17
/** This lock protects the parea_btree. */
SPINLOCK_INITIALIZE(parea_lock);
 
/** B+tree with enabled physical memory areas. */
static btree_t parea_btree;
/** List with enabled physical memory areas. */
static LIST_INITIALIZE(parea_head);
 
/** Physical memory area for devices. */
static parea_t dev_area;
 
/** Initialize DDI. */
void ddi_init(void)
{
btree_create(&parea_btree);
hw_area(&dev_area.pbase, &dev_area.frames);
ddi_parea_register(&dev_area);
}
 
/** Enable piece of physical memory for mapping by physmem_map().
74,19 → 78,19
void ddi_parea_register(parea_t *parea)
{
ipl_t ipl;
 
ipl = interrupts_disable();
spinlock_lock(&parea_lock);
/*
* TODO: we should really check for overlaps here.
* However, we should be safe because the kernel is pretty sane and
* memory of different devices doesn't overlap.
* However, we should be safe because the kernel is pretty sane.
*/
btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
 
link_initialize(&parea->link);
list_append(&parea->link, &parea_head);
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
}
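
The TODO above survives the switch from the B+tree to a plain list: nothing yet rejects a newly registered parea whose frames collide with an already registered area. A minimal sketch of such a check, assuming it runs with parea_lock held and using only the parea_t fields and list primitives visible in this diff (the helper name is made up):

/* Hypothetical overlap check; caller is expected to hold parea_lock. */
static bool parea_overlaps(uintptr_t pbase, pfn_t frames)
{
    link_t *cur;

    for (cur = parea_head.next; cur != &parea_head; cur = cur->next) {
        parea_t *p = list_get_instance(cur, parea_t, link);
        pfn_t a = ADDR2PFN(p->pbase);
        pfn_t b = ADDR2PFN(pbase);

        /* Two frame ranges collide unless one ends before the other starts. */
        if (!((a + p->frames <= b) || (b + frames <= a)))
            return true;
    }
    return false;
}

ddi_parea_register() could call this before list_append() and refuse (or at least log) colliding registrations.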
 
/** Map piece of physical memory into virtual address space of current task.
97,16 → 101,16
* @param flags Address space area flags for the mapping.
*
* @return 0 on success, EPERM if the caller lacks capabilities to use this
* syscall, ENOENT if there is no task matching the specified ID or the
* physical address space is not enabled for mapping and ENOMEM if there
* was a problem in creating address space area.
*/
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, pfn_t pages, int flags)
{
ipl_t ipl;
cap_t caps;
mem_backend_data_t backend_data;
 
backend_data.base = pf;
backend_data.frames = pages;
116,30 → 120,35
caps = cap_get(TASK);
if (!(caps & CAP_MEM_MANAGER))
return EPERM;
 
ipl = interrupts_disable();
 
/*
* Check if the physical memory area is enabled for mapping.
* If the architecture supports virtually indexed caches, intercept
* attempts to create an illegal address alias.
*/
spinlock_lock(&parea_lock);
parea_t *parea;
btree_node_t *nodep;
parea = (parea_t *) btree_search(&parea_btree, (btree_key_t) pf, &nodep);
if (!parea || parea->frames < pages || ((flags & AS_AREA_CACHEABLE) &&
!parea->cacheable) || (!(flags & AS_AREA_CACHEABLE) &&
parea->cacheable)) {
bool fnd = false;
link_t *cur;
for (cur = parea_head.next; cur != &parea_head; cur = cur->next) {
parea_t *parea = list_get_instance(cur, parea_t, link);
if ((parea->pbase <= pf) && (ADDR2PFN(pf - parea->pbase) + pages <= parea->frames)) {
fnd = true;
break;
}
}
spinlock_unlock(&parea_lock);
if (!fnd) {
/*
* This physical memory area cannot be mapped.
* Physical memory area cannot be mapped.
*/
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
return ENOENT;
}
spinlock_unlock(&parea_lock);
 
spinlock_lock(&TASK->lock);
if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE,
226,7 → 235,7
{
return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
(count_t) pages, (int) flags);
(pfn_t) pages, (int) flags);
}
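
The containment test inside ddi_physmem_map() packs the whole check into one condition; restated as a stand-alone predicate it is easier to read. The arithmetic is taken verbatim from the hunk above, only the helper name is invented:

/* Hypothetical restatement of the in-line containment check. */
static bool parea_covers(parea_t *parea, uintptr_t pf, pfn_t pages)
{
    /* The requested range must start inside the registered area... */
    if (parea->pbase > pf)
        return false;
    /* ...and its last frame must not run past the end of that area. */
    return ADDR2PFN(pf - parea->pbase) + pages <= parea->frames;
}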
 
/** Wrapper for SYS_ENABLE_IOSPACE syscall.
258,13 → 267,13
*/
unative_t sys_preempt_control(int enable)
{
if (!cap_get(TASK) & CAP_PREEMPT_CONTROL)
return EPERM;
if (enable)
preemption_enable();
else
preemption_disable();
return 0;
}
 
/** @}
/branches/dynload/kernel/generic/src/ddi/irq.c
39,7 → 39,8
*
* This code is designed to support:
* - multiple devices sharing single IRQ
* - multiple IRQs per signle device
* - multiple IRQs per single device
* - multiple instances of the same device
*
*
* Note about architectures.
144,7 → 145,7
irq->trigger = (irq_trigger_t) 0;
irq->claim = NULL;
irq->handler = NULL;
irq->arg = NULL;
irq->instance = NULL;
irq->cir = NULL;
irq->cir_arg = NULL;
irq->notif_cfg.notify = false;
306,7 → 307,8
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock(). */
rv = ((irq->inr == inr) && (irq->claim() == IRQ_ACCEPT));
rv = ((irq->inr == inr) &&
(irq->claim(irq->instance) == IRQ_ACCEPT));
} else {
/* Invoked by irq_find_and_lock(). */
rv = ((irq->inr == inr) && (irq->devno == devno));
365,7 → 367,7
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock() */
rv = (irq->claim() == IRQ_ACCEPT);
rv = (irq->claim(irq->instance) == IRQ_ACCEPT);
} else {
/* Invoked by irq_find_and_lock() */
rv = (irq->devno == devno);
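
The new instance pointer is what backs the "multiple instances of the same device" bullet near the top of this file's diff: every device instance registers its own irq_t and gets its private state handed back in the claim handler. A rough sketch of the intended usage, assuming the irq_initialize()/irq_register() interface of this kernel; the device structure and function names are made up:

/* Hypothetical per-instance driver state. */
typedef struct {
    int unit;
    irq_t irq;
} mydev_t;

/* The claim handler now receives the registered instance back. */
static irq_ownership_t mydev_claim(void *instance)
{
    mydev_t *dev = (mydev_t *) instance;

    /* A real driver would poll this unit's status register here. */
    (void) dev;
    return IRQ_ACCEPT;
}

static void mydev_attach(mydev_t *dev, inr_t inr, devno_t devno)
{
    irq_initialize(&dev->irq);
    dev->irq.inr = inr;
    dev->irq.devno = devno;
    dev->irq.claim = mydev_claim;
    dev->irq.instance = dev;  /* handed back to mydev_claim() by the kernel */
    /* irq.handler would be set here as well; omitted in this sketch. */
    irq_register(&dev->irq);
}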
/branches/dynload/kernel/generic/src/console/console.c
101,7 → 101,7
*
* @return Always returns IRQ_DECLINE.
*/
static irq_ownership_t klog_claim(void)
static irq_ownership_t klog_claim(void *instance)
{
return IRQ_DECLINE;
}
126,9 → 126,7
devno_t devno = device_assign_devno();
klog_parea.pbase = (uintptr_t) faddr;
klog_parea.vbase = (uintptr_t) klog;
klog_parea.frames = SIZE2FRAMES(KLOG_SIZE);
klog_parea.cacheable = true;
ddi_parea_register(&klog_parea);
 
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr);
/branches/dynload/kernel/generic/src/console/cmd.c
529,7 → 529,7
spinlock_unlock(&hlp->lock);
}
spinlock_unlock(&cmd_lock);
 
return 1;
}
/branches/dynload/kernel/generic/src/console/kconsole.c
103,7 → 103,7
* @return Always returns IRQ_DECLINE.
*
*/
static irq_ownership_t kconsole_claim(void)
static irq_ownership_t kconsole_claim(void *instance)
{
return IRQ_DECLINE;
}
/branches/dynload/kernel/generic/src/proc/task.c
339,7 → 339,7
bool sleeping = false;
thr = list_get_instance(cur, thread_t, th_link);
spinlock_lock(&thr->lock);
thr->interrupted = true;
if (thr->state == Sleeping)
/branches/dynload/kernel/generic/src/proc/program.c
184,32 → 184,20
 
/** Syscall for creating a new loader instance from userspace.
*
* Creates a new task from the program loader image, connects a phone
* to it and stores the phone id into the provided buffer.
* Creates a new task from the program loader image and sets
* the task name.
*
* @param uspace_phone_id Userspace address where to store the phone id.
* @param name Name to set on the new task (typically the same
* as the command used to execute it).
*
* @return 0 on success or an error code from @ref errno.h.
*/
unative_t sys_program_spawn_loader(int *uspace_phone_id, char *uspace_name,
size_t name_len)
unative_t sys_program_spawn_loader(char *uspace_name, size_t name_len)
{
program_t p;
int fake_id;
int rc;
int phone_id;
char namebuf[TASK_NAME_BUFLEN];
 
fake_id = 0;
 
/* Before we even try creating the task, see if we can write the id */
rc = (unative_t) copy_to_uspace(uspace_phone_id, &fake_id,
sizeof(fake_id));
if (rc != 0)
return rc;
 
/* Cap length of name and copy it from userspace. */
 
if (name_len > THREAD_NAME_BUFLEN - 1)
221,12 → 209,6
 
namebuf[name_len] = '\0';
 
/* Allocate the phone for communicating with the new task. */
 
phone_id = phone_alloc();
if (phone_id < 0)
return ELIMIT;
 
/* Spawn the new task. */
 
rc = program_create_loader(&p, namebuf);
233,18 → 215,6
if (rc != 0)
return rc;
 
phone_connect(phone_id, &p.task->answerbox);
 
/* No need to acquire lock before task_ready() */
rc = (unative_t) copy_to_uspace(uspace_phone_id, &phone_id,
sizeof(phone_id));
if (rc != 0) {
/* Ooops */
ipc_phone_hangup(&TASK->phones[phone_id]);
task_kill(p.task->taskid);
return rc;
}
 
// FIXME: control the capabilities
cap_set(p.task, cap_get(TASK));
 
/branches/dynload/kernel/generic/src/lib/rd.c
88,9 → 88,7
rd_parea.pbase = ALIGN_DOWN((uintptr_t) KA2PA((void *) header + hsize),
FRAME_SIZE);
rd_parea.vbase = (uintptr_t) ((void *) header + hsize);
rd_parea.frames = SIZE2FRAMES(dsize);
rd_parea.cacheable = true;
ddi_parea_register(&rd_parea);
 
sysinfo_set_item_val("rd", NULL, true);
/branches/dynload/kernel/generic/src/adt/avl.c
43,7 → 43,7
*
* Every node has a pointer to its parent which allows insertion of multiple
* identical keys into the tree.
*
* Be careful when using this tree because of the base attribute which is added
* to every inserted node key. There is no rule in which order nodes with the
* same key are visited.
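
The caveat above is easiest to see in a tiny example: two nodes with the same key can both live in the tree (the parent pointers make that possible), their effective key is offset by the tree's base attribute, and no particular visiting order among them is promised. The avltree_create()/avltree_node_initialize()/avltree_insert() names below are my assumption about this kernel's AVL interface, so treat the sketch as illustrative only:

static avltree_t tree;
static avltree_node_t a, b;

static void avl_duplicate_keys_example(void)
{
    avltree_create(&tree);
    avltree_node_initialize(&a);
    avltree_node_initialize(&b);

    a.key = 5;
    b.key = 5;  /* a duplicate key is allowed */
    avltree_insert(&tree, &a);
    avltree_insert(&tree, &b);
    /* When walking the tree, a and b may be visited in either order. */
}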
/branches/dynload/kernel/generic/src/mm/as.c
122,7 → 122,7
int rc;
 
link_initialize(&as->inactive_as_with_asid_link);
mutex_initialize(&as->lock, MUTEX_PASSIVE);
rc = as_constructor_arch(as, flags);
/branches/dynload/kernel/generic/src/ipc/irq.c
101,10 → 101,10
code->cmds[i].value;
break;
case CMD_PORT_READ_1:
dstval = inb((long) code->cmds[i].addr);
dstval = pio_read_8((long) code->cmds[i].addr);
break;
case CMD_PORT_WRITE_1:
outb((long) code->cmds[i].addr, code->cmds[i].value);
pio_write_8((long) code->cmds[i].addr, code->cmds[i].value);
break;
default:
break;
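
The last hunk replaces the raw inb()/outb() port accesses in the IRQ notification interpreter with the architecture-neutral pio_read_8()/pio_write_8() accessors. For context, this is the kind of notification program a driver hands to the kernel and that this interpreter executes; the irq_code_t/irq_cmd_t field names reflect my understanding of this revision, so treat the details as an assumption (the port address is a placeholder):

/*
 * Hypothetical notification program: read one byte from a device status
 * port into IPC argument 1, then acknowledge by writing 0x01 back.
 */
static irq_cmd_t mydev_cmds[] = {
    {
        .cmd = CMD_PORT_READ_1,
        .addr = (void *) 0x3f8,  /* placeholder port */
        .dstarg = 1
    },
    {
        .cmd = CMD_PORT_WRITE_1,
        .addr = (void *) 0x3f8,  /* placeholder port */
        .value = 0x01
    }
};

static irq_code_t mydev_code = {
    sizeof(mydev_cmds) / sizeof(irq_cmd_t),
    mydev_cmds
};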