Subversion Repositories HelenOS

Compare Revisions

Rev 3386 → Rev 4153

/branches/network/kernel/generic/src/ddi/device.c
31,7 → 31,7
*/
/**
* @file
* @brief Device numbers.
*/
 
#include <arch/types.h>
47,13 → 47,16
*/
devno_t device_assign_devno(void)
{
devno_t devno;
 
devno = (devno_t) atomic_postinc(&last);
devno_t devno = (devno_t) atomic_postinc(&last);
ASSERT(devno >= 0);
 
return devno;
}
 
unative_t sys_device_assign_devno(void)
{
return (unative_t) device_assign_devno();
}
 
/** @}
*/
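As an aside, a minimal sketch of how a driver might consume these numbers; the fb_* names are hypothetical, only device_assign_devno() comes from the file above:

	#include <ddi/device.h>

	/* Hypothetical driver state: this instance's unique device number. */
	static devno_t fb_devno;

	void fb_init(void)
	{
		/* Each call atomically returns the next free devno. */
		fb_devno = device_assign_devno();
	}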
/branches/network/kernel/generic/src/ddi/ddi.c
29,10 → 29,10
/** @addtogroup genericddi
* @{
*/
 
/**
* @file
* @brief Device Driver Interface functions.
*
* This file contains functions that comprise the Device Driver Interface.
* These are the functions for mapping physical memory and enabling I/O
68,82 → 68,100
*
* @param parea Pointer to physical area structure.
*
* @todo This function doesn't check for overlaps. It depends on the kernel to
* create disjoint physical memory areas.
*/
void ddi_parea_register(parea_t *parea)
{
ipl_t ipl;
 
ipl = interrupts_disable();
ipl_t ipl = interrupts_disable();
spinlock_lock(&parea_lock);
/*
* TODO: we should really check for overlaps here.
* However, we should be safe because the kernel is pretty sane and
* memory of different devices doesn't overlap.
* We don't check for overlaps here as the kernel is pretty sane.
*/
btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
 
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
}
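A hedged sketch of a caller, assuming only the parea_t fields visible in this file (pbase as the btree key, frames as the size checked by ddi_physmem_map()); the fb_* names are hypothetical:

	static parea_t fb_parea;

	void fb_enable_physmem(uintptr_t phys_base, count_t frames)
	{
		fb_parea.pbase = phys_base;	/* becomes the btree key */
		fb_parea.frames = frames;	/* checked against the mapping size */
		ddi_parea_register(&fb_parea);
	}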
 
/** Map piece of physical memory into virtual address space of current task.
*
* @param pf Physical address of the starting frame.
* @param vp Virtual address of the starting page.
* @param pages Number of pages to map.
* @param flags Address space area flags for the mapping.
*
* @return 0 on success, EPERM if the caller lacks capabilities to use this
* syscall, ENOENT if there is no task matching the specified ID or the
* physical address space is not enabled for mapping and ENOMEM if there
* was a problem in creating address space area.
* syscall, EBADMEM if pf or vp is not page aligned, ENOENT if there
* is no task matching the specified ID or the physical address space
* is not enabled for mapping and ENOMEM if there was a problem in
* creating address space area.
*
*/
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
{
ipl_t ipl;
cap_t caps;
mem_backend_data_t backend_data;
 
backend_data.base = pf;
backend_data.frames = pages;
ASSERT(TASK);
ASSERT((pf % FRAME_SIZE) == 0);
ASSERT((vp % PAGE_SIZE) == 0);
/*
* Make sure the caller is authorised to make this syscall.
*/
caps = cap_get(TASK);
cap_t caps = cap_get(TASK);
if (!(caps & CAP_MEM_MANAGER))
return EPERM;
 
ipl = interrupts_disable();
 
/*
* Check if the physical memory area is enabled for mapping.
* If the architecture supports virtually indexed caches, intercept
* attempts to create an illegal address alias.
*/
spinlock_lock(&parea_lock);
parea_t *parea;
btree_node_t *nodep;
parea = (parea_t *) btree_search(&parea_btree, (btree_key_t) pf, &nodep);
if (!parea || parea->frames < pages || ((flags & AS_AREA_CACHEABLE) &&
!parea->cacheable) || (!(flags & AS_AREA_CACHEABLE) &&
parea->cacheable)) {
/*
* This physical memory area cannot be mapped.
*/
mem_backend_data_t backend_data;
backend_data.base = pf;
backend_data.frames = pages;
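/*
 * backend_data is handed to as_area_create() below together with
 * phys_backend, so page faults in the new area resolve to the
 * physical frames starting at pf.
 */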
ipl_t ipl = interrupts_disable();
/* Find the zone of the physical memory */
spinlock_lock(&zones.lock);
count_t znum = find_zone(ADDR2PFN(pf), pages, 0);
if (znum == (count_t) -1) {
/* Frames not found in any zone
* -> assume it is a hardware device and allow mapping
*/
spinlock_unlock(&zones.lock);
goto map;
}
if (zones.info[znum].flags & ZONE_FIRMWARE) {
/* Frames are part of firmware */
spinlock_unlock(&zones.lock);
goto map;
}
if (zone_flags_available(zones.info[znum].flags)) {
/* Frames are part of physical memory, check if the memory
* region is enabled for mapping.
*/
spinlock_unlock(&zones.lock);
spinlock_lock(&parea_lock);
btree_node_t *nodep;
parea_t *parea = (parea_t *) btree_search(&parea_btree,
(btree_key_t) pf, &nodep);
if ((!parea) || (parea->frames < pages))
goto err;
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
return ENOENT;
goto map;
}
spinlock_unlock(&parea_lock);
 
err:
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
return ENOENT;
map:
spinlock_lock(&TASK->lock);
if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE,
&phys_backend, &backend_data)) {
if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
/*
* The address space area could not have been created.
* We report it using ENOMEM.
169,28 → 187,24
* @param size Size of the enabled I/O space.
*
* @return 0 on success, EPERM if the caller lacks capabilities to use this
* syscall, ENOENT if there is no task matching the specified ID.
*
*/
static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
{
ipl_t ipl;
cap_t caps;
task_t *t;
int rc;
/*
* Make sure the caller is authorised to make this syscall.
*/
caps = cap_get(TASK);
cap_t caps = cap_get(TASK);
if (!(caps & CAP_IO_MANAGER))
return EPERM;
ipl = interrupts_disable();
ipl_t ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
t = task_find_by_id(id);
task_t *task = task_find_by_id(id);
if ((!t) || (!context_check(CONTEXT, t->context))) {
if ((!task) || (!context_check(CONTEXT, task->context))) {
/*
* There is no task with the specified ID
* or the task belongs to a different security
200,15 → 214,16
interrupts_restore(ipl);
return ENOENT;
}
 
/* Lock the task and release the lock protecting tasks_btree. */
spinlock_lock(&t->lock);
spinlock_lock(&task->lock);
spinlock_unlock(&tasks_lock);
 
rc = ddi_iospace_enable_arch(t, ioaddr, size);
spinlock_unlock(&t->lock);
int rc = ddi_iospace_enable_arch(task, ioaddr, size);
spinlock_unlock(&task->lock);
interrupts_restore(ipl);
return rc;
}
 
220,7 → 235,8
* @param flags Flags of newly mapped pages
*
* @return 0 on success, otherwise it returns error code found in errno.h
*
*/
unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
unative_t pages, unative_t flags)
{
234,16 → 250,15
* @param uspace_io_arg User space address of DDI argument structure.
*
* @return 0 on success, otherwise it returns error code found in errno.h
*
*/
unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
{
ddi_ioarg_t arg;
int rc;
rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
int rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
if (rc != 0)
return (unative_t) rc;
return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
(uintptr_t) arg.ioaddr, (size_t) arg.size);
}
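The caller's side, as a sketch: only the ddi_ioarg_t fields dereferenced above (task_id, ioaddr, size) are assumed to exist; the values are hypothetical and the architecture-specific syscall invocation is elided:

	ddi_ioarg_t arg;

	arg.task_id = my_task_id;	/* hypothetical: task to gain I/O access */
	arg.ioaddr = 0x3f8;		/* hypothetical: base of a serial port range */
	arg.size = 8;			/* number of consecutive I/O ports */
	/* ...then invoke the sys_iospace_enable() syscall with &arg... */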
251,20 → 266,24
/** Disable or enable preemption.
*
* @param enable If non-zero, the preemption counter will be decremented,
* leading to potential enabling of preemption. Otherwise
* the preemption counter will be incremented, preventing
* preemption from occurring.
*
* @return Zero on success or EPERM if the caller's capabilities are not sufficient.
*
*/
unative_t sys_preempt_control(int enable)
{
if (!(cap_get(TASK) & CAP_PREEMPT_CONTROL))
return EPERM;
if (enable)
preemption_enable();
else
preemption_disable();
return 0;
}
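Since sys_preempt_control() only moves a counter, disable/enable calls nest; a sketch of the invariant (counter values shown for illustration):

	preemption_disable();	/* counter 0 -> 1: preemption off */
	preemption_disable();	/* counter 1 -> 2: still off */
	preemption_enable();	/* counter 2 -> 1: still off */
	preemption_enable();	/* counter 1 -> 0: preemption possible again */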
 
/** @}
*/
/branches/network/kernel/generic/src/ddi/irq.c
39,7 → 39,8
*
* This code is designed to support:
* - multiple devices sharing single IRQ
* - multiple IRQs per signle device
* - multiple IRQs per single device
* - multiple instances of the same device
*
*
* Note about architectures.
68,8 → 69,11
 
#include <ddi/irq.h>
#include <adt/hash_table.h>
#include <mm/slab.h>
#include <arch/types.h>
#include <synch/spinlock.h>
#include <console/console.h>
#include <memstr.h>
#include <arch.h>
 
#define KEY_INR 0
76,13 → 80,22
#define KEY_DEVNO 1
 
/**
* Spinlock protecting the hash table.
* Spinlock protecting the kernel IRQ hash table.
* This lock must be taken only when interrupts are disabled.
*/
SPINLOCK_INITIALIZE(irq_hash_table_lock);
static hash_table_t irq_hash_table;
SPINLOCK_INITIALIZE(irq_kernel_hash_table_lock);
/** The kernel IRQ hash table. */
static hash_table_t irq_kernel_hash_table;
 
/**
* Spinlock protecting the uspace IRQ hash table.
* This lock must be taken only when interrupts are disabled.
*/
SPINLOCK_INITIALIZE(irq_uspace_hash_table_lock);
/** The uspace IRQ hash table. */
hash_table_t irq_uspace_hash_table;
 
/**
* Hash table operations for cases when we know that
* there will be collisions between different keys.
*/
110,6 → 123,9
.remove_callback = NULL /* not used */
};
 
/** Number of buckets in either of the hash tables. */
static count_t buckets;
 
/** Initialize IRQ subsystem.
*
* @param inrs Number of unique IRQ numbers or INRs.
117,6 → 133,7
*/
void irq_init(count_t inrs, count_t chains)
{
buckets = chains;
/*
* Be smart about the choice of the hash table operations.
* In cases in which inrs equals the requested number of
123,10 → 140,17
* chains (i.e. where there is no collision between
* different keys), we can use optimized set of operations.
*/
if (inrs == chains)
hash_table_create(&irq_hash_table, chains, 2, &irq_lin_ops);
else
hash_table_create(&irq_hash_table, chains, 2, &irq_ht_ops);
if (inrs == chains) {
hash_table_create(&irq_uspace_hash_table, chains, 2,
&irq_lin_ops);
hash_table_create(&irq_kernel_hash_table, chains, 2,
&irq_lin_ops);
} else {
hash_table_create(&irq_uspace_hash_table, chains, 2,
&irq_ht_ops);
hash_table_create(&irq_kernel_hash_table, chains, 2,
&irq_ht_ops);
}
}
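For example (values hypothetical), an architecture initialization path would call:

	/* With inrs == chains no two INRs collide, so both hash tables
	 * are created with the optimized irq_lin_ops; inrs > chains
	 * would select the collision-tolerant irq_ht_ops instead. */
	irq_init(16, 16);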
 
/** Initialize one IRQ structure.
136,21 → 160,12
*/
void irq_initialize(irq_t *irq)
{
memsetb(irq, sizeof(irq_t), 0);
link_initialize(&irq->link);
spinlock_initialize(&irq->lock, "irq.lock");
irq->preack = false;
link_initialize(&irq->notif_cfg.link);
irq->inr = -1;
irq->devno = -1;
irq->trigger = (irq_trigger_t) 0;
irq->claim = NULL;
irq->handler = NULL;
irq->arg = NULL;
irq->notif_cfg.notify = false;
irq->notif_cfg.answerbox = NULL;
irq->notif_cfg.code = NULL;
irq->notif_cfg.method = 0;
irq->notif_cfg.counter = 0;
link_initialize(&irq->notif_cfg.link);
}
 
/** Register IRQ for device.
157,9 → 172,10
*
* The irq structure must be filled with information
* about the interrupt source and with the claim()
* function pointer and irq_handler() function pointer.
* function pointer and handler() function pointer.
*
* @param irq IRQ structure belonging to a device.
* @return True on success, false on failure.
*/
void irq_register(irq_t *irq)
{
170,88 → 186,101
};
ipl = interrupts_disable();
spinlock_lock(&irq_hash_table_lock);
hash_table_insert(&irq_hash_table, key, &irq->link);
spinlock_unlock(&irq_hash_table_lock);
spinlock_lock(&irq_kernel_hash_table_lock);
spinlock_lock(&irq->lock);
hash_table_insert(&irq_kernel_hash_table, key, &irq->link);
spinlock_unlock(&irq->lock);
spinlock_unlock(&irq_kernel_hash_table_lock);
interrupts_restore(ipl);
}
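Putting irq_initialize() and irq_register() together, a driver might attach its interrupt like this. The fb_* names are hypothetical; the claim callback taking irq_t * matches the claim(irq) calls in the compare functions below, while the irq_ownership_t return type and the handler signature are assumptions:

	static irq_t fb_irq;

	static irq_ownership_t fb_claim(irq_t *irq)
	{
		/* Ask the device whether it raised this interrupt. */
		return IRQ_ACCEPT;
	}

	static void fb_handler(irq_t *irq)
	{
		/* Service and acknowledge the device. */
	}

	void fb_attach_interrupt(inr_t inr, devno_t devno)
	{
		irq_initialize(&fb_irq);	/* zeroes the structure, sets up locks/links */
		fb_irq.inr = inr;
		fb_irq.devno = devno;
		fb_irq.claim = fb_claim;
		fb_irq.handler = fb_handler;
		irq_register(&fb_irq);		/* inserts into the kernel hash table */
	}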
 
/** Dispatch the IRQ.
/** Search and lock the uspace IRQ hash table.
*
* We assume this function is only called from interrupt
* context (i.e. that interrupts are disabled prior to
* this call).
*
* This function attempts to look up a fitting IRQ
* structure. In case of success, return with interrupts
* disabled and holding the respective structure.
*
* @param inr Interrupt number (aka inr or irq).
*
* @return IRQ structure of the respective device or NULL.
*/
irq_t *irq_dispatch_and_lock(inr_t inr)
static irq_t *irq_dispatch_and_lock_uspace(inr_t inr)
{
link_t *lnk;
unative_t key[] = {
(unative_t) inr,
(unative_t) -1 /* search will use claim() instead of devno */
};
spinlock_lock(&irq_hash_table_lock);
 
lnk = hash_table_find(&irq_hash_table, key);
spinlock_lock(&irq_uspace_hash_table_lock);
lnk = hash_table_find(&irq_uspace_hash_table, key);
if (lnk) {
irq_t *irq;
irq = hash_table_get_instance(lnk, irq_t, link);
 
spinlock_unlock(&irq_hash_table_lock);
spinlock_unlock(&irq_uspace_hash_table_lock);
return irq;
}
spinlock_unlock(&irq_uspace_hash_table_lock);
spinlock_unlock(&irq_hash_table_lock);
 
return NULL;
}
 
/** Find the IRQ structure corresponding to inr and devno.
/** Search and lock the kernel IRQ hash table.
*
* This function attempts to look up the IRQ structure
* corresponding to its arguments. On success, this
* function returns with interrupts disabled, holding
* the lock of the respective IRQ structure.
*
* This function assumes interrupts are already disabled.
*
* @param inr INR being looked up.
* @param devno Devno being looked up.
*
* @return Locked IRQ structure on success or NULL on failure.
*/
irq_t *irq_find_and_lock(inr_t inr, devno_t devno)
static irq_t *irq_dispatch_and_lock_kernel(inr_t inr)
{
link_t *lnk;
unative_t keys[] = {
unative_t key[] = {
(unative_t) inr,
(unative_t) devno
(unative_t) -1 /* search will use claim() instead of devno */
};
spinlock_lock(&irq_hash_table_lock);
 
lnk = hash_table_find(&irq_hash_table, keys);
spinlock_lock(&irq_kernel_hash_table_lock);
lnk = hash_table_find(&irq_kernel_hash_table, key);
if (lnk) {
irq_t *irq;
irq = hash_table_get_instance(lnk, irq_t, link);
 
spinlock_unlock(&irq_hash_table_lock);
spinlock_unlock(&irq_kernel_hash_table_lock);
return irq;
}
spinlock_unlock(&irq_kernel_hash_table_lock);
spinlock_unlock(&irq_hash_table_lock);
return NULL;
}
 
/** Dispatch the IRQ.
*
* We assume this function is only called from interrupt
* context (i.e. that interrupts are disabled prior to
* this call).
*
* This function attempts to look up a fitting IRQ
* structure. In case of success, return with interrupts
* disabled and holding the respective structure.
*
* @param inr Interrupt number (aka inr or irq).
*
* @return IRQ structure of the respective device or NULL.
*/
irq_t *irq_dispatch_and_lock(inr_t inr)
{
irq_t *irq;
/*
* If the kernel console is silenced,
* then try the uspace handlers first,
* falling back to the kernel handlers.
*
* If the kernel console is active,
* then do it the other way around.
*/
if (silent) {
irq = irq_dispatch_and_lock_uspace(inr);
if (irq)
return irq;
return irq_dispatch_and_lock_kernel(inr);
}
irq = irq_dispatch_and_lock_kernel(inr);
if (irq)
return irq;
return irq_dispatch_and_lock_uspace(inr);
}
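A sketch of the consumer in an architecture-level interrupt routine (the function name is hypothetical, interrupts are assumed to be already disabled, and the unlock is required because the lookup returns the structure locked; the handler signature is an assumption):

	void arch_irq_entry(inr_t inr)
	{
		irq_t *irq = irq_dispatch_and_lock(inr);
		if (irq) {
			irq->handler(irq);
			spinlock_unlock(&irq->lock);
		}
		/* else: spurious interrupt, no structure claimed it */
	}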
 
/** Compute hash index for the key.
270,7 → 299,7
index_t irq_ht_hash(unative_t key[])
{
inr_t inr = (inr_t) key[KEY_INR];
return inr % irq_hash_table.entries;
return inr % buckets;
}
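The two lookup paths (dispatch vs. find) differ only in the key vector handed to hash_table_find(); the compare functions below treat a devno of -1 as a sentinel meaning "ask claim()". A sketch, with variable names hypothetical:

	unative_t dispatch_key[] = {
		(unative_t) inr,	/* KEY_INR slot */
		(unative_t) -1		/* KEY_DEVNO slot: sentinel, compare via claim() */
	};

	unative_t find_key[] = {
		(unative_t) inr,	/* KEY_INR slot */
		(unative_t) devno	/* KEY_DEVNO slot: exact devno match */
	};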
 
/** Compare hash table element with a key.
304,7 → 333,8
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock(). */
rv = ((irq->inr == inr) && (irq->claim() == IRQ_ACCEPT));
rv = ((irq->inr == inr) &&
(irq->claim(irq) == IRQ_ACCEPT));
} else {
/* Invoked by irq_find_and_lock(). */
rv = ((irq->inr == inr) && (irq->devno == devno));
363,7 → 393,7
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock() */
rv = (irq->claim() == IRQ_ACCEPT);
rv = (irq->claim(irq) == IRQ_ACCEPT);
} else {
/* Invoked by irq_find_and_lock() */
rv = (irq->devno == devno);