29,10 → 29,10 |
/** @addtogroup genericddi |
* @{ |
*/ |
|
|
/** |
* @file |
* @brief Device Driver Interface functions. |
* |
* This file contains functions that comprise the Device Driver Interface. |
* These are the functions for mapping physical memory and enabling I/O |
47,7 → 47,7 |
#include <mm/as.h> |
#include <synch/spinlock.h> |
#include <syscall/copy.h> |
#include <adt/btree.h> |
#include <arch.h> |
#include <align.h> |
#include <errno.h> |
55,13 → 55,13 |
/** This lock protects the parea_btree. */ |
SPINLOCK_INITIALIZE(parea_lock); |
|
/** B+tree with enabled physical memory areas. */ |
static btree_t parea_btree; |
|
/** Initialize DDI. */ |
void ddi_init(void) |
{ |
btree_create(&parea_btree); |
} |
|
/** Enable piece of physical memory for mapping by physmem_map(). |
68,22 → 68,16 |
* |
* @param parea Pointer to physical area structure. |
* |
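 * A typical caller fills in a statically allocated parea_t and hands it over;
 * the structure is linked into the B+tree by reference, so it must stay
 * allocated (a sketch only; the base address and frame count below are
 * illustrative, not taken from this file):
 *
 * @code
 * static parea_t fb_parea;
 *
 * fb_parea.pbase = ALIGN_DOWN(fb_phys_addr, FRAME_SIZE);  // physical base
 * fb_parea.frames = 2;                                     // frames covered
 * ddi_parea_register(&fb_parea);  // physmem_map() may now map this range
 * @endcode
 *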
*/ |
void ddi_parea_register(parea_t *parea) |
{ |
ipl_t ipl = interrupts_disable();
spinlock_lock(&parea_lock); |
|
/*
 * We don't check for overlaps here as the kernel is pretty sane.
 */
btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
|
spinlock_unlock(&parea_lock); |
interrupts_restore(ipl); |
91,64 → 85,83 |
|
/** Map piece of physical memory into virtual address space of current task. |
* |
* @param pf Physical address of the starting frame. |
* @param vp Virtual address of the starting page. |
* @param pages Number of pages to map. |
* @param flags Address space area flags for the mapping. |
* |
* @return 0 on success, EPERM if the caller lacks capabilities to use this |
 * syscall, EBADMEM if pf or vp is not page aligned, ENOENT if there
 * is no task matching the specified ID or the physical address space
 * is not enabled for mapping, and ENOMEM if there was a problem in
 * creating the address space area.
* |
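 * A minimal kernel-side sketch (the device addresses below are illustrative,
 * and the flags are assumed to be the usual AS_AREA_* constants from mm/as.h):
 *
 * @code
 * uintptr_t pf = ALIGN_DOWN(dev_phys_addr, FRAME_SIZE);
 * uintptr_t vp = ALIGN_DOWN(dev_virt_addr, PAGE_SIZE);
 * int rc = ddi_physmem_map(pf, vp, 2, AS_AREA_READ | AS_AREA_WRITE);
 * @endcode
 *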
*/ |
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
{ |
ASSERT(TASK);
ASSERT((pf % FRAME_SIZE) == 0);
ASSERT((vp % PAGE_SIZE) == 0);

/* |
* Make sure the caller is authorised to make this syscall. |
*/ |
cap_t caps = cap_get(TASK);
if (!(caps & CAP_MEM_MANAGER)) |
return EPERM; |
|
mem_backend_data_t backend_data; |
backend_data.base = pf; |
backend_data.frames = pages; |
|
ipl_t ipl = interrupts_disable(); |
|
/* Find the zone of the physical memory */ |
spinlock_lock(&zones.lock); |
count_t znum = find_zone(ADDR2PFN(pf), pages, 0); |
|
if (znum == (count_t) -1) { |
/* Frames not found in any zone
 * -> assume it is a hardware device and allow the mapping
 */
spinlock_unlock(&zones.lock); |
goto map; |
} |
|
if (zones.info[znum].flags & ZONE_FIRMWARE) { |
/* Frames are part of firmware */ |
spinlock_unlock(&zones.lock); |
goto map; |
} |
|
if (zone_flags_available(zones.info[znum].flags)) { |
/* Frames are part of physical memory; check whether the
 * memory region is enabled for mapping.
*/ |
spinlock_unlock(&zones.lock); |
|
spinlock_lock(&parea_lock); |
btree_node_t *nodep; |
parea_t *parea = (parea_t *) btree_search(&parea_btree, |
(btree_key_t) pf, &nodep); |
|
if ((!parea) || (parea->frames < pages)) {
/* Not enabled for mapping; parea_lock is the only lock
 * still held on this path. */
spinlock_unlock(&parea_lock);
goto err;
}
|
spinlock_unlock(&parea_lock); |
goto map; |
} |
|
/* The zone is known but is neither firmware nor available memory. */
spinlock_unlock(&zones.lock);

err:
interrupts_restore(ipl); |
return ENOENT; |
|
map: |
spinlock_lock(&TASK->lock); |
|
if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, |
AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) { |
/* |
* The address space area could not have been created. |
* We report it using ENOMEM. |
174,28 → 187,24 |
 * @param size Size of the enabled I/O space.
* |
* @return 0 on success, EPERM if the caller lacks capabilities to use this |
* syscall, ENOENT if there is no task matching the specified ID. |
* |
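 * A kernel-side sketch (the task ID and the I/O port range are illustrative):
 *
 * @code
 * // grant the driver task access to eight I/O ports starting at 0x3f8
 * int rc = ddi_iospace_enable(driver_task_id, 0x3f8, 8);
 * @endcode
 *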
*/ |
static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size) |
{ |
/* |
* Make sure the caller is authorised to make this syscall. |
*/ |
cap_t caps = cap_get(TASK);
if (!(caps & CAP_IO_MANAGER)) |
return EPERM; |
|
ipl_t ipl = interrupts_disable();
spinlock_lock(&tasks_lock); |
|
task_t *task = task_find_by_id(id);
|
if ((!task) || (!context_check(CONTEXT, task->context))) {
/* |
* There is no task with the specified ID |
* or the task belongs to a different security |
205,15 → 214,16 |
interrupts_restore(ipl); |
return ENOENT; |
} |
|
|
/* Lock the task and release the lock protecting tasks_btree. */ |
spinlock_lock(&task->lock);
spinlock_unlock(&tasks_lock); |
|
int rc = ddi_iospace_enable_arch(task, ioaddr, size); |
|
spinlock_unlock(&task->lock); |
interrupts_restore(ipl); |
|
return rc; |
} |
|
225,13 → 235,14 |
* @param flags Flags of newly mapped pages |
* |
 * @return 0 on success, otherwise it returns an error code from errno.h.
* |
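 * Callers do not have to pre-align their addresses; the wrapper rounds both
 * of them down before delegating. The values below are illustrative and
 * assume 4 KiB frames and pages:
 *
 * @code
 * // maps one page; 0x1000f234 is rounded down to frame 0x1000f000
 * sys_physmem_map(0x1000f234, 0x40001000, 1, AS_AREA_READ | AS_AREA_WRITE);
 * @endcode
 *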
*/ |
unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, |
unative_t pages, unative_t flags) |
{ |
return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base, |
FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE), |
(count_t) pages, (int) flags);
} |
|
/** Wrapper for SYS_ENABLE_IOSPACE syscall. |
239,16 → 250,15 |
* @param uspace_io_arg User space address of DDI argument structure. |
* |
 * @return 0 on success, otherwise it returns an error code from errno.h.
* |
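 * The argument structure is read from user memory, so a user space caller
 * prepares a ddi_ioarg_t first (a sketch; the values below are illustrative):
 *
 * @code
 * ddi_ioarg_t arg;
 * arg.task_id = driver_task_id;  // task that should gain I/O access
 * arg.ioaddr = 0x3f8;            // start of the I/O range
 * arg.size = 8;                  // size of the I/O range
 * // the address of arg is then passed via the SYS_ENABLE_IOSPACE syscall
 * @endcode
 *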
*/ |
unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg) |
{ |
ddi_ioarg_t arg; |
int rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t)); |
if (rc != 0) |
return (unative_t) rc; |
|
|
return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id, |
(uintptr_t) arg.ioaddr, (size_t) arg.size); |
} |
256,19 → 266,23 |
/** Disable or enable preemption. |
* |
* @param enable If non-zero, the preemption counter will be decremented, |
* leading to potential enabling of preemption. Otherwise |
* the preemption counter will be incremented, preventing |
* preemption from occurring. |
* |
 * @return Zero on success or EPERM if the caller's capabilities are not sufficient.
* |
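 * Calls are expected to come in balanced pairs (an illustrative sketch):
 *
 * @code
 * sys_preempt_control(0);  // counter incremented, preemption prevented
 * // ... timing-critical work ...
 * sys_preempt_control(1);  // counter decremented, preemption may resume
 * @endcode
 *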
*/ |
unative_t sys_preempt_control(int enable) |
{ |
if (!(cap_get(TASK) & CAP_PREEMPT_CONTROL))
return EPERM; |
|
if (enable) |
preemption_enable(); |
else |
preemption_disable(); |
|
return 0; |
} |
|