Subversion Repositories HelenOS

Rev

Rev 2107 | Rev 3908 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericddi
 * @{
 */

/**
 * @file
 * @brief Device Driver Interface functions.
 *
 * This file contains the functions that comprise the Device Driver Interface.
 * These functions map physical memory into tasks and enable ranges of I/O
 * space for tasks.
 */

#include <ddi/ddi.h>
#include <ddi/ddi_arg.h>
#include <proc/task.h>
#include <security/cap.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <synch/spinlock.h>
#include <syscall/copy.h>
#include <adt/btree.h>
#include <arch.h>
#include <align.h>
#include <errno.h>

/** This lock protects the parea_btree. */
SPINLOCK_INITIALIZE(parea_lock);

/** B+tree with enabled physical memory areas. */
static btree_t parea_btree;

/** Initialize DDI. */
void ddi_init(void)
{
    btree_create(&parea_btree);
}

/** Enable a piece of physical memory for mapping by physmem_map().
 *
 * @param parea Pointer to physical area structure.
 *
 * @todo This function doesn't check for overlaps. It relies on the kernel to
 * create disjoint physical memory areas.
 */
void ddi_parea_register(parea_t *parea)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&parea_lock);

    /*
     * TODO: we should really check for overlaps here.
     * However, we should be safe because the kernel is pretty sane and
     * the memory of different devices doesn't overlap.
     */
    btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);

    spinlock_unlock(&parea_lock);
    interrupts_restore(ipl);
}

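/*
 * A minimal, hypothetical sketch of how a device driver could describe and
 * register a physical memory area so that it can later be mapped with
 * physmem_map(). The device address, kernel virtual base and size are made
 * up; the parea_t field names follow their uses elsewhere in this file.
 */
#if 0
static parea_t fb_parea;

static void fb_parea_init(void)
{
    fb_parea.pbase = 0x7f000000;    /* physical base of the device memory */
    fb_parea.vbase = 0x80000000;    /* kernel virtual base of the same memory */
    fb_parea.frames = 768;          /* size of the area in frames */
    fb_parea.cacheable = 0;         /* device memory must not be cached */

    ddi_parea_register(&fb_parea);
}
#endif
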
/** Map a piece of physical memory into the virtual address space of the
 * current task.
 *
 * @param pf Physical address of the starting frame.
 * @param vp Virtual address of the starting page.
 * @param pages Number of pages to map.
 * @param flags Address space area flags for the mapping.
 *
 * @return 0 on success, EPERM if the caller lacks capabilities to use this
 *  syscall, ENOENT if the physical address space is not enabled for mapping,
 *  ENOMEM if there was a problem creating the address space area, and
 *  ENOTSUP if an attempt to create an illegal address alias is detected.
 */
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
{
    ipl_t ipl;
    cap_t caps;
    mem_backend_data_t backend_data;

    backend_data.base = pf;
    backend_data.frames = pages;

    /*
     * Make sure the caller is authorised to make this syscall.
     */
    caps = cap_get(TASK);
    if (!(caps & CAP_MEM_MANAGER))
        return EPERM;

    ipl = interrupts_disable();

    /*
     * Check if the physical memory area is enabled for mapping.
     * If the architecture supports virtually indexed caches, intercept
     * attempts to create an illegal address alias.
     */
    spinlock_lock(&parea_lock);
    parea_t *parea;
    btree_node_t *nodep;
    parea = (parea_t *) btree_search(&parea_btree, (btree_key_t) pf, &nodep);
    if (!parea || parea->frames < pages || ((flags & AS_AREA_CACHEABLE) &&
        !parea->cacheable) || (!(flags & AS_AREA_CACHEABLE) &&
        parea->cacheable)) {
        /*
         * This physical memory area cannot be mapped.
         */
        spinlock_unlock(&parea_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

#ifdef CONFIG_VIRT_IDX_DCACHE
    if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) {
        /*
         * Refuse to create an illegal address alias.
         */
        spinlock_unlock(&parea_lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
#endif /* CONFIG_VIRT_IDX_DCACHE */

    spinlock_unlock(&parea_lock);

    spinlock_lock(&TASK->lock);

    if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
        AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
        /*
         * The address space area could not have been created.
         * We report it using ENOMEM.
         */
        spinlock_unlock(&TASK->lock);
        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * Mapping is created on-demand during page fault.
     */

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);
    return 0;
}

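/*
 * A hypothetical illustration of the checks above, continuing the fb_parea
 * sketch: a request succeeds only when the starting frame matches a
 * registered area, the requested size fits into that area and the
 * cacheability of the mapping agrees with it. The caller must also hold
 * CAP_MEM_MANAGER. The virtual address and page counts are made up.
 */
#if 0
static void physmem_map_example(int flags)
{
    /* May succeed (0): non-cacheable mapping of the registered fb_parea. */
    (void) ddi_physmem_map(0x7f000000, 0x40000000, 768,
        flags & ~AS_AREA_CACHEABLE);

    /* Fails (ENOENT): cacheability does not match the registered area. */
    (void) ddi_physmem_map(0x7f000000, 0x40000000, 768,
        flags | AS_AREA_CACHEABLE);

    /* Fails (ENOENT): more frames requested than fb_parea provides. */
    (void) ddi_physmem_map(0x7f000000, 0x40000000, 1024,
        flags & ~AS_AREA_CACHEABLE);
}
#endif
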
/** Enable a range of I/O space for a task.
 *
 * @param id Task ID of the destination task.
 * @param ioaddr Starting I/O address.
 * @param size Size of the enabled I/O space.
 *
 * @return 0 on success, EPERM if the caller lacks capabilities to use this
 *  syscall, ENOENT if there is no task matching the specified ID.
 */
static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
{
    ipl_t ipl;
    cap_t caps;
    task_t *t;
    int rc;

    /*
     * Make sure the caller is authorised to make this syscall.
     */
    caps = cap_get(TASK);
    if (!(caps & CAP_IO_MANAGER))
        return EPERM;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    t = task_find_by_id(id);

    if ((!t) || (!context_check(CONTEXT, t->context))) {
        /*
         * There is no task with the specified ID
         * or the task belongs to a different security
         * context.
         */
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    /* Lock the task and release the lock protecting tasks_btree. */
    spinlock_lock(&t->lock);
    spinlock_unlock(&tasks_lock);

    rc = ddi_iospace_enable_arch(t, ioaddr, size);

    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);
    return rc;
}

/** Wrapper for SYS_PHYSMEM_MAP syscall.
 *
 * @param phys_base Physical base address to map.
 * @param virt_base Destination virtual address.
 * @param pages Number of pages.
 * @param flags Flags of newly mapped pages.
 *
 * @return 0 on success, otherwise an error code from errno.h.
 */
unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
    unative_t pages, unative_t flags)
{
    return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
        FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
        (count_t) pages, (int) flags);
}

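/*
 * A hypothetical illustration of the wrapper above: the physical and virtual
 * bases are aligned down to FRAME_SIZE and PAGE_SIZE respectively before
 * ddi_physmem_map() is called, so an unaligned request maps the whole frame
 * and page containing the given addresses. The addresses are made up.
 */
#if 0
static void sys_physmem_map_example(void)
{
    /* Behaves like mapping the frame at 0x7f000000 to the page at 0x40000000. */
    (void) sys_physmem_map(0x7f000123, 0x40000456, 1, 0);
}
#endif
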
/** Wrapper for SYS_ENABLE_IOSPACE syscall.
 *
 * @param uspace_io_arg User space address of DDI argument structure.
 *
 * @return 0 on success, otherwise an error code from errno.h.
 */
unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
{
    ddi_ioarg_t arg;
    int rc;

    rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
    if (rc != 0)
        return (unative_t) rc;

    return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
        (uintptr_t) arg.ioaddr, (size_t) arg.size);
}

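/*
 * A hypothetical sketch of enabling an I/O range: from kernel context the
 * static ddi_iospace_enable() above could be invoked directly, while a user
 * task would pass the same values through a ddi_ioarg_t (task_id, ioaddr,
 * size) and sys_iospace_enable(). The port range of the first serial port is
 * used as an example; the caller must hold CAP_IO_MANAGER.
 */
#if 0
static void iospace_enable_example(task_id_t id)
{
    /* Enable I/O ports 0x3f8..0x3ff for the task with the given ID. */
    (void) ddi_iospace_enable(id, 0x3f8, 8);
}
#endif
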
/** Disable or enable preemption.
 *
 * @param enable If non-zero, the preemption counter will be decremented,
 *  leading to potential enabling of preemption. Otherwise the preemption
 *  counter will be incremented, preventing preemption from occurring.
 *
 * @return Zero on success or EPERM if the caller's capabilities are not
 *  sufficient.
 */
unative_t sys_preempt_control(int enable)
{
    if (!(cap_get(TASK) & CAP_PREEMPT_CONTROL))
        return EPERM;
    if (enable)
        preemption_enable();
    else
        preemption_disable();
    return 0;
}

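/*
 * A hypothetical sketch of the intended pairing: a task holding
 * CAP_PREEMPT_CONTROL disables preemption around a short, timing-critical
 * operation and re-enables it immediately afterwards.
 */
#if 0
static void preempt_control_example(void)
{
    (void) sys_preempt_control(0);    /* counter is incremented: preemption off */
    /* ... short, timing-critical work ... */
    (void) sys_preempt_control(1);    /* counter is decremented: preemption on */
}
#endif
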
/** @}
 */