/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericddi
 * @{
 */

/**
 * @file
 * @brief Device Driver Interface functions.
 *
 * This file contains functions that comprise the Device Driver Interface.
 * These are the functions for mapping physical memory and enabling I/O
 * space to tasks.
 */

#include <ddi/ddi.h>
#include <ddi/ddi_arg.h>
#include <proc/task.h>
#include <security/cap.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <synch/spinlock.h>
#include <syscall/copy.h>
#include <adt/btree.h>
#include <arch.h>
#include <align.h>
#include <errno.h>

/** This lock protects the parea_btree. */
SPINLOCK_INITIALIZE(parea_lock);

/** B+tree with enabled physical memory areas. */
static btree_t parea_btree;

/** Initialize DDI. */
void ddi_init(void)
{
    btree_create(&parea_btree);
}

/** Enable a piece of physical memory for mapping by physmem_map().
 *
 * @param parea Pointer to physical area structure.
 *
 */
void ddi_parea_register(parea_t *parea)
{
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&parea_lock);

    /*
     * We don't check for overlaps here as the kernel is pretty sane.
     */
    btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);

    spinlock_unlock(&parea_lock);
    interrupts_restore(ipl);
}
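
/*
 * Illustrative sketch (hypothetical names): a driver that wants user space
 * to be able to map its frame buffer could register the physical range like
 * this. FB_PADDR and FB_FRAMES are made-up constants; pbase and frames are
 * the parea_t members used above.
 *
 *     static parea_t fb_parea;
 *
 *     void fb_init(void)
 *     {
 *         fb_parea.pbase = FB_PADDR;    // frame-aligned physical base
 *         fb_parea.frames = FB_FRAMES;  // number of frames to expose
 *         ddi_parea_register(&fb_parea);
 *     }
 */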

/** Map a piece of physical memory into the virtual address space of
 * the current task.
 *
 * @param pf    Physical address of the starting frame.
 * @param vp    Virtual address of the starting page.
 * @param pages Number of pages to map.
 * @param flags Address space area flags for the mapping.
 *
 * @return 0 on success, EPERM if the caller lacks capabilities to use this
 *         syscall, EBADMEM if pf or vp is not page aligned, ENOENT if the
 *         physical address space is not enabled for mapping and ENOMEM if
 *         there was a problem in creating the address space area.
 *
 */
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
{
    ASSERT(TASK);
    ASSERT((pf % FRAME_SIZE) == 0);
    ASSERT((vp % PAGE_SIZE) == 0);

    /*
     * Make sure the caller is authorised to make this syscall.
     */
    cap_t caps = cap_get(TASK);
    if (!(caps & CAP_MEM_MANAGER))
        return EPERM;

    mem_backend_data_t backend_data;
    backend_data.base = pf;
    backend_data.frames = pages;

    ipl_t ipl = interrupts_disable();

    /* Find the zone of the physical memory. */
    spinlock_lock(&zones.lock);
    count_t znum = find_zone(ADDR2PFN(pf), pages, 0);

    if (znum == (count_t) -1) {
        /* The frames were not found in any zone
         * -> assume it is a hardware device and allow the mapping.
         */
        spinlock_unlock(&zones.lock);
        goto map;
    }

    if (zones.info[znum].flags & ZONE_FIRMWARE) {
        /* The frames are part of firmware. */
        spinlock_unlock(&zones.lock);
        goto map;
    }

    if (zone_flags_available(zones.info[znum].flags)) {
        /* The frames are part of physical memory, check whether the
         * memory region is enabled for mapping.
         */
        spinlock_unlock(&zones.lock);

        spinlock_lock(&parea_lock);
        btree_node_t *nodep;
        parea_t *parea = (parea_t *) btree_search(&parea_btree,
            (btree_key_t) pf, &nodep);

        if ((!parea) || (parea->frames < pages)) {
            /* The area is not registered or is too small. */
            spinlock_unlock(&parea_lock);
            interrupts_restore(ipl);
            return ENOENT;
        }

        spinlock_unlock(&parea_lock);
        goto map;
    }

    /* The zone exists, but its frames may not be mapped. */
    spinlock_unlock(&zones.lock);
    interrupts_restore(ipl);
    return ENOENT;

map:
    spinlock_lock(&TASK->lock);

    if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
        AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
        /*
         * The address space area could not be created.
         * We report it using ENOMEM.
         */
        spinlock_unlock(&TASK->lock);
        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * The mapping is created on demand during the page fault.
     */

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);
    return 0;
}

/** Enable a range of I/O space for a task.
 *
 * @param id Task ID of the destination task.
 * @param ioaddr Starting I/O address.
 * @param size Size of the enabled I/O space.
 *
 * @return 0 on success, EPERM if the caller lacks capabilities to use this
 *         syscall, ENOENT if there is no task matching the specified ID.
 *
 */
static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
{
    /*
     * Make sure the caller is authorised to make this syscall.
     */
    cap_t caps = cap_get(TASK);
    if (!(caps & CAP_IO_MANAGER))
        return EPERM;

    ipl_t ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    task_t *task = task_find_by_id(id);

    if ((!task) || (!context_check(CONTEXT, task->context))) {
        /*
         * There is no task with the specified ID
         * or the task belongs to a different security
         * context.
         */
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    /* Lock the task and release the lock protecting tasks_btree. */
    spinlock_lock(&task->lock);
    spinlock_unlock(&tasks_lock);

    int rc = ddi_iospace_enable_arch(task, ioaddr, size);

    spinlock_unlock(&task->lock);
    interrupts_restore(ipl);

    return rc;
}

/** Wrapper for the SYS_PHYSMEM_MAP syscall.
 *
 * @param phys_base Physical base address to map.
 * @param virt_base Destination virtual address.
 * @param pages Number of pages.
 * @param flags Flags of the newly mapped pages.
 *
 * @return 0 on success, otherwise an error code from errno.h.
 *
 */
unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
    unative_t pages, unative_t flags)
{
    return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
        FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
        (count_t) pages, (int) flags);
}
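
/*
 * Illustrative sketch (hypothetical user-space caller): the syscall above is
 * normally reached through a libc wrapper; assuming a generic four-argument
 * syscall stub __SYSCALL4, mapping two pages of a device window might look
 * like this. DEVICE_PADDR and WINDOW_VADDR stand for page-aligned addresses
 * chosen by the caller.
 *
 *     unative_t rc = __SYSCALL4(SYS_PHYSMEM_MAP,
 *         (unative_t) DEVICE_PADDR, (unative_t) WINDOW_VADDR,
 *         2, AS_AREA_READ | AS_AREA_WRITE);
 *     if (rc != 0) {
 *         // handle EPERM, ENOENT or ENOMEM as documented above
 *     }
 */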

/** Wrapper for the SYS_ENABLE_IOSPACE syscall.
 *
 * @param uspace_io_arg User space address of the DDI argument structure.
 *
 * @return 0 on success, otherwise an error code from errno.h.
 *
 */
unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
{
    ddi_ioarg_t arg;
    int rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
    if (rc != 0)
        return (unative_t) rc;

    return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
        (uintptr_t) arg.ioaddr, (size_t) arg.size);
}
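
/*
 * Illustrative sketch (hypothetical values): the caller fills in a
 * ddi_ioarg_t and passes its user-space address to the syscall; task_id,
 * ioaddr and size are the members read above, the port range is made up
 * and __SYSCALL1 is assumed to be the generic one-argument syscall stub.
 *
 *     ddi_ioarg_t arg;
 *     arg.task_id = target_task_id;
 *     arg.ioaddr = (void *) 0x3f8;  // first I/O port to enable
 *     arg.size = 8;                 // number of ports
 *     unative_t rc = __SYSCALL1(SYS_ENABLE_IOSPACE, (unative_t) &arg);
 */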

/** Disable or enable preemption.
 *
 * @param enable If non-zero, the preemption counter will be decremented,
 *               leading to potential enabling of preemption. Otherwise
 *               the preemption counter will be incremented, preventing
 *               preemption from occurring.
 *
 * @return Zero on success or EPERM if the caller's capabilities are not
 *         sufficient.
 *
 */
unative_t sys_preempt_control(int enable)
{
    if (!(cap_get(TASK) & CAP_PREEMPT_CONTROL))
        return EPERM;

    if (enable)
        preemption_enable();
    else
        preemption_disable();

    return 0;
}
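
/*
 * Illustrative sketch (hypothetical user-space caller): a task holding
 * CAP_PREEMPT_CONTROL could bracket a timing-critical section with this
 * syscall, assuming the one-argument stub __SYSCALL1 and the syscall id
 * SYS_PREEMPT_CONTROL.
 *
 *     __SYSCALL1(SYS_PREEMPT_CONTROL, 0);  // disable preemption
 *     // ... timing-critical work ...
 *     __SYSCALL1(SYS_PREEMPT_CONTROL, 1);  // re-enable preemption
 */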

/** @}
 */