/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */
/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - METHOD: interrupt number
 * - ARG1: payload modified by a 'top-half' handler
 * - ARG2: payload
 * - ARG3: payload
 * - in_phone_hash: interrupt counter (may be needed to ensure correct
 *         ordering in multithreaded drivers)
 */
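
/*
 * Illustrative sketch only (not compiled into the kernel): a driver could
 * describe its 'top-half' as a small pseudo-program of irq_cmd_t entries
 * and pass it to ipc_irq_register(). The port address 0x3f8 below is a
 * made-up example; the field names follow their use in code_execute().
 *
 *   static irq_cmd_t uart_cmds[] = {
 *       { .cmd = CMD_PORT_READ_1, .addr = (void *) 0x3f8, .dstarg = 1 }
 *   };
 *   static irq_code_t uart_code = {
 *       .cmdcount = 1,
 *       .cmds = uart_cmds
 *   };
 *
 * After registration, each interrupt produces a notification whose ARG1
 * carries the byte read by the top-half.
 */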

#include <arch.h>
#include <mm/slab.h>
#include <errno.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <atomic.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <print.h>

typedef struct {
    SPINLOCK_DECLARE(lock);
    answerbox_t *box;    /**< Answerbox receiving the notifications. */
    irq_code_t *code;    /**< 'Top-half' pseudo-program, may be NULL. */
    atomic_t counter;    /**< Interrupt sequence counter. */
} ipc_irq_t;


static ipc_irq_t *irq_conns = NULL;
static int irq_conns_size;

/** Execute the 'top-half' code associated with an IRQ notification. */
static void code_execute(call_t *call, irq_code_t *code)
{
    int i;
    unative_t dstval = 0;

    if (!code)
        return;

    for (i = 0; i < code->cmdcount; i++) {
        switch (code->cmds[i].cmd) {
        case CMD_MEM_READ_1:
            dstval = *((uint8_t *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_2:
            dstval = *((uint16_t *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_4:
            dstval = *((uint32_t *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_8:
            dstval = *((uint64_t *)code->cmds[i].addr);
            break;
        case CMD_MEM_WRITE_1:
            *((uint8_t *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_2:
            *((uint16_t *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_4:
            *((uint32_t *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_8:
            *((uint64_t *)code->cmds[i].addr) = code->cmds[i].value;
            break;
#if defined(ia32) || defined(amd64)
        case CMD_PORT_READ_1:
            dstval = inb((long)code->cmds[i].addr);
            break;
        case CMD_PORT_WRITE_1:
            outb((long)code->cmds[i].addr, code->cmds[i].value);
            break;
#endif
#if defined(ia64)
        case CMD_IA64_GETCHAR:
            dstval = _getc(&ski_uconsole);
            break;
#endif
#if defined(ppc32)
        case CMD_PPC32_GETCHAR:
            dstval = cuda_get_scancode();
            break;
#endif
        default:
            break;
        }
        /* Store the result only into ARG1..ARG3 of the notification. */
        if (code->cmds[i].dstarg && code->cmds[i].dstarg < 4) {
            call->data.args[code->cmds[i].dstarg] = dstval;
        }
    }
}

static void code_free(irq_code_t *code)
{
    if (code) {
        free(code->cmds);
        free(code);
    }
}

static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
    irq_code_t *code;
    irq_cmd_t *ucmds;
    int rc;

    code = malloc(sizeof(*code), 0);
    rc = copy_from_uspace(code, ucode, sizeof(*code));
    if (rc != 0) {
        free(code);
        return NULL;
    }

    if (code->cmdcount > IRQ_MAX_PROG_SIZE) {
        free(code);
        return NULL;
    }
    ucmds = code->cmds;
    code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
    rc = copy_from_uspace(code->cmds, ucmds,
        sizeof(code->cmds[0]) * code->cmdcount);
    if (rc != 0) {
        free(code->cmds);
        free(code);
        return NULL;
    }

    return code;
}

/** Unregister a task from IRQ notifications. */
void ipc_irq_unregister(answerbox_t *box, int irq)
{
    ipl_t ipl;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);
    if (irq_conns[mq].box == box) {
        irq_conns[mq].box = NULL;
        code_free(irq_conns[mq].code);
        irq_conns[mq].code = NULL;
    }

    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);
}

/** Register an answerbox as a receiving end of interrupt notifications. */
int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode)
{
    ipl_t ipl;
    irq_code_t *code;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ASSERT(irq_conns);

    if (ucode) {
        code = code_from_uspace(ucode);
        if (!code)
            return EBADMEM;
    } else
        code = NULL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        spinlock_unlock(&irq_conns[mq].lock);
        interrupts_restore(ipl);
        code_free(code);
        return EEXISTS;
    }
    irq_conns[mq].box = box;
    irq_conns[mq].code = code;
    atomic_set(&irq_conns[mq].counter, 0);
    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);

    return 0;
}
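
/*
 * Illustrative sketch only: from userspace, registration goes through the
 * IPC syscall layer. Assuming a libc wrapper along the lines of
 * ipc_register_irq(int irq, irq_code_t *ucode) (name used here purely for
 * illustration), a driver would do something like:
 *
 *   if (ipc_register_irq(device_irq, &uart_code) != 0) {
 *       // registration failed (EBADMEM or EEXISTS on the kernel side)
 *   }
 *
 * The kernel-side entry point is ipc_irq_register() above.
 */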

/** Add a call to the proper answerbox queue.
 *
 * Assumes that irq_conns[mq].lock is held.
 */
static void send_call(int mq, call_t *call)
{
    spinlock_lock(&irq_conns[mq].box->irq_lock);
    list_append(&call->link, &irq_conns[mq].box->irq_notifs);
    spinlock_unlock(&irq_conns[mq].box->irq_lock);

    waitq_wakeup(&irq_conns[mq].box->wq, 0);
}

/** Send a notification message. */
void ipc_irq_send_msg(int irq, unative_t a1, unative_t a2, unative_t a3)
{
    call_t *call;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            spinlock_unlock(&irq_conns[mq].lock);
            return;
        }
        call->flags |= IPC_CALL_NOTIF;
        IPC_SET_METHOD(call->data, irq);
        IPC_SET_ARG1(call->data, a1);
        IPC_SET_ARG2(call->data, a2);
        IPC_SET_ARG3(call->data, a3);
        /* Attach the interrupt sequence counter to the message. */
        call->private = atomic_preinc(&irq_conns[mq].counter);

        send_call(mq, call);
    }
    spinlock_unlock(&irq_conns[mq].lock);
}

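/*
 * Delivery path implemented below: the architecture-level interrupt handler
 * calls ipc_irq_send_notif(), which allocates a call non-blockingly
 * (FRAME_ATOMIC), runs the registered 'top-half' program via code_execute()
 * and queues the call on the answerbox through send_call(), waking up any
 * thread waiting on the box.
 */
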
/** Notify a task that an IRQ has occurred.
 *
 * We expect interrupts to be disabled.
 */
void ipc_irq_send_notif(int irq)
{
    call_t *call;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ASSERT(irq_conns);
    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            spinlock_unlock(&irq_conns[mq].lock);
            return;
        }
        call->flags |= IPC_CALL_NOTIF;
        /* Attach the interrupt sequence counter to the message. */
        call->private = atomic_preinc(&irq_conns[mq].counter);
        /* Set up the arguments. */
        IPC_SET_METHOD(call->data, irq);

        /* Execute the 'top-half' code associated with this IRQ. */
        code_execute(call, irq_conns[mq].code);

        send_call(mq, call);
    }

    spinlock_unlock(&irq_conns[mq].lock);
}


/** Initialize the table of IRQ notification connections.
 *
 * @param irqcount Number of hardware IRQs to be supported.
 */
void ipc_irq_make_table(int irqcount)
{
    int i;

    irqcount += IPC_IRQ_RESERVED_VIRTUAL;

    irq_conns_size = irqcount;
    irq_conns = malloc(irqcount * sizeof(*irq_conns), 0);
    for (i = 0; i < irqcount; i++) {
        spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock");
        irq_conns[i].box = NULL;
        irq_conns[i].code = NULL;
    }
}

/** Disconnect all IRQ notifications from an answerbox.
 *
 * @todo A linked list of active connections would avoid walking the
 *       whole array on every cleanup.
 */
void ipc_irq_cleanup(answerbox_t *box)
{
    int i;
    ipl_t ipl;

    for (i = 0; i < irq_conns_size; i++) {
        ipl = interrupts_disable();
        spinlock_lock(&irq_conns[i].lock);
        if (irq_conns[i].box == box) {
            irq_conns[i].box = NULL;
            /* Release the code as well (mirrors ipc_irq_unregister()). */
            code_free(irq_conns[i].code);
            irq_conns[i].code = NULL;
        }
        spinlock_unlock(&irq_conns[i].lock);
        interrupts_restore(ipl);
    }
}

/** @}
 */