/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** IRQ notification framework
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write a port or memory, add information to the
 * notification IPC message).
 *
 * The structure of a notification message is as follows:
 * - METHOD: IPC_M_INTERRUPT
 * - ARG1: interrupt number
 * - ARG2: payload modified by a 'top-half' handler
 * - ARG3: interrupt counter (may be needed to assure correct order
 *         in multithreaded drivers)
 */
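
/*
 * Example (illustrative sketch only): a user-space driver receiving such a
 * notification might dispatch on the message fields roughly like this; the
 * ipc_wait_for_call() wrapper and the IPC_GET_* accessors are assumed to be
 * the usual user-space IPC primitives and are not defined in this file.
 *
 *  ipc_call_t call;
 *  ipc_callid_t callid = ipc_wait_for_call(&call);
 *  if (IPC_GET_METHOD(call) == IPC_M_INTERRUPT) {
 *      int irq = IPC_GET_ARG1(call);              // interrupt number
 *      __native payload = IPC_GET_ARG2(call);     // set by the 'top-half' handler
 *      __native counter = IPC_GET_ARG3(call);     // ordering counter
 *      // ... process the interrupt ...
 *  }
 */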

#include <arch.h>
#include <mm/slab.h>
#include <errno.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <atomic.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <print.h>

typedef struct {
    SPINLOCK_DECLARE(lock);
    answerbox_t *box;    /**< Answerbox receiving the notifications. */
    irq_code_t *code;    /**< Top-half program executed on interrupt. */
    atomic_t counter;    /**< Interrupt counter passed as ARG3. */
} ipc_irq_t;


static ipc_irq_t *irq_conns = NULL;
static int irq_conns_size;

/* Execute code associated with IRQ notification */
static void code_execute(call_t *call, irq_code_t *code)
{
    int i;

    if (!code)
        return;

    for (i = 0; i < code->cmdcount; i++) {
        switch (code->cmds[i].cmd) {
        case CMD_MEM_READ_1:
            IPC_SET_ARG2(call->data, *((__u8 *)code->cmds[i].addr));
            break;
        case CMD_MEM_READ_2:
            IPC_SET_ARG2(call->data, *((__u16 *)code->cmds[i].addr));
            break;
        case CMD_MEM_READ_4:
            IPC_SET_ARG2(call->data, *((__u32 *)code->cmds[i].addr));
            break;
        case CMD_MEM_READ_8:
            IPC_SET_ARG2(call->data, *((__u64 *)code->cmds[i].addr));
            break;
        case CMD_MEM_WRITE_1:
            *((__u8 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_2:
            *((__u16 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_4:
            *((__u32 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_8:
            *((__u64 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
#if defined(ia32) || defined(amd64)
        case CMD_PORT_READ_1:
            IPC_SET_ARG2(call->data, inb((long)code->cmds[i].addr));
            break;
        case CMD_PORT_WRITE_1:
            outb((long)code->cmds[i].addr, code->cmds[i].value);
            break;
#endif
#if defined(ia64)
        case CMD_IA64_GETCHAR:
            IPC_SET_ARG2(call->data, _getc(&ski_uconsole));
            break;
#endif
#if defined(ppc32)
        case CMD_PPC32_GETCHAR:
            IPC_SET_ARG2(call->data, cuda_get_scancode());
            break;
#endif
        default:
            break;
        }
    }
}
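
/*
 * Example (illustrative sketch only): a top-half program that code_execute()
 * could interpret, reading an 8-bit status register of a hypothetical
 * memory-mapped device into ARG2 and acknowledging it; the addresses and the
 * command list are made up for the sake of the sketch.
 *
 *  static irq_cmd_t example_cmds[] = {
 *      { .cmd = CMD_MEM_READ_1,  .addr = (void *) 0x80001000 },            // ARG2 := status
 *      { .cmd = CMD_MEM_WRITE_1, .addr = (void *) 0x80001004, .value = 1 } // ack the device
 *  };
 *  static irq_code_t example_code = {
 *      .cmdcount = 2,
 *      .cmds = example_cmds
 *  };
 */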

static void code_free(irq_code_t *code)
{
    if (code) {
        free(code->cmds);
        free(code);
    }
}

/** Copy a top-half program from userspace, checking its size */
static irq_code_t * code_from_uspace(irq_code_t *ucode)
{
    irq_code_t *code;
    irq_cmd_t *ucmds;
    int rc;

    code = malloc(sizeof(*code), 0);
    rc = copy_from_uspace(code, ucode, sizeof(*code));
    if (rc != 0) {
        free(code);
        return NULL;
    }

    if (code->cmdcount > IRQ_MAX_PROG_SIZE) {
        free(code);
        return NULL;
    }
    ucmds = code->cmds;
    code->cmds = malloc(sizeof(code->cmds[0]) * (code->cmdcount), 0);
    rc = copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * (code->cmdcount));
    if (rc != 0) {
        free(code->cmds);
        free(code);
        return NULL;
    }

    return code;
}

/** Unregister a task from an IRQ */
void ipc_irq_unregister(answerbox_t *box, int irq)
{
    ipl_t ipl;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);
    if (irq_conns[mq].box == box) {
        irq_conns[mq].box = NULL;
        code_free(irq_conns[mq].code);
        irq_conns[mq].code = NULL;
    }

    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);
}

/** Register an answerbox as the receiving end of interrupt notifications */
int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode)
{
    ipl_t ipl;
    irq_code_t *code;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ASSERT(irq_conns);

    if (ucode) {
        code = code_from_uspace(ucode);
        if (!code)
            return EBADMEM;
    } else
        code = NULL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        spinlock_unlock(&irq_conns[mq].lock);
        interrupts_restore(ipl);
        code_free(code);
        return EEXISTS;
    }
    irq_conns[mq].box = box;
    irq_conns[mq].code = code;
    atomic_set(&irq_conns[mq].counter, 0);
    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);

    return 0;
}
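
/*
 * Example (illustrative sketch only): a user-space driver reaches this
 * function through the corresponding IPC syscall; the ipc_register_irq()
 * wrapper name and the IRQ number are assumptions, only the irq_code_t
 * layout and the EEXISTS failure come from this file.
 *
 *  static irq_cmd_t uart_cmds[] = {
 *      { .cmd = CMD_PORT_READ_1, .addr = (void *) 0x3f8 }   // ARG2 := received byte
 *  };
 *  static irq_code_t uart_code = { .cmdcount = 1, .cmds = uart_cmds };
 *
 *  if (ipc_register_irq(4, &uart_code) != 0) {
 *      // registration failed, e.g. another task already owns the IRQ
 *  }
 */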

/** Add a call to the proper answerbox queue
 *
 * Assumes irq_conns[mq].lock is held.
 */
static void send_call(int mq, call_t *call)
{
    spinlock_lock(&irq_conns[mq].box->irq_lock);
    list_append(&call->link, &irq_conns[mq].box->irq_notifs);
    spinlock_unlock(&irq_conns[mq].box->irq_lock);

    waitq_wakeup(&irq_conns[mq].box->wq, 0);
}

/** Send a notification message */
void ipc_irq_send_msg(int irq, __native a2, __native a3)
{
    call_t *call;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            spinlock_unlock(&irq_conns[mq].lock);
            return;
        }
        call->flags |= IPC_CALL_NOTIF;
        IPC_SET_METHOD(call->data, IPC_M_INTERRUPT);
        IPC_SET_ARG1(call->data, irq);
        IPC_SET_ARG2(call->data, a2);
        IPC_SET_ARG3(call->data, a3);

        send_call(mq, call);
    }
    spinlock_unlock(&irq_conns[mq].lock);
}
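
/*
 * Example (illustrative sketch only): an in-kernel driver that already has
 * the payload in hand can bypass the top-half program and push it directly,
 * e.g. a keyboard driver forwarding a scancode; the IRQ number and variable
 * below are hypothetical.
 *
 *  ipc_irq_send_msg(1, scancode, 0);
 */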

/** Notify a task that an IRQ has occurred
 *
 * Interrupts are expected to be disabled.
 */
void ipc_irq_send_notif(int irq)
{
    call_t *call;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ASSERT(irq_conns);
    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            spinlock_unlock(&irq_conns[mq].lock);
            return;
        }
        call->flags |= IPC_CALL_NOTIF;
        IPC_SET_METHOD(call->data, IPC_M_INTERRUPT);
        IPC_SET_ARG1(call->data, irq);
        IPC_SET_ARG3(call->data, atomic_preinc(&irq_conns[mq].counter));

        /* Execute the top-half program associated with this IRQ */
        code_execute(call, irq_conns[mq].code);

        send_call(mq, call);
    }

    spinlock_unlock(&irq_conns[mq].lock);
}
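
/*
 * Example (illustrative sketch only): an architecture-level interrupt
 * dispatch routine would typically forward the event from its handler,
 * where interrupts are already disabled; the handler name and signature
 * below are hypothetical.
 *
 *  static void hypothetical_device_irq(int irq)
 *  {
 *      // ... acknowledge the interrupt controller ...
 *      ipc_irq_send_notif(irq);
 *  }
 */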


/** Initialize the table of interrupt handlers
 *
 * @param irqcount Number of hardware IRQs to be supported
 */
void ipc_irq_make_table(int irqcount)
{
    int i;

    irqcount += IPC_IRQ_RESERVED_VIRTUAL;

    irq_conns_size = irqcount;
    irq_conns = malloc(irqcount * sizeof(*irq_conns), 0);
    for (i = 0; i < irqcount; i++) {
        spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock");
        irq_conns[i].box = NULL;
        irq_conns[i].code = NULL;
    }
}
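
/*
 * Example (illustrative sketch only): this is meant to be called once during
 * IPC subsystem initialization with the architecture's hardware IRQ count;
 * the IRQ_COUNT constant is an assumption, not taken from this file.
 *
 *  ipc_irq_make_table(IRQ_COUNT);
 */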

/** Disconnect all IRQ notifications of an answerbox
 *
 * TODO: It may be better to use a linked list, so that we would not
 *       need to walk the whole array on every cleanup.
 */
void ipc_irq_cleanup(answerbox_t *box)
{
    int i;
    ipl_t ipl;

    for (i = 0; i < irq_conns_size; i++) {
        ipl = interrupts_disable();
        spinlock_lock(&irq_conns[i].lock);
        if (irq_conns[i].box == box) {
            irq_conns[i].box = NULL;
            code_free(irq_conns[i].code);
            irq_conns[i].code = NULL;
        }
        spinlock_unlock(&irq_conns[i].lock);
        interrupts_restore(ipl);
    }
}