/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** IRQ notification framework
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - METHOD: interrupt number
 * - ARG1: payload modified by a 'top-half' handler
 * - ARG2: payload
 * - ARG3: payload
 * - in_phone_hash: interrupt counter (may be needed to ensure correct
 *         ordering in multithreaded drivers)
 */
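
/*
 * Illustrative sketch (not part of the original sources): a driver could
 * describe its 'top-half' handler as an irq_code_t program built from the
 * CMD_* operations interpreted by code_execute() below. The device, the
 * port address and the identifier names used here are hypothetical.
 *
 *     static irq_cmd_t uart_cmds[] = {
 *         {
 *             .cmd = CMD_PORT_READ_1,     // read one byte from an I/O port
 *             .addr = (void *) 0x3f8,     // hypothetical UART data port
 *             .value = 0,
 *             .dstarg = 2                 // store the byte into ARG2
 *         }
 *     };
 *
 *     static irq_code_t uart_code = {
 *         .cmdcount = sizeof(uart_cmds) / sizeof(uart_cmds[0]),
 *         .cmds = uart_cmds
 *     };
 *
 * The resulting notification then carries the interrupt number in METHOD
 * and the byte read by the top-half in ARG2, as described above.
 */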

#include <arch.h>
#include <mm/slab.h>
#include <errno.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <atomic.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <print.h>

typedef struct {
    SPINLOCK_DECLARE(lock);
    answerbox_t *box;
    irq_code_t *code;
    atomic_t counter;
} ipc_irq_t;

static ipc_irq_t *irq_conns = NULL;
static int irq_conns_size;

/** Execute the 'top-half' code associated with an IRQ notification.
 *
 * @param call Notification call whose arguments may be filled in.
 * @param code Top-half program to execute; may be NULL.
 */
static void code_execute(call_t *call, irq_code_t *code)
{
    int i;
    __native dstval = 0;

    if (!code)
        return;

    for (i = 0; i < code->cmdcount; i++) {
        switch (code->cmds[i].cmd) {
        case CMD_MEM_READ_1:
            dstval = *((__u8 *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_2:
            dstval = *((__u16 *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_4:
            dstval = *((__u32 *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_8:
            dstval = *((__u64 *)code->cmds[i].addr);
            break;
        case CMD_MEM_WRITE_1:
            *((__u8 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_2:
            *((__u16 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_4:
            *((__u32 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_8:
            *((__u64 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
#if defined(ia32) || defined(amd64)
        case CMD_PORT_READ_1:
            dstval = inb((long)code->cmds[i].addr);
            break;
        case CMD_PORT_WRITE_1:
            outb((long)code->cmds[i].addr, code->cmds[i].value);
            break;
#endif
#if defined(ia64)
        case CMD_IA64_GETCHAR:
            dstval = _getc(&ski_uconsole);
            break;
#endif
#if defined(ppc32)
        case CMD_PPC32_GETCHAR:
            dstval = cuda_get_scancode();
            break;
#endif
        default:
            break;
        }
        /* Store the result into the requested message argument (ARG1..ARG3) */
        if (code->cmds[i].dstarg && code->cmds[i].dstarg < 4) {
            call->data.args[code->cmds[i].dstarg] = dstval;
        }
    }
}

static void code_free(irq_code_t *code)
{
    if (code) {
        free(code->cmds);
        free(code);
    }
}

/** Copy a top-half program from userspace into the kernel.
 *
 * Returns NULL if the program cannot be copied or is too large.
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
    irq_code_t *code;
    irq_cmd_t *ucmds;
    int rc;

    code = malloc(sizeof(*code), 0);
    rc = copy_from_uspace(code, ucode, sizeof(*code));
    if (rc != 0) {
        free(code);
        return NULL;
    }

    if (code->cmdcount > IRQ_MAX_PROG_SIZE) {
        free(code);
        return NULL;
    }
    ucmds = code->cmds;
    code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
    rc = copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * code->cmdcount);
    if (rc != 0) {
        free(code->cmds);
        free(code);
        return NULL;
    }

    return code;
}

/** Unregister a task from IRQ notifications.
 *
 * @param box Answerbox that was registered for the IRQ.
 * @param irq IRQ number.
 */
void ipc_irq_unregister(answerbox_t *box, int irq)
{
    ipl_t ipl;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);
    if (irq_conns[mq].box == box) {
        irq_conns[mq].box = NULL;
        code_free(irq_conns[mq].code);
        irq_conns[mq].code = NULL;
    }

    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);
}

/** Register an answerbox as the receiving end of IRQ notifications.
 *
 * @param box   Answerbox that will receive the notifications.
 * @param irq   IRQ number.
 * @param ucode Userspace pointer to an optional top-half program.
 *
 * @return 0 on success, EBADMEM or EEXISTS on failure.
 */
int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode)
{
    ipl_t ipl;
    irq_code_t *code;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ASSERT(irq_conns);

    if (ucode) {
        code = code_from_uspace(ucode);
        if (!code)
            return EBADMEM;
    } else
        code = NULL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        spinlock_unlock(&irq_conns[mq].lock);
        interrupts_restore(ipl);
        code_free(code);
        return EEXISTS;
    }
    irq_conns[mq].box = box;
    irq_conns[mq].code = code;
    atomic_set(&irq_conns[mq].counter, 0);
    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);

    return 0;
}
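
/*
 * Illustrative sketch (an assumption, not taken from this file): the
 * registration above is expected to be driven from the syscall layer on
 * behalf of the calling task, roughly along these lines. The wrapper name
 * and the answerbox member are hypothetical, and a real entry point would
 * also check the caller's privileges.
 *
 *     __native sys_ipc_register_irq(int irq, irq_code_t *ucode)
 *     {
 *         return ipc_irq_register(&TASK->answerbox, irq, ucode);
 *     }
 */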

/** Add a call to the proper answerbox queue.
 *
 * Assumes that irq_conns[mq].lock is held.
 */
static void send_call(int mq, call_t *call)
{
    spinlock_lock(&irq_conns[mq].box->irq_lock);
    list_append(&call->link, &irq_conns[mq].box->irq_notifs);
    spinlock_unlock(&irq_conns[mq].box->irq_lock);

    waitq_wakeup(&irq_conns[mq].box->wq, 0);
}

/** Send a notification message with explicitly supplied arguments.
 *
 * @param irq IRQ number.
 * @param a1  Value for ARG1 of the notification.
 * @param a2  Value for ARG2 of the notification.
 * @param a3  Value for ARG3 of the notification.
 */
void ipc_irq_send_msg(int irq, __native a1, __native a2, __native a3)
{
    call_t *call;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            spinlock_unlock(&irq_conns[mq].lock);
            return;
        }
        call->flags |= IPC_CALL_NOTIF;
        IPC_SET_METHOD(call->data, irq);
        IPC_SET_ARG1(call->data, a1);
        IPC_SET_ARG2(call->data, a2);
        IPC_SET_ARG3(call->data, a3);
        /* Attach a sequence counter to the message */
        call->private = atomic_preinc(&irq_conns[mq].counter);

        send_call(mq, call);
    }
    spinlock_unlock(&irq_conns[mq].lock);
}
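
/*
 * Illustrative sketch (hypothetical caller, not part of this file): an
 * in-kernel interrupt handler that has already read data from its device
 * could forward it to userspace like this, placing the payload into
 * ARG1..ARG3 of the notification. The mydev_* helpers are placeholders.
 *
 *     void mydev_irq_handler(int irq)
 *     {
 *         __native status = mydev_read_status();
 *         __native data = mydev_read_data();
 *
 *         ipc_irq_send_msg(irq, status, data, 0);
 *     }
 */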

/** Notify a task that an IRQ has occurred.
 *
 * Interrupts are expected to be disabled.
 */
void ipc_irq_send_notif(int irq)
{
    call_t *call;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ASSERT(irq_conns);
    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            spinlock_unlock(&irq_conns[mq].lock);
            return;
        }
        call->flags |= IPC_CALL_NOTIF;
        /* Attach a sequence counter to the message */
        call->private = atomic_preinc(&irq_conns[mq].counter);
        /* Set up args */
        IPC_SET_METHOD(call->data, irq);

        /* Execute the registered 'top-half' code, if any */
        code_execute(call, irq_conns[mq].code);

        send_call(mq, call);
    }

    spinlock_unlock(&irq_conns[mq].lock);
}
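
/*
 * Illustrative sketch (hypothetical dispatcher, not part of this file):
 * architecture code is expected to call ipc_irq_send_notif() from its
 * interrupt dispatch path with interrupts disabled, after any in-kernel
 * handling of the interrupt has been done.
 *
 *     void arch_irq_dispatch(int irq)
 *     {
 *         ipl_t ipl = interrupts_disable();
 *
 *         // ... acknowledge the interrupt controller, run kernel handlers ...
 *         ipc_irq_send_notif(irq);
 *
 *         interrupts_restore(ipl);
 *     }
 */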

/** Initialize the table of IRQ notification connections.
 *
 * @param irqcount Number of hardware IRQs to be supported.
 */
void ipc_irq_make_table(int irqcount)
{
    int i;

    irqcount += IPC_IRQ_RESERVED_VIRTUAL;

    irq_conns_size = irqcount;
    irq_conns = malloc(irqcount * sizeof(*irq_conns), 0);
    for (i = 0; i < irqcount; i++) {
        spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock");
        irq_conns[i].box = NULL;
        irq_conns[i].code = NULL;
    }
}
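
/*
 * Illustrative sketch (hypothetical call site, not part of this file): the
 * table is expected to be set up once during IPC initialization, sized by
 * the architecture's hardware IRQ count. IRQ_COUNT is a placeholder name.
 *
 *     void ipc_init(void)
 *     {
 *         ipc_irq_make_table(IRQ_COUNT);
 *         // ... other IPC initialization ...
 *     }
 */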

/** Disconnect all IRQ notifications of an answerbox.
 *
 * TODO: It may be better to keep a linked list, so that we would not have
 *       to walk the whole array on every cleanup.
 */
void ipc_irq_cleanup(answerbox_t *box)
{
    int i;
    ipl_t ipl;

    for (i = 0; i < irq_conns_size; i++) {
        ipl = interrupts_disable();
        spinlock_lock(&irq_conns[i].lock);
        if (irq_conns[i].box == box)
            irq_conns[i].box = NULL;
        spinlock_unlock(&irq_conns[i].lock);
        interrupts_restore(ipl);
    }
}
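
/*
 * Illustrative sketch (hypothetical call site, not part of this file):
 * when a task's answerbox is being destroyed, its IRQ notifications must
 * be disconnected first so that no further calls are queued on it. The
 * task_t member name used here is an assumption.
 *
 *     void task_ipc_cleanup(task_t *task)
 *     {
 *         ipc_irq_cleanup(&task->answerbox);
 *         // ... tear down the rest of the answerbox ...
 *     }
 */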