/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */
/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - METHOD: interrupt number
 * - ARG1: payload modified by a 'top-half' handler
 * - ARG2: payload
 * - ARG3: payload
 * - in_phone_hash: interrupt counter (may be needed to ensure correct order
 *   in multithreaded drivers)
 */

#include <arch.h>
#include <mm/slab.h>
#include <errno.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <atomic.h>
#include <syscall/copy.h>
#include <console/console.h>

typedef struct {
    SPINLOCK_DECLARE(lock);
    answerbox_t *box;
    irq_code_t *code;
    atomic_t counter;
} ipc_irq_t;

static ipc_irq_t *irq_conns = NULL;
static int irq_conns_size;

#include <print.h>
/* Execute code associated with IRQ notification */
static void code_execute(call_t *call, irq_code_t *code)
{
    int i;
    unative_t dstval = 0;

    if (!code)
        return;

    for (i = 0; i < code->cmdcount; i++) {
        switch (code->cmds[i].cmd) {
            case CMD_MEM_READ_1:
                dstval = *((uint8_t *) code->cmds[i].addr);
                break;
            case CMD_MEM_READ_2:
                dstval = *((uint16_t *) code->cmds[i].addr);
                break;
            case CMD_MEM_READ_4:
                dstval = *((uint32_t *) code->cmds[i].addr);
                break;
            case CMD_MEM_READ_8:
                dstval = *((uint64_t *) code->cmds[i].addr);
                break;
            case CMD_MEM_WRITE_1:
                *((uint8_t *) code->cmds[i].addr) = code->cmds[i].value;
                break;
            case CMD_MEM_WRITE_2:
                *((uint16_t *) code->cmds[i].addr) = code->cmds[i].value;
                break;
            case CMD_MEM_WRITE_4:
                *((uint32_t *) code->cmds[i].addr) = code->cmds[i].value;
                break;
            case CMD_MEM_WRITE_8:
                *((uint64_t *) code->cmds[i].addr) = code->cmds[i].value;
                break;
#if defined(ia32) || defined(amd64)
            case CMD_PORT_READ_1:
                dstval = inb((long) code->cmds[i].addr);
                break;
            case CMD_PORT_WRITE_1:
                outb((long) code->cmds[i].addr, code->cmds[i].value);
                break;
#endif
#if defined(ia64)
            case CMD_IA64_GETCHAR:
                dstval = _getc(&ski_uconsole);
                break;
#endif
#if defined(ppc32)
            case CMD_PPC32_GETCHAR:
                dstval = cuda_get_scancode();
                break;
#endif
            default:
                break;
        }
        if (code->cmds[i].dstarg && code->cmds[i].dstarg < 4) {
            call->data.args[code->cmds[i].dstarg] = dstval;
        }
    }
}

static void code_free(irq_code_t *code)
{
    if (code) {
        free(code->cmds);
        free(code);
    }
}

static irq_code_t * code_from_uspace(irq_code_t *ucode)
{
    irq_code_t *code;
    irq_cmd_t *ucmds;
    int rc;

    code = malloc(sizeof(*code), 0);
    rc = copy_from_uspace(code, ucode, sizeof(*code));
    if (rc != 0) {
        free(code);
        return NULL;
    }

    if (code->cmdcount > IRQ_MAX_PROG_SIZE) {
        free(code);
        return NULL;
    }
    ucmds = code->cmds;
    code->cmds = malloc(sizeof(code->cmds[0]) * (code->cmdcount), 0);
    rc = copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * (code->cmdcount));
    if (rc != 0) {
        free(code->cmds);
        free(code);
        return NULL;
    }

    return code;
}

/** Unregister a task from an IRQ */
void ipc_irq_unregister(answerbox_t *box, int irq)
{
    ipl_t ipl;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);
    if (irq_conns[mq].box == box) {
        irq_conns[mq].box = NULL;
        code_free(irq_conns[mq].code);
        irq_conns[mq].code = NULL;
    }

    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);
}

/** Register an answerbox as a receiving end of interrupt notifications */
int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode)
{
    ipl_t ipl;
    irq_code_t *code;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ASSERT(irq_conns);

    if (ucode) {
        code = code_from_uspace(ucode);
        if (!code)
            return EBADMEM;
    } else
        code = NULL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        spinlock_unlock(&irq_conns[mq].lock);
        interrupts_restore(ipl);
        code_free(code);
        return EEXISTS;
    }
    irq_conns[mq].box = box;
    irq_conns[mq].code = code;
    atomic_set(&irq_conns[mq].counter, 0);
    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);

    return 0;
}

/** Add a call to the proper answerbox queue.
 *
 * Assumes that irq_conns[mq].lock is held.
 */
static void send_call(int mq, call_t *call)
{
    spinlock_lock(&irq_conns[mq].box->irq_lock);
    list_append(&call->link, &irq_conns[mq].box->irq_notifs);
    spinlock_unlock(&irq_conns[mq].box->irq_lock);

    waitq_wakeup(&irq_conns[mq].box->wq, 0);
}

/** Send a notification message. */
void ipc_irq_send_msg(int irq, unative_t a1, unative_t a2, unative_t a3)
{
    call_t *call;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            spinlock_unlock(&irq_conns[mq].lock);
            return;
        }
        call->flags |= IPC_CALL_NOTIF;
        IPC_SET_METHOD(call->data, irq);
        IPC_SET_ARG1(call->data, a1);
        IPC_SET_ARG2(call->data, a2);
        IPC_SET_ARG3(call->data, a3);
        /* Put a counter to the message */
        call->private = atomic_preinc(&irq_conns[mq].counter);

        send_call(mq, call);
    }
    spinlock_unlock(&irq_conns[mq].lock);
}
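
/*
 * Note (added for clarity): ipc_irq_send_msg() above delivers a notification
 * whose payload arguments are supplied directly by the caller, whereas
 * ipc_irq_send_notif() below only sets the interrupt number and lets the
 * registered 'top-half' program fill in the remaining arguments via
 * code_execute().
 */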
262 | |||
1698 | jermar | 263 | /** Notify task that an irq had occurred. |
1281 | palkovsky | 264 | * |
265 | * We expect interrupts to be disabled |
||
266 | */ |
||
267 | void ipc_irq_send_notif(int irq) |
||
268 | { |
||
269 | call_t *call; |
||
1595 | palkovsky | 270 | int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
1281 | palkovsky | 271 | |
272 | ASSERT(irq_conns); |
||
1595 | palkovsky | 273 | spinlock_lock(&irq_conns[mq].lock); |
1281 | palkovsky | 274 | |
1595 | palkovsky | 275 | if (irq_conns[mq].box) { |
1281 | palkovsky | 276 | call = ipc_call_alloc(FRAME_ATOMIC); |
1591 | palkovsky | 277 | if (!call) { |
1595 | palkovsky | 278 | spinlock_unlock(&irq_conns[mq].lock); |
1591 | palkovsky | 279 | return; |
280 | } |
||
1281 | palkovsky | 281 | call->flags |= IPC_CALL_NOTIF; |
1693 | palkovsky | 282 | /* Put a counter to the message */ |
283 | call->private = atomic_preinc(&irq_conns[mq].counter); |
||
284 | /* Set up args */ |
||
285 | IPC_SET_METHOD(call->data, irq); |
||
1281 | palkovsky | 286 | |
287 | /* Execute code to handle irq */ |
||
1595 | palkovsky | 288 | code_execute(call, irq_conns[mq].code); |
289 | |||
290 | send_call(mq, call); |
||
1281 | palkovsky | 291 | } |
292 | |||
1595 | palkovsky | 293 | spinlock_unlock(&irq_conns[mq].lock); |
1281 | palkovsky | 294 | } |
295 | |||
296 | |||
/** Initialize the table of interrupt handlers.
 *
 * @param irqcount Number of hardware IRQs to be supported.
 */
void ipc_irq_make_table(int irqcount)
{
    int i;

    irqcount += IPC_IRQ_RESERVED_VIRTUAL;

    irq_conns_size = irqcount;
    irq_conns = malloc(irqcount * (sizeof(*irq_conns)), 0);
    for (i = 0; i < irqcount; i++) {
        spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock");
        irq_conns[i].box = NULL;
        irq_conns[i].code = NULL;
    }
}

/** Disconnect all IRQ notifications of an answerbox.
 *
 * @todo It might be better to use a linked list, so that
 *       we would not need to walk the whole array on every cleanup.
 */
void ipc_irq_cleanup(answerbox_t *box)
{
    int i;
    ipl_t ipl;

    for (i = 0; i < irq_conns_size; i++) {
        ipl = interrupts_disable();
        spinlock_lock(&irq_conns[i].lock);
        if (irq_conns[i].box == box)
            irq_conns[i].box = NULL;
        spinlock_unlock(&irq_conns[i].lock);
        interrupts_restore(ipl);
    }
}

/** @}
 */