/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */
/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message). An illustrative sketch of such a pseudocode program follows
 * this comment.
 *
 * The structure of a notification message is as follows:
 * - METHOD: method as registered by the SYS_IPC_REGISTER_IRQ syscall
 * - ARG1: payload modified by a 'top-half' handler
 * - ARG2: payload modified by a 'top-half' handler
 * - ARG3: payload modified by a 'top-half' handler
 * - ARG4: payload modified by a 'top-half' handler
 * - ARG5: payload modified by a 'top-half' handler
 * - in_phone_hash: interrupt counter (may be needed to ensure correct
 *   ordering in multithreaded drivers)
 *
 * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(),
 * ipc_irq_cleanup() and IRQ handlers:
 *
 * By always taking all of the locks (the uspace IRQ hash table lock, the IRQ
 * structure lock and the answerbox lock), we can rule out race conditions
 * between the registration functions and also the cleanup function. Thus an
 * observer can either see the IRQ structure present in both the hash table
 * and the answerbox list, or absent in both. Views in which the IRQ structure
 * would be linked in the hash table but not in the answerbox list, or vice
 * versa, are not possible.
 *
 * By always taking the hash table lock and the IRQ structure lock, we can
 * rule out a scenario in which we would free up an IRQ structure that is
 * still referenced by, for example, an IRQ handler. The locking scheme forces
 * us to lock the IRQ structure only after any in-progress IRQs on that
 * structure have finished. Because we hold the hash table lock, we prevent
 * new IRQs from taking new references to the IRQ structure.
 */

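/*
 * Illustrative sketch only (not compiled): a userspace driver could hand a
 * top-half pseudocode program like the following to the SYS_IPC_REGISTER_IRQ
 * syscall. The irq_cmd_t and irq_code_t field names are assumed from their
 * use in ipc_irq_top_half_claim() below, and the port address 0x60 is
 * hypothetical. The program reads a status byte into scratch[1], masks an
 * interrupt-pending bit into scratch[2], and accepts the interrupt only if
 * that bit is set (CMD_PREDICATE skips the next 'value' commands when the
 * source scratch register is zero).
 *
 *	static irq_cmd_t example_cmds[] = {
 *		{ .cmd = CMD_PIO_READ_8, .addr = (void *) 0x60, .dstarg = 1 },
 *		{ .cmd = CMD_BTEST, .srcarg = 1, .dstarg = 2, .value = 0x01 },
 *		{ .cmd = CMD_PREDICATE, .srcarg = 2, .value = 1 },
 *		{ .cmd = CMD_ACCEPT },
 *		{ .cmd = CMD_DECLINE }
 *	};
 *	static irq_code_t example_code = {
 *		.cmdcount = sizeof(example_cmds) / sizeof(example_cmds[0]),
 *		.cmds = example_cmds
 *	};
 */
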
#include <arch.h>
#include <mm/slab.h>
#include <errno.h>
#include <ddi/irq.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <print.h>

/** Free the top-half pseudocode.
 *
 * @param code Pointer to the top-half pseudocode.
 */
static void code_free(irq_code_t *code)
{
	if (code) {
		free(code->cmds);
		free(code);
	}
}

/** Copy the top-half pseudocode from userspace into the kernel.
 *
 * @param ucode Userspace address of the top-half pseudocode.
 *
 * @return Kernel address of the copied pseudocode.
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	irq_code_t *code;
	irq_cmd_t *ucmds;
	int rc;

	code = malloc(sizeof(*code), 0);
	rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != 0) {
		free(code);
		return NULL;
	}

	if (code->cmdcount > IRQ_MAX_PROG_SIZE) {
		free(code);
		return NULL;
	}
	ucmds = code->cmds;
	code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	rc = copy_from_uspace(code->cmds, ucmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != 0) {
		free(code->cmds);
		free(code);
		return NULL;
	}

	return code;
}

/** Register an answerbox as a receiving end for IRQ notifications.
 *
 * @param box    Receiving answerbox.
 * @param inr    IRQ number.
 * @param devno  Device number.
 * @param method Method to be associated with the notification.
 * @param ucode  Uspace pointer to the top-half pseudocode.
 *
 * @return EOK on success, EBADMEM if the pseudocode cannot be copied from
 *         userspace, or EEXISTS if the IRQ is already registered.
 */
int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
    unative_t method, irq_code_t *ucode)
{
	ipl_t ipl;
	irq_code_t *code;
	irq_t *irq;
	link_t *hlp;
	unative_t key[] = {
		(unative_t) inr,
		(unative_t) devno
	};

	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else {
		code = NULL;
	}

	/*
	 * Allocate and populate the IRQ structure.
	 */
	irq = malloc(sizeof(irq_t), 0);
	irq_initialize(irq);
	irq->devno = devno;
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.method = method;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Enlist the IRQ structure in the uspace IRQ hash table and the
	 * answerbox's list.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&irq_uspace_hash_table_lock);
	hlp = hash_table_find(&irq_uspace_hash_table, key);
	if (hlp) {
		irq_t *hirq __attribute__((unused))
		    = hash_table_get_instance(hlp, irq_t, link);

		/* hirq is locked */
		spinlock_unlock(&hirq->lock);
		code_free(code);
		spinlock_unlock(&irq_uspace_hash_table_lock);
		free(irq);
		interrupts_restore(ipl);
		return EEXISTS;
	}

	spinlock_lock(&irq->lock);	/* Not really necessary, but paranoid */
	spinlock_lock(&box->irq_lock);
	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
	list_append(&irq->notif_cfg.link, &box->irq_head);
	spinlock_unlock(&box->irq_lock);
	spinlock_unlock(&irq->lock);
	spinlock_unlock(&irq_uspace_hash_table_lock);

	interrupts_restore(ipl);
	return EOK;
}

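/*
 * Context sketch (assumption, not part of this file): the SYS_IPC_REGISTER_IRQ
 * syscall handler is expected to wrap ipc_irq_register() roughly as below,
 * passing the calling task's answerbox; the actual wrapper lives in the
 * syscall layer and may perform additional checks.
 *
 *	unative_t sys_ipc_register_irq(inr_t inr, devno_t devno,
 *	    unative_t method, irq_code_t *ucode)
 *	{
 *		return (unative_t) ipc_irq_register(&TASK->answerbox, inr,
 *		    devno, method, ucode);
 *	}
 */
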
/** Unregister a task from IRQ notification.
 *
 * @param box   Answerbox associated with the notification.
 * @param inr   IRQ number.
 * @param devno Device number.
 *
 * @return EOK on success or ENOENT if no such IRQ is registered.
 */
int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
{
	ipl_t ipl;
	unative_t key[] = {
		(unative_t) inr,
		(unative_t) devno
	};
	link_t *lnk;
	irq_t *irq;

	ipl = interrupts_disable();
	spinlock_lock(&irq_uspace_hash_table_lock);
	lnk = hash_table_find(&irq_uspace_hash_table, key);
	if (!lnk) {
		spinlock_unlock(&irq_uspace_hash_table_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	irq = hash_table_get_instance(lnk, irq_t, link);
	/* irq is locked */
	spinlock_lock(&box->irq_lock);

	ASSERT(irq->notif_cfg.answerbox == box);

	/* Free up the pseudocode and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);

	/*
	 * We need to drop the IRQ lock now because hash_table_remove() will
	 * try to reacquire it. That basically violates the natural locking
	 * order, but a deadlock in hash_table_remove() is prevented by the
	 * fact that we already held the IRQ lock and didn't drop the hash
	 * table lock in the meantime.
	 */
	spinlock_unlock(&irq->lock);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove(&irq_uspace_hash_table, key, 2);

	spinlock_unlock(&irq_uspace_hash_table_lock);
	spinlock_unlock(&box->irq_lock);

	/* Free up the IRQ structure. */
	free(irq);

	interrupts_restore(ipl);
	return EOK;
}

/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains a list of all
 * irq_t structures that are registered to send notifications to it.
 *
 * @param box Answerbox for which we want to carry out the cleanup.
 */
void ipc_irq_cleanup(answerbox_t *box)
{
	ipl_t ipl;

loop:
	ipl = interrupts_disable();
	spinlock_lock(&irq_uspace_hash_table_lock);
	spinlock_lock(&box->irq_lock);

	while (box->irq_head.next != &box->irq_head) {
		link_t *cur = box->irq_head.next;
		irq_t *irq;
		DEADLOCK_PROBE_INIT(p_irqlock);
		unative_t key[2];

		irq = list_get_instance(cur, irq_t, notif_cfg.link);
		if (!spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 */
			spinlock_unlock(&box->irq_lock);
			spinlock_unlock(&irq_uspace_hash_table_lock);
			interrupts_restore(ipl);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}
		key[0] = irq->inr;
		key[1] = irq->devno;

		ASSERT(irq->notif_cfg.answerbox == box);

		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);

		/* Free up the pseudocode and associated structures. */
		code_free(irq->notif_cfg.code);

		/*
		 * We need to drop the IRQ lock now because hash_table_remove()
		 * will try to reacquire it. That basically violates the
		 * natural locking order, but a deadlock in hash_table_remove()
		 * is prevented by the fact that we already held the IRQ lock
		 * and didn't drop the hash table lock in the meantime.
		 */
		spinlock_unlock(&irq->lock);

		/* Remove from the hash table. */
		hash_table_remove(&irq_uspace_hash_table, key, 2);

		free(irq);
	}

	spinlock_unlock(&box->irq_lock);
	spinlock_unlock(&irq_uspace_hash_table_lock);
	interrupts_restore(ipl);
}

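/*
 * Context sketch (assumption, not part of this file): the IPC teardown path
 * for a dying task is expected to invoke this once on the task's answerbox,
 * e.g.:
 *
 *	ipc_irq_cleanup(&TASK->answerbox);
 *
 * after which no further IRQ notifications can reach that answerbox.
 */
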
/** Add a call to the proper answerbox queue.
 *
 * Assume irq->lock is locked.
 *
 * @param irq  IRQ structure referencing the target answerbox.
 * @param call IRQ notification call.
 */
static void send_call(irq_t *irq, call_t *call)
{
	spinlock_lock(&irq->notif_cfg.answerbox->irq_lock);
	list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs);
	spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock);

	waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
}

/** Apply the top-half pseudocode to decide whether to accept the IRQ.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the pseudocode,
 *         IRQ_DECLINE otherwise.
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	unsigned int i;
	unative_t dstval;
	irq_code_t *code = irq->notif_cfg.code;
	unative_t *scratch = irq->notif_cfg.scratch;

	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	for (i = 0; i < code->cmdcount; i++) {
		unsigned int srcarg = code->cmds[i].srcarg;
		unsigned int dstarg = code->cmds[i].dstarg;

		if (srcarg >= IPC_CALL_LEN)
			break;
		if (dstarg >= IPC_CALL_LEN)
			break;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			dstval = pio_read_8((ioport8_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_16:
			dstval = pio_read_16((ioport16_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_32:
			dstval = pio_read_32((ioport32_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_BTEST:
			if (srcarg && dstarg) {
				dstval = scratch[srcarg] & code->cmds[i].value;
				scratch[dstarg] = dstval;
			}
			break;
		case CMD_PREDICATE:
			if (srcarg && !scratch[srcarg]) {
				i += code->cmds[i].value;
				continue;
			}
			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
			break;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	return IRQ_DECLINE;
}

/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	ASSERT(irq);

	if (irq->notif_cfg.answerbox) {
		call_t *call;

		call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter into the message. */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up the arguments from the top-half scratch registers. */
		IPC_SET_METHOD(call->data, irq->notif_cfg.method);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}

/** Send a notification message.
 *
 * @param irq IRQ structure.
 * @param a1  Driver-specific payload argument.
 * @param a2  Driver-specific payload argument.
 * @param a3  Driver-specific payload argument.
 * @param a4  Driver-specific payload argument.
 * @param a5  Driver-specific payload argument.
 */
void ipc_irq_send_msg(irq_t *irq, unative_t a1, unative_t a2, unative_t a3,
    unative_t a4, unative_t a5)
{
	call_t *call;

	spinlock_lock(&irq->lock);

	if (irq->notif_cfg.answerbox) {
		call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call) {
			spinlock_unlock(&irq->lock);
			return;
		}
		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter into the message. */
		call->priv = ++irq->notif_cfg.counter;

		IPC_SET_METHOD(call->data, irq->notif_cfg.method);
		IPC_SET_ARG1(call->data, a1);
		IPC_SET_ARG2(call->data, a2);
		IPC_SET_ARG3(call->data, a3);
		IPC_SET_ARG4(call->data, a4);
		IPC_SET_ARG5(call->data, a5);

		send_call(irq, call);
	}
	spinlock_unlock(&irq->lock);
}

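/*
 * Usage sketch (hypothetical caller): a kernel-side driver that computes its
 * payload in its own interrupt handler could forward a notification to the
 * registered task like this; the meaning of the arguments is entirely
 * driver-specific.
 *
 *	ipc_irq_send_msg(irq, (unative_t) scancode, 0, 0, 0, 0);
 */
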
/** @}
 */