--- Rev 3675
+++ Rev 4377
@@ -42 +42 @@
  * The structure of a notification message is as follows:
  * - METHOD: method as registered by the SYS_IPC_REGISTER_IRQ syscall
  * - ARG1: payload modified by a 'top-half' handler
  * - ARG2: payload modified by a 'top-half' handler
  * - ARG3: payload modified by a 'top-half' handler
+ * - ARG4: payload modified by a 'top-half' handler
+ * - ARG5: payload modified by a 'top-half' handler
  * - in_phone_hash: interrupt counter (may be needed to assure correct order
  *   in multithreaded drivers)
+ *
+ * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(),
+ * ipc_irq_cleanup() and IRQ handlers:
+ *
+ * By always taking all of the uspace IRQ hash table lock, IRQ structure lock
+ * and answerbox lock, we can rule out race conditions between the
+ * registration functions and also the cleanup function. Thus the observer can
+ * either see the IRQ structure present in both the hash table and the
+ * answerbox list or absent in both. Views in which the IRQ structure would be
+ * linked in the hash table but not in the answerbox list, or vice versa, are
+ * not possible.
+ *
+ * By always taking the hash table lock and the IRQ structure lock, we can
+ * rule out a scenario in which we would free up an IRQ structure, which is
+ * still referenced by, for example, an IRQ handler. The locking scheme forces
+ * us to lock the IRQ structure only after any progressing IRQs on that
+ * structure are finished. Because we hold the hash table lock, we prevent new
+ * IRQs from taking new references to the IRQ structure.
  */
 
 #include <arch.h>
 #include <mm/slab.h>
 #include <errno.h>
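Aside (not part of either revision): the synchronization note added in the hunk above describes the locking protocol only in prose. The minimal sketch below uses the lock, type, and function names that appear later in this diff, but the helper itself is hypothetical; it only illustrates the acquisition order that ipc_irq_register() and ipc_irq_unregister() follow: the uspace IRQ hash table lock first, then the IRQ structure lock, then the answerbox lock.

```c
/* Hypothetical helper, for illustration only. */
static void example_lock_order(answerbox_t *box, irq_t *irq)
{
	/* Interrupts are disabled around the whole critical section. */
	ipl_t ipl = interrupts_disable();

	spinlock_lock(&irq_uspace_hash_table_lock);	/* 1. hash table lock */
	spinlock_lock(&irq->lock);			/* 2. IRQ structure lock */
	spinlock_lock(&box->irq_lock);			/* 3. answerbox lock */

	/* ... observe or update both the hash table and the answerbox list ... */

	spinlock_unlock(&box->irq_lock);
	spinlock_unlock(&irq->lock);
	spinlock_unlock(&irq_uspace_hash_table_lock);

	interrupts_restore(ipl);
}
```

ipc_irq_cleanup() cannot honor this order for the IRQ lock because it discovers IRQ structures through the answerbox list; as the new code further down shows, it falls back to spinlock_trylock() and restarts on failure.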
@@ -56 +76 @@
 #include <ipc/irq.h>
 #include <syscall/copy.h>
 #include <console/console.h>
 #include <print.h>
 
-/** Execute code associated with IRQ notification.
- *
- * @param call Notification call.
- * @param code Top-half pseudocode.
- */
-static void code_execute(call_t *call, irq_code_t *code)
-{
-	unsigned int i;
-	unative_t dstval = 0;
-
-	if (!code)
-		return;
-
-	for (i = 0; i < code->cmdcount; i++) {
-		switch (code->cmds[i].cmd) {
-		case CMD_MEM_READ_1:
-			dstval = *((uint8_t *) code->cmds[i].addr);
-			break;
-		case CMD_MEM_READ_2:
-			dstval = *((uint16_t *) code->cmds[i].addr);
-			break;
-		case CMD_MEM_READ_4:
-			dstval = *((uint32_t *) code->cmds[i].addr);
-			break;
-		case CMD_MEM_READ_8:
-			dstval = *((uint64_t *) code->cmds[i].addr);
-			break;
-		case CMD_MEM_WRITE_1:
-			*((uint8_t *) code->cmds[i].addr) = code->cmds[i].value;
-			break;
-		case CMD_MEM_WRITE_2:
-			*((uint16_t *) code->cmds[i].addr) =
-			    code->cmds[i].value;
-			break;
-		case CMD_MEM_WRITE_4:
-			*((uint32_t *) code->cmds[i].addr) =
-			    code->cmds[i].value;
-			break;
-		case CMD_MEM_WRITE_8:
-			*((uint64_t *) code->cmds[i].addr) =
-			    code->cmds[i].value;
-			break;
-#if defined(ia32) || defined(amd64) || defined(ia64)
-		case CMD_PORT_READ_1:
-			dstval = inb((long) code->cmds[i].addr);
-			break;
-		case CMD_PORT_WRITE_1:
-			outb((long) code->cmds[i].addr, code->cmds[i].value);
-			break;
-#endif
-#if defined(ia64) && defined(SKI)
-		case CMD_IA64_GETCHAR:
-			dstval = _getc(&ski_uconsole);
-			break;
-#endif
-#if defined(ppc32)
-		case CMD_PPC32_GETCHAR:
-			dstval = cuda_get_scancode();
-			break;
-#endif
-		default:
-			break;
-		}
-		if (code->cmds[i].dstarg && code->cmds[i].dstarg <
-		    IPC_CALL_LEN) {
-			call->data.args[code->cmds[i].dstarg] = dstval;
-		}
-	}
-}
-
-/** Free top-half pseudocode.
+/** Free the top-half pseudocode.
  *
  * @param code Pointer to the top-half pseudocode.
  */
 static void code_free(irq_code_t *code)
 {
@@ -138 +88 @@
 		free(code->cmds);
 		free(code);
 	}
 }
 
-/** Copy top-half pseudocode from userspace into the kernel.
+/** Copy the top-half pseudocode from userspace into the kernel.
  *
  * @param ucode Userspace address of the top-half pseudocode.
  *
  * @return Kernel address of the copied pseudocode.
  */
@@ -174 +124 @@
 	}
 
 	return code;
 }
 
-/** Unregister task from IRQ notification.
- *
- * @param box Answerbox associated with the notification.
- * @param inr IRQ number.
- * @param devno Device number.
- */
-void ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
-{
-	ipl_t ipl;
-	irq_t *irq;
-
-	ipl = interrupts_disable();
-	irq = irq_find_and_lock(inr, devno);
-	if (irq) {
-		if (irq->notif_cfg.answerbox == box) {
-			code_free(irq->notif_cfg.code);
-			irq->notif_cfg.notify = false;
-			irq->notif_cfg.answerbox = NULL;
-			irq->notif_cfg.code = NULL;
-			irq->notif_cfg.method = 0;
-			irq->notif_cfg.counter = 0;
-
-			spinlock_lock(&box->irq_lock);
-			list_remove(&irq->notif_cfg.link);
-			spinlock_unlock(&box->irq_lock);
-
-			spinlock_unlock(&irq->lock);
-		}
-	}
-	interrupts_restore(ipl);
-}
-
 /** Register an answerbox as a receiving end for IRQ notifications.
  *
  * @param box Receiving answerbox.
  * @param inr IRQ number.
  * @param devno Device number.
  * @param method Method to be associated with the notification.
  * @param ucode Uspace pointer to top-half pseudocode.
+ *
+ * @return EBADMEM, ENOENT or EEXISTS on failure or 0 on success.
  *
- * @return EBADMEM, ENOENT or EEXISTS on failure or 0 on success.
  */
 int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
     unative_t method, irq_code_t *ucode)
 {
 	ipl_t ipl;
 	irq_code_t *code;
 	irq_t *irq;
+	link_t *hlp;
+	unative_t key[] = {
+		(unative_t) inr,
+		(unative_t) devno
+	};
 
 	if (ucode) {
 		code = code_from_uspace(ucode);
 		if (!code)
 			return EBADMEM;
 	} else {
 		code = NULL;
 	}
-
-	ipl = interrupts_disable();
-	irq = irq_find_and_lock(inr, devno);
-	if (!irq) {
-		interrupts_restore(ipl);
-		code_free(code);
-		return ENOENT;
-	}
-
-	if (irq->notif_cfg.answerbox) {
-		spinlock_unlock(&irq->lock);
-		interrupts_restore(ipl);
-		code_free(code);
-		return EEXISTS;
-	}
 
+	/*
+	 * Allocate and populate the IRQ structure.
+	 */
+	irq = malloc(sizeof(irq_t), 0);
+	irq_initialize(irq);
+	irq->devno = devno;
+	irq->inr = inr;
+	irq->claim = ipc_irq_top_half_claim;
+	irq->handler = ipc_irq_top_half_handler;
 	irq->notif_cfg.notify = true;
 	irq->notif_cfg.answerbox = box;
 	irq->notif_cfg.method = method;
 	irq->notif_cfg.code = code;
 	irq->notif_cfg.counter = 0;
 
+	/*
+	 * Enlist the IRQ structure in the uspace IRQ hash table and the
+	 * answerbox's list.
+	 */
+	ipl = interrupts_disable();
+	spinlock_lock(&irq_uspace_hash_table_lock);
+	hlp = hash_table_find(&irq_uspace_hash_table, key);
+	if (hlp) {
+		irq_t *hirq __attribute__((unused))
+		    = hash_table_get_instance(hlp, irq_t, link);
+
+		/* hirq is locked */
+		spinlock_unlock(&hirq->lock);
+		code_free(code);
+		spinlock_unlock(&irq_uspace_hash_table_lock);
+		free(irq);
+		interrupts_restore(ipl);
+		return EEXISTS;
+	}
+
+	spinlock_lock(&irq->lock); /* Not really necessary, but paranoid */
 	spinlock_lock(&box->irq_lock);
+	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
 	list_append(&irq->notif_cfg.link, &box->irq_head);
 	spinlock_unlock(&box->irq_lock);
+	spinlock_unlock(&irq->lock);
+	spinlock_unlock(&irq_uspace_hash_table_lock);
+
+	interrupts_restore(ipl);
+	return EOK;
+}
 
+/** Unregister task from IRQ notification.
+ *
+ * @param box Answerbox associated with the notification.
+ * @param inr IRQ number.
+ * @param devno Device number.
+ */
+int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
+{
+	ipl_t ipl;
+	unative_t key[] = {
+		(unative_t) inr,
+		(unative_t) devno
+	};
+	link_t *lnk;
+	irq_t *irq;
+
+	ipl = interrupts_disable();
+	spinlock_lock(&irq_uspace_hash_table_lock);
+	lnk = hash_table_find(&irq_uspace_hash_table, key);
+	if (!lnk) {
+		spinlock_unlock(&irq_uspace_hash_table_lock);
+		interrupts_restore(ipl);
+		return ENOENT;
+	}
+	irq = hash_table_get_instance(lnk, irq_t, link);
+	/* irq is locked */
+	spinlock_lock(&box->irq_lock);
+
+	ASSERT(irq->notif_cfg.answerbox == box);
+
+	/* Free up the pseudo code and associated structures. */
+	code_free(irq->notif_cfg.code);
+
+	/* Remove the IRQ from the answerbox's list. */
+	list_remove(&irq->notif_cfg.link);
+
+	/*
+	 * We need to drop the IRQ lock now because hash_table_remove() will try
+	 * to reacquire it. That basically violates the natural locking order,
+	 * but a deadlock in hash_table_remove() is prevented by the fact that
+	 * we already held the IRQ lock and didn't drop the hash table lock in
+	 * the meantime.
+	 */
 	spinlock_unlock(&irq->lock);
+
+	/* Remove the IRQ from the uspace IRQ hash table. */
+	hash_table_remove(&irq_uspace_hash_table, key, 2);
+
+	spinlock_unlock(&irq_uspace_hash_table_lock);
+	spinlock_unlock(&box->irq_lock);
+
+	/* Free up the IRQ structure. */
+	free(irq);
+
 	interrupts_restore(ipl);
+	return EOK;
+}
 
+
+/** Disconnect all IRQ notifications from an answerbox.
+ *
+ * This function is effective because the answerbox contains
+ * list of all irq_t structures that are registered to
+ * send notifications to it.
+ *
+ * @param box Answerbox for which we want to carry out the cleanup.
+ */
+void ipc_irq_cleanup(answerbox_t *box)
+{
+	ipl_t ipl;
+
+loop:
+	ipl = interrupts_disable();
+	spinlock_lock(&irq_uspace_hash_table_lock);
+	spinlock_lock(&box->irq_lock);
+
+	while (box->irq_head.next != &box->irq_head) {
+		link_t *cur = box->irq_head.next;
+		irq_t *irq;
+		DEADLOCK_PROBE_INIT(p_irqlock);
+		unative_t key[2];
+
+		irq = list_get_instance(cur, irq_t, notif_cfg.link);
+		if (!spinlock_trylock(&irq->lock)) {
+			/*
+			 * Avoid deadlock by trying again.
+			 */
+			spinlock_unlock(&box->irq_lock);
+			spinlock_unlock(&irq_uspace_hash_table_lock);
+			interrupts_restore(ipl);
+			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
+			goto loop;
+		}
+		key[0] = irq->inr;
+		key[1] = irq->devno;
+
+
+		ASSERT(irq->notif_cfg.answerbox == box);
+
+		/* Unlist from the answerbox. */
+		list_remove(&irq->notif_cfg.link);
+
+		/* Free up the pseudo code and associated structures. */
+		code_free(irq->notif_cfg.code);
+
+		/*
+		 * We need to drop the IRQ lock now because hash_table_remove()
+		 * will try to reacquire it. That basically violates the natural
+		 * locking order, but a deadlock in hash_table_remove() is
+		 * prevented by the fact that we already held the IRQ lock and
+		 * didn't drop the hash table lock in the meantime.
+		 */
+		spinlock_unlock(&irq->lock);
+
+		/* Remove from the hash table. */
+		hash_table_remove(&irq_uspace_hash_table, key, 2);
+
-	return 0;
+		free(irq);
+	}
+
+	spinlock_unlock(&box->irq_lock);
+	spinlock_unlock(&irq_uspace_hash_table_lock);
+	interrupts_restore(ipl);
 }
 
 /** Add a call to the proper answerbox queue.
  *
  * Assume irq->lock is locked.
@@ -278 +343 @@
 	spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock);
 
 	waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
 }
 
-/** Send notification message.
+/** Apply the top-half pseudo code to find out whether to accept the IRQ or not.
  *
  * @param irq IRQ structure.
- * @param a1 Driver-specific payload argument.
- * @param a2 Driver-specific payload argument.
- * @param a3 Driver-specific payload argument.
- * @param a4 Driver-specific payload argument.
- * @param a5 Driver-specific payload argument.
+ *
+ * @return IRQ_ACCEPT if the interrupt is accepted by the
+ * pseudocode. IRQ_DECLINE otherwise.
  */
-void ipc_irq_send_msg(irq_t *irq, unative_t a1, unative_t a2, unative_t a3,
-    unative_t a4, unative_t a5)
+irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
 {
-	call_t *call;
-
-	spinlock_lock(&irq->lock);
+	unsigned int i;
+	unative_t dstval;
+	irq_code_t *code = irq->notif_cfg.code;
+	unative_t *scratch = irq->notif_cfg.scratch;
 
-	if (irq->notif_cfg.answerbox) {
-		call = ipc_call_alloc(FRAME_ATOMIC);
-		if (!call) {
-			spinlock_unlock(&irq->lock);
-			return;
-		}
-		call->flags |= IPC_CALL_NOTIF;
-		IPC_SET_METHOD(call->data, irq->notif_cfg.method);
-		IPC_SET_ARG1(call->data, a1);
-		IPC_SET_ARG2(call->data, a2);
-		IPC_SET_ARG3(call->data, a3);
-		IPC_SET_ARG4(call->data, a4);
-		IPC_SET_ARG5(call->data, a5);
-		/* Put a counter to the message */
-		call->priv = ++irq->notif_cfg.counter;
+
+	if (!irq->notif_cfg.notify)
+		return IRQ_DECLINE;
+
+	if (!code)
+		return IRQ_DECLINE;
+
+	for (i = 0; i < code->cmdcount; i++) {
+		unsigned int srcarg = code->cmds[i].srcarg;
+		unsigned int dstarg = code->cmds[i].dstarg;
 
-		send_call(irq, call);
+		if (srcarg >= IPC_CALL_LEN)
+			break;
+		if (dstarg >= IPC_CALL_LEN)
+			break;
+
+		switch (code->cmds[i].cmd) {
+		case CMD_PIO_READ_8:
+			dstval = pio_read_8((ioport8_t *) code->cmds[i].addr);
+			if (dstarg)
+				scratch[dstarg] = dstval;
+			break;
+		case CMD_PIO_READ_16:
+			dstval = pio_read_16((ioport16_t *) code->cmds[i].addr);
+			if (dstarg)
+				scratch[dstarg] = dstval;
+			break;
+		case CMD_PIO_READ_32:
+			dstval = pio_read_32((ioport32_t *) code->cmds[i].addr);
+			if (dstarg)
+				scratch[dstarg] = dstval;
+			break;
+		case CMD_PIO_WRITE_8:
+			pio_write_8((ioport8_t *) code->cmds[i].addr,
+			    (uint8_t) code->cmds[i].value);
+			break;
+		case CMD_PIO_WRITE_16:
+			pio_write_16((ioport16_t *) code->cmds[i].addr,
+			    (uint16_t) code->cmds[i].value);
+			break;
+		case CMD_PIO_WRITE_32:
+			pio_write_32((ioport32_t *) code->cmds[i].addr,
+			    (uint32_t) code->cmds[i].value);
+			break;
+		case CMD_BTEST:
+			if (srcarg && dstarg) {
+				dstval = scratch[srcarg] & code->cmds[i].value;
+				scratch[dstarg] = dstval;
+			}
+			break;
+		case CMD_PREDICATE:
+			if (srcarg && !scratch[srcarg]) {
+				i += code->cmds[i].value;
+				continue;
+			}
+			break;
+		case CMD_ACCEPT:
+			return IRQ_ACCEPT;
+			break;
+		case CMD_DECLINE:
+		default:
+			return IRQ_DECLINE;
+		}
 	}
-	spinlock_unlock(&irq->lock);
+
+	return IRQ_DECLINE;
 }
 
-/** Notify a task that an IRQ had occurred.
+
+/* IRQ top-half handler.
  *
  * We expect interrupts to be disabled and the irq->lock already held.
  *
  * @param irq IRQ structure.
  */
-void ipc_irq_send_notif(irq_t *irq)
+void ipc_irq_top_half_handler(irq_t *irq)
 {
-	call_t *call;
-
 	ASSERT(irq);
 
 	if (irq->notif_cfg.answerbox) {
+		call_t *call;
+
 		call = ipc_call_alloc(FRAME_ATOMIC);
-		if (!call) {
+		if (!call)
 			return;
-		}
+
 		call->flags |= IPC_CALL_NOTIF;
 		/* Put a counter to the message */
 		call->priv = ++irq->notif_cfg.counter;
+
 		/* Set up args */
 		IPC_SET_METHOD(call->data, irq->notif_cfg.method);
+		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
+		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
+		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
+		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
+		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);
 
-		/* Execute code to handle irq */
-		code_execute(call, irq->notif_cfg.code);
-
 		send_call(irq, call);
 	}
 }
 
-/** Disconnect all IRQ notifications from an answerbox.
- *
- * This function is effective because the answerbox contains
- * list of all irq_t structures that are registered to
- * send notifications to it.
+/** Send notification message.
  *
- * @param box Answerbox for which we want to carry out the cleanup.
+ * @param irq IRQ structure.
+ * @param a1 Driver-specific payload argument.
+ * @param a2 Driver-specific payload argument.
+ * @param a3 Driver-specific payload argument.
+ * @param a4 Driver-specific payload argument.
+ * @param a5 Driver-specific payload argument.
  */
-void ipc_irq_cleanup(answerbox_t *box)
+void ipc_irq_send_msg(irq_t *irq, unative_t a1, unative_t a2, unative_t a3,
+    unative_t a4, unative_t a5)
 {
-	ipl_t ipl;
+	call_t *call;
 
-loop:
-	ipl = interrupts_disable();
-	spinlock_lock(&box->irq_lock);
+	spinlock_lock(&irq->lock);
 
-	while (box->irq_head.next != &box->irq_head) {
-		link_t *cur = box->irq_head.next;
-		irq_t *irq;
-		DEADLOCK_PROBE_INIT(p_irqlock);
-
-		irq = list_get_instance(cur, irq_t, notif_cfg.link);
-		if (!spinlock_trylock(&irq->lock)) {
-			/*
-			 * Avoid deadlock by trying again.
-			 */
-			spinlock_unlock(&box->irq_lock);
-			interrupts_restore(ipl);
-			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
-			goto loop;
+	if (irq->notif_cfg.answerbox) {
+		call = ipc_call_alloc(FRAME_ATOMIC);
+		if (!call) {
+			spinlock_unlock(&irq->lock);
+			return;
 		}
-
-		ASSERT(irq->notif_cfg.answerbox == box);
-
-		list_remove(&irq->notif_cfg.link);
-
-		/*
-		 * Don't forget to free any top-half pseudocode.
-		 */
-		code_free(irq->notif_cfg.code);
-
-		irq->notif_cfg.notify = false;
-		irq->notif_cfg.answerbox = NULL;
-		irq->notif_cfg.code = NULL;
-		irq->notif_cfg.method = 0;
-		irq->notif_cfg.counter = 0;
+		call->flags |= IPC_CALL_NOTIF;
+		/* Put a counter to the message */
+		call->priv = ++irq->notif_cfg.counter;
 
-		spinlock_unlock(&irq->lock);
+		IPC_SET_METHOD(call->data, irq->notif_cfg.method);
+		IPC_SET_ARG1(call->data, a1);
+		IPC_SET_ARG2(call->data, a2);
+		IPC_SET_ARG3(call->data, a3);
+		IPC_SET_ARG4(call->data, a4);
+		IPC_SET_ARG5(call->data, a5);
+
+		send_call(irq, call);
 	}
-
-	spinlock_unlock(&box->irq_lock);
-	interrupts_restore(ipl);
+	spinlock_unlock(&irq->lock);
 }
 
 /** @}
  */
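Aside (not part of either revision): the new top-half command set above (CMD_PIO_READ_*/CMD_PIO_WRITE_*, CMD_BTEST, CMD_PREDICATE, CMD_ACCEPT, CMD_DECLINE) is interpreted by ipc_irq_top_half_claim(). The sketch below shows the kind of program a driver might register through the SYS_IPC_REGISTER_IRQ syscall, which lands in ipc_irq_register() above. The element type name irq_cmd_t and the initializer field names are assumptions inferred from the accessors used in the new code (cmd, addr, value, srcarg, dstarg); the port address and bit mask are invented for illustration.

```c
/*
 * Hypothetical top-half pseudocode program, for illustration only.
 * The irq_cmd_t type name and field layout are assumed from the
 * accessors in ipc_irq_top_half_claim(); 0x64 and 0x01 are made up.
 */
static irq_cmd_t example_cmds[] = {
	{
		/* Read the (hypothetical) status port into scratch[1]. */
		.cmd = CMD_PIO_READ_8,
		.addr = (void *) 0x64,
		.dstarg = 1
	},
	{
		/* Keep only the (hypothetical) 'interrupt pending' bit in scratch[2]. */
		.cmd = CMD_BTEST,
		.value = 0x01,
		.srcarg = 1,
		.dstarg = 2
	},
	{
		/* If scratch[2] is zero, skip the next command (CMD_ACCEPT). */
		.cmd = CMD_PREDICATE,
		.value = 1,
		.srcarg = 2
	},
	{
		/* The interrupt is ours: accept it. */
		.cmd = CMD_ACCEPT
	},
	{
		/* Otherwise decline so other handlers can claim it. */
		.cmd = CMD_DECLINE
	}
};

static irq_code_t example_code = {
	.cmdcount = sizeof(example_cmds) / sizeof(example_cmds[0]),
	.cmds = example_cmds
};
```

If the program reaches CMD_ACCEPT, ipc_irq_top_half_handler() later copies scratch[1] through scratch[5] into ARG1 through ARG5 of the notification message, which is what the ARG4/ARG5 additions to the header comment in the first hunk refer to.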