Rev 4263 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 4263 | Rev 4327 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (c) 2006 Ondrej Palkovsky |
2 | * Copyright (c) 2006 Ondrej Palkovsky |
3 | * Copyright (c) 2006 Jakub Jermar |
3 | * Copyright (c) 2006 Jakub Jermar |
4 | * All rights reserved. |
4 | * All rights reserved. |
5 | * |
5 | * |
6 | * Redistribution and use in source and binary forms, with or without |
6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions |
7 | * modification, are permitted provided that the following conditions |
8 | * are met: |
8 | * are met: |
9 | * |
9 | * |
10 | * - Redistributions of source code must retain the above copyright |
10 | * - Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. |
11 | * notice, this list of conditions and the following disclaimer. |
12 | * - Redistributions in binary form must reproduce the above copyright |
12 | * - Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the |
13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. |
14 | * documentation and/or other materials provided with the distribution. |
15 | * - The name of the author may not be used to endorse or promote products |
15 | * - The name of the author may not be used to endorse or promote products |
16 | * derived from this software without specific prior written permission. |
16 | * derived from this software without specific prior written permission. |
17 | * |
17 | * |
18 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
19 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
20 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
21 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
22 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
23 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
24 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
25 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
27 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
28 | */ |
28 | */ |
29 | 29 | ||
30 | /** @addtogroup genericipc |
30 | /** @addtogroup genericipc |
31 | * @{ |
31 | * @{ |
32 | */ |
32 | */ |
33 | /** |
33 | /** |
34 | * @file |
34 | * @file |
35 | * @brief IRQ notification framework. |
35 | * @brief IRQ notification framework. |
36 | * |
36 | * |
37 | * This framework allows applications to register to receive a notification |
37 | * This framework allows applications to register to receive a notification |
38 | * when interrupt is detected. The application may provide a simple 'top-half' |
38 | * when interrupt is detected. The application may provide a simple 'top-half' |
39 | * handler as part of its registration, which can perform simple operations |
39 | * handler as part of its registration, which can perform simple operations |
40 | * (read/write port/memory, add information to notification ipc message). |
40 | * (read/write port/memory, add information to notification ipc message). |
41 | * |
41 | * |
42 | * The structure of a notification message is as follows: |
42 | * The structure of a notification message is as follows: |
43 | * - METHOD: method as registered by the SYS_IPC_REGISTER_IRQ syscall |
43 | * - METHOD: method as registered by the SYS_IPC_REGISTER_IRQ syscall |
44 | * - ARG1: payload modified by a 'top-half' handler |
44 | * - ARG1: payload modified by a 'top-half' handler |
45 | * - ARG2: payload modified by a 'top-half' handler |
45 | * - ARG2: payload modified by a 'top-half' handler |
46 | * - ARG3: payload modified by a 'top-half' handler |
46 | * - ARG3: payload modified by a 'top-half' handler |
47 | * - ARG4: payload modified by a 'top-half' handler |
47 | * - ARG4: payload modified by a 'top-half' handler |
48 | * - ARG5: payload modified by a 'top-half' handler |
48 | * - ARG5: payload modified by a 'top-half' handler |
49 | * - in_phone_hash: interrupt counter (may be needed to assure correct order |
49 | * - in_phone_hash: interrupt counter (may be needed to assure correct order |
50 | * in multithreaded drivers) |
50 | * in multithreaded drivers) |
51 | * |
51 | * |
52 | * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(), |
52 | * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(), |
53 | * ipc_irq_cleanup() and IRQ handlers: |
53 | * ipc_irq_cleanup() and IRQ handlers: |
54 | * |
54 | * |
55 | * By always taking all of the uspace IRQ hash table lock, IRQ structure lock |
55 | * By always taking all of the uspace IRQ hash table lock, IRQ structure lock |
56 | * and answerbox lock, we can rule out race conditions between the |
56 | * and answerbox lock, we can rule out race conditions between the |
57 | * registration functions and also the cleanup function. Thus the observer can |
57 | * registration functions and also the cleanup function. Thus the observer can |
58 | * either see the IRQ structure present in both the hash table and the |
58 | * either see the IRQ structure present in both the hash table and the |
59 | * answerbox list or absent in both. Views in which the IRQ structure would be |
59 | * answerbox list or absent in both. Views in which the IRQ structure would be |
60 | * linked in the hash table but not in the answerbox list, or vice versa, are |
60 | * linked in the hash table but not in the answerbox list, or vice versa, are |
61 | * not possible. |
61 | * not possible. |
62 | * |
62 | * |
63 | * By always taking the hash table lock and the IRQ structure lock, we can |
63 | * By always taking the hash table lock and the IRQ structure lock, we can |
64 | * rule out a scenario in which we would free up an IRQ structure, which is |
64 | * rule out a scenario in which we would free up an IRQ structure, which is |
65 | * still referenced by, for example, an IRQ handler. The locking scheme forces |
65 | * still referenced by, for example, an IRQ handler. The locking scheme forces |
66 | * us to lock the IRQ structure only after any progressing IRQs on that |
66 | * us to lock the IRQ structure only after any progressing IRQs on that |
67 | * structure are finished. Because we hold the hash table lock, we prevent new |
67 | * structure are finished. Because we hold the hash table lock, we prevent new |
68 | * IRQs from taking new references to the IRQ structure. |
68 | * IRQs from taking new references to the IRQ structure. |
69 | */ |
69 | */ |
70 | 70 | ||
71 | #include <arch.h> |
71 | #include <arch.h> |
72 | #include <mm/slab.h> |
72 | #include <mm/slab.h> |
73 | #include <errno.h> |
73 | #include <errno.h> |
74 | #include <ddi/irq.h> |
74 | #include <ddi/irq.h> |
75 | #include <ipc/ipc.h> |
75 | #include <ipc/ipc.h> |
76 | #include <ipc/irq.h> |
76 | #include <ipc/irq.h> |
77 | #include <syscall/copy.h> |
77 | #include <syscall/copy.h> |
78 | #include <console/console.h> |
78 | #include <console/console.h> |
79 | #include <print.h> |
79 | #include <print.h> |
80 | // explicitly enable irq |
80 | // explicitly enable irq |
81 | #include <arch/interrupt.h> |
81 | #include <arch/interrupt.h> |
82 | 82 | ||
83 | /** Free the top-half pseudocode. |
83 | /** Free the top-half pseudocode. |
84 | * |
84 | * |
85 | * @param code Pointer to the top-half pseudocode. |
85 | * @param code Pointer to the top-half pseudocode. |
86 | */ |
86 | */ |
87 | static void code_free(irq_code_t *code) |
87 | static void code_free(irq_code_t *code) |
88 | { |
88 | { |
89 | if (code) { |
89 | if (code) { |
90 | free(code->cmds); |
90 | free(code->cmds); |
91 | free(code); |
91 | free(code); |
92 | } |
92 | } |
93 | } |
93 | } |
94 | 94 | ||
95 | /** Copy the top-half pseudocode from userspace into the kernel. |
95 | /** Copy the top-half pseudocode from userspace into the kernel. |
96 | * |
96 | * |
97 | * @param ucode Userspace address of the top-half pseudocode. |
97 | * @param ucode Userspace address of the top-half pseudocode. |
98 | * |
98 | * |
99 | * @return Kernel address of the copied pseudocode. |
99 | * @return Kernel address of the copied pseudocode. |
100 | */ |
100 | */ |
101 | static irq_code_t *code_from_uspace(irq_code_t *ucode) |
101 | static irq_code_t *code_from_uspace(irq_code_t *ucode) |
102 | { |
102 | { |
103 | irq_code_t *code; |
103 | irq_code_t *code; |
104 | irq_cmd_t *ucmds; |
104 | irq_cmd_t *ucmds; |
105 | int rc; |
105 | int rc; |
106 | 106 | ||
107 | code = malloc(sizeof(*code), 0); |
107 | code = malloc(sizeof(*code), 0); |
108 | rc = copy_from_uspace(code, ucode, sizeof(*code)); |
108 | rc = copy_from_uspace(code, ucode, sizeof(*code)); |
109 | if (rc != 0) { |
109 | if (rc != 0) { |
110 | free(code); |
110 | free(code); |
111 | return NULL; |
111 | return NULL; |
112 | } |
112 | } |
113 | 113 | ||
114 | if (code->cmdcount > IRQ_MAX_PROG_SIZE) { |
114 | if (code->cmdcount > IRQ_MAX_PROG_SIZE) { |
115 | free(code); |
115 | free(code); |
116 | return NULL; |
116 | return NULL; |
117 | } |
117 | } |
118 | ucmds = code->cmds; |
118 | ucmds = code->cmds; |
119 | code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0); |
119 | code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0); |
120 | rc = copy_from_uspace(code->cmds, ucmds, |
120 | rc = copy_from_uspace(code->cmds, ucmds, |
121 | sizeof(code->cmds[0]) * code->cmdcount); |
121 | sizeof(code->cmds[0]) * code->cmdcount); |
122 | if (rc != 0) { |
122 | if (rc != 0) { |
123 | free(code->cmds); |
123 | free(code->cmds); |
124 | free(code); |
124 | free(code); |
125 | return NULL; |
125 | return NULL; |
126 | } |
126 | } |
127 | 127 | ||
128 | return code; |
128 | return code; |
129 | } |
129 | } |
130 | 130 | ||
/** Register an answerbox as a receiving end for IRQ notifications.
 *
 * Copies the optional top-half pseudocode in from userspace, allocates and
 * populates an irq_t, and links it into both the uspace IRQ hash table and
 * the answerbox's IRQ list while holding the hash table lock, the IRQ lock
 * and the answerbox lock (see the locking discussion in the file header).
 *
 * @param box		Receiving answerbox.
 * @param inr		IRQ number.
 * @param devno		Device number.
 * @param method	Method to be associated with the notification.
 * @param ucode		Uspace pointer to top-half pseudocode (may be NULL).
 *
 * @return EBADMEM, ENOENT or EEXISTS on failure or 0 on success.
 */
int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
    unative_t method, irq_code_t *ucode)
{
	ipl_t ipl;
	irq_code_t *code;
	irq_t *irq;
	link_t *hlp;
	/* The hash table is keyed by the (inr, devno) pair. */
	unative_t key[] = {
		(unative_t) inr,
		(unative_t) devno
	};

	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else {
		code = NULL;
	}

	/*
	 * Allocate and populate the IRQ structure.
	 */
	irq = malloc(sizeof(irq_t), 0);
	irq_initialize(irq);
	irq->devno = devno;
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.method = method;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Enlist the IRQ structure in the uspace IRQ hash table and the
	 * answerbox's list.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&irq_uspace_hash_table_lock);
	hlp = hash_table_find(&irq_uspace_hash_table, key);
	if (hlp) {
		/* An IRQ with this (inr, devno) is already registered. */
		irq_t *hirq __attribute__((unused))
		    = hash_table_get_instance(hlp, irq_t, link);

		/* hirq is locked */
		spinlock_unlock(&hirq->lock);
		code_free(code);
		spinlock_unlock(&irq_uspace_hash_table_lock);
		free(irq);
		interrupts_restore(ipl);
		return EEXISTS;
	}

	spinlock_lock(&irq->lock);	/* Not really necessary, but paranoid */
	spinlock_lock(&box->irq_lock);
	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
	list_append(&irq->notif_cfg.link, &box->irq_head);
	spinlock_unlock(&box->irq_lock);
	spinlock_unlock(&irq->lock);
	spinlock_unlock(&irq_uspace_hash_table_lock);

	interrupts_restore(ipl);
	// explicitly enable irq
	trap_virtual_enable_irqs( 1 << irq->inr );
	return EOK;
}
209 | 210 | ||
/** Unregister task from IRQ notification.
 *
 * Looks the IRQ up by (inr, devno), unlinks it from both the answerbox's
 * list and the uspace IRQ hash table, and frees the pseudocode and the
 * irq_t itself.
 *
 * @param box	Answerbox associated with the notification.
 * @param inr	IRQ number.
 * @param devno	Device number.
 *
 * @return ENOENT if no such IRQ is registered, EOK on success.
 */
int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
{
	ipl_t ipl;
	unative_t key[] = {
		(unative_t) inr,
		(unative_t) devno
	};
	link_t *lnk;
	irq_t *irq;

	ipl = interrupts_disable();
	spinlock_lock(&irq_uspace_hash_table_lock);
	lnk = hash_table_find(&irq_uspace_hash_table, key);
	if (!lnk) {
		spinlock_unlock(&irq_uspace_hash_table_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	irq = hash_table_get_instance(lnk, irq_t, link);
	/* irq is locked */
	spinlock_lock(&box->irq_lock);

	ASSERT(irq->notif_cfg.answerbox == box);

	/* Free up the pseudo code and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);

	/*
	 * We need to drop the IRQ lock now because hash_table_remove() will try
	 * to reacquire it. That basically violates the natural locking order,
	 * but a deadlock in hash_table_remove() is prevented by the fact that
	 * we already held the IRQ lock and didn't drop the hash table lock in
	 * the meantime.
	 */
	spinlock_unlock(&irq->lock);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove(&irq_uspace_hash_table, key, 2);

	spinlock_unlock(&irq_uspace_hash_table_lock);
	spinlock_unlock(&box->irq_lock);

	/* Free up the IRQ structure. */
	free(irq);

	interrupts_restore(ipl);
	return EOK;
}
267 | 268 | ||
268 | 269 | ||
/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains
 * list of all irq_t structures that are registered to
 * send notifications to it.
 *
 * @param box	Answerbox for which we want to carry out the cleanup.
 */
void ipc_irq_cleanup(answerbox_t *box)
{
	ipl_t ipl;

loop:
	ipl = interrupts_disable();
	spinlock_lock(&irq_uspace_hash_table_lock);
	spinlock_lock(&box->irq_lock);

	/* Drain the answerbox's IRQ list until it is empty. */
	while (box->irq_head.next != &box->irq_head) {
		link_t *cur = box->irq_head.next;
		irq_t *irq;
		DEADLOCK_PROBE_INIT(p_irqlock);
		unative_t key[2];

		irq = list_get_instance(cur, irq_t, notif_cfg.link);
		if (!spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 * Drop all locks, re-enable interrupts and restart
			 * from scratch; an in-flight IRQ handler holds the
			 * IRQ lock in the opposite order.
			 */
			spinlock_unlock(&box->irq_lock);
			spinlock_unlock(&irq_uspace_hash_table_lock);
			interrupts_restore(ipl);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}
		key[0] = irq->inr;
		key[1] = irq->devno;


		ASSERT(irq->notif_cfg.answerbox == box);

		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);

		/* Free up the pseudo code and associated structures. */
		code_free(irq->notif_cfg.code);

		/*
		 * We need to drop the IRQ lock now because hash_table_remove()
		 * will try to reacquire it. That basically violates the natural
		 * locking order, but a deadlock in hash_table_remove() is
		 * prevented by the fact that we already held the IRQ lock and
		 * didn't drop the hash table lock in the meantime.
		 */
		spinlock_unlock(&irq->lock);

		/* Remove from the hash table. */
		hash_table_remove(&irq_uspace_hash_table, key, 2);

		free(irq);
	}

	spinlock_unlock(&box->irq_lock);
	spinlock_unlock(&irq_uspace_hash_table_lock);
	interrupts_restore(ipl);
}
334 | 335 | ||
335 | /** Add a call to the proper answerbox queue. |
336 | /** Add a call to the proper answerbox queue. |
336 | * |
337 | * |
337 | * Assume irq->lock is locked. |
338 | * Assume irq->lock is locked. |
338 | * |
339 | * |
339 | * @param irq IRQ structure referencing the target answerbox. |
340 | * @param irq IRQ structure referencing the target answerbox. |
340 | * @param call IRQ notification call. |
341 | * @param call IRQ notification call. |
341 | */ |
342 | */ |
342 | static void send_call(irq_t *irq, call_t *call) |
343 | static void send_call(irq_t *irq, call_t *call) |
343 | { |
344 | { |
344 | spinlock_lock(&irq->notif_cfg.answerbox->irq_lock); |
345 | spinlock_lock(&irq->notif_cfg.answerbox->irq_lock); |
345 | list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs); |
346 | list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs); |
346 | spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock); |
347 | spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock); |
347 | 348 | ||
348 | waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST); |
349 | waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST); |
349 | } |
350 | } |
350 | 351 | ||
/** Apply the top-half pseudo code to find out whether to accept the IRQ or not.
 *
 * Interprets the registered pseudocode program one command at a time.
 * The scratch registers (indices 1 .. IPC_CALL_LEN - 1) hold intermediate
 * values and later become the payload of the notification message;
 * register 0 is reserved and writes to it are silently dropped.
 *
 * @param irq	IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *		pseudocode. IRQ_DECLINE otherwise.
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	unsigned int i;
	unative_t dstval;
	irq_code_t *code = irq->notif_cfg.code;
	unative_t *scratch = irq->notif_cfg.scratch;


	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	for (i = 0; i < code->cmdcount; i++) {
		unsigned int srcarg = code->cmds[i].srcarg;
		unsigned int dstarg = code->cmds[i].dstarg;

		/* Stop interpretation on out-of-range register operands. */
		if (srcarg >= IPC_CALL_LEN)
			break;
		if (dstarg >= IPC_CALL_LEN)
			break;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			dstval = pio_read_8((ioport8_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_16:
			dstval = pio_read_16((ioport16_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_32:
			dstval = pio_read_32((ioport32_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_BTEST:
			/* Bitwise AND of a scratch register with a mask. */
			if (srcarg && dstarg) {
				dstval = scratch[srcarg] & code->cmds[i].value;
				scratch[dstarg] = dstval;
			}
			break;
		case CMD_PREDICATE:
			/*
			 * If the source register is zero, skip the next
			 * 'value' commands (the loop increment adds one more).
			 */
			if (srcarg && !scratch[srcarg]) {
				i += code->cmds[i].value;
				continue;
			}
			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
			break;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	/* Falling off the end of the program declines the interrupt. */
	return IRQ_DECLINE;
}
432 | 433 | ||
433 | 434 | ||
434 | /* IRQ top-half handler. |
435 | /* IRQ top-half handler. |
435 | * |
436 | * |
436 | * We expect interrupts to be disabled and the irq->lock already held. |
437 | * We expect interrupts to be disabled and the irq->lock already held. |
437 | * |
438 | * |
438 | * @param irq IRQ structure. |
439 | * @param irq IRQ structure. |
439 | */ |
440 | */ |
440 | void ipc_irq_top_half_handler(irq_t *irq) |
441 | void ipc_irq_top_half_handler(irq_t *irq) |
441 | { |
442 | { |
442 | ASSERT(irq); |
443 | ASSERT(irq); |
443 | 444 | ||
444 | if (irq->notif_cfg.answerbox) { |
445 | if (irq->notif_cfg.answerbox) { |
445 | call_t *call; |
446 | call_t *call; |
446 | 447 | ||
447 | call = ipc_call_alloc(FRAME_ATOMIC); |
448 | call = ipc_call_alloc(FRAME_ATOMIC); |
448 | if (!call) |
449 | if (!call) |
449 | return; |
450 | return; |
450 | 451 | ||
451 | call->flags |= IPC_CALL_NOTIF; |
452 | call->flags |= IPC_CALL_NOTIF; |
452 | /* Put a counter to the message */ |
453 | /* Put a counter to the message */ |
453 | call->priv = ++irq->notif_cfg.counter; |
454 | call->priv = ++irq->notif_cfg.counter; |
454 | 455 | ||
455 | /* Set up args */ |
456 | /* Set up args */ |
456 | IPC_SET_METHOD(call->data, irq->notif_cfg.method); |
457 | IPC_SET_METHOD(call->data, irq->notif_cfg.method); |
457 | IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]); |
458 | IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]); |
458 | IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]); |
459 | IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]); |
459 | IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]); |
460 | IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]); |
460 | IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]); |
461 | IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]); |
461 | IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]); |
462 | IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]); |
462 | 463 | ||
463 | send_call(irq, call); |
464 | send_call(irq, call); |
464 | } |
465 | } |
465 | } |
466 | } |
466 | 467 | ||
467 | /** Send notification message. |
468 | /** Send notification message. |
468 | * |
469 | * |
469 | * @param irq IRQ structure. |
470 | * @param irq IRQ structure. |
470 | * @param a1 Driver-specific payload argument. |
471 | * @param a1 Driver-specific payload argument. |
471 | * @param a2 Driver-specific payload argument. |
472 | * @param a2 Driver-specific payload argument. |
472 | * @param a3 Driver-specific payload argument. |
473 | * @param a3 Driver-specific payload argument. |
473 | * @param a4 Driver-specific payload argument. |
474 | * @param a4 Driver-specific payload argument. |
474 | * @param a5 Driver-specific payload argument. |
475 | * @param a5 Driver-specific payload argument. |
475 | */ |
476 | */ |
476 | void ipc_irq_send_msg(irq_t *irq, unative_t a1, unative_t a2, unative_t a3, |
477 | void ipc_irq_send_msg(irq_t *irq, unative_t a1, unative_t a2, unative_t a3, |
477 | unative_t a4, unative_t a5) |
478 | unative_t a4, unative_t a5) |
478 | { |
479 | { |
479 | call_t *call; |
480 | call_t *call; |
480 | 481 | ||
481 | spinlock_lock(&irq->lock); |
482 | spinlock_lock(&irq->lock); |
482 | 483 | ||
483 | if (irq->notif_cfg.answerbox) { |
484 | if (irq->notif_cfg.answerbox) { |
484 | call = ipc_call_alloc(FRAME_ATOMIC); |
485 | call = ipc_call_alloc(FRAME_ATOMIC); |
485 | if (!call) { |
486 | if (!call) { |
486 | spinlock_unlock(&irq->lock); |
487 | spinlock_unlock(&irq->lock); |
487 | return; |
488 | return; |
488 | } |
489 | } |
489 | call->flags |= IPC_CALL_NOTIF; |
490 | call->flags |= IPC_CALL_NOTIF; |
490 | /* Put a counter to the message */ |
491 | /* Put a counter to the message */ |
491 | call->priv = ++irq->notif_cfg.counter; |
492 | call->priv = ++irq->notif_cfg.counter; |
492 | 493 | ||
493 | IPC_SET_METHOD(call->data, irq->notif_cfg.method); |
494 | IPC_SET_METHOD(call->data, irq->notif_cfg.method); |
494 | IPC_SET_ARG1(call->data, a1); |
495 | IPC_SET_ARG1(call->data, a1); |
495 | IPC_SET_ARG2(call->data, a2); |
496 | IPC_SET_ARG2(call->data, a2); |
496 | IPC_SET_ARG3(call->data, a3); |
497 | IPC_SET_ARG3(call->data, a3); |
497 | IPC_SET_ARG4(call->data, a4); |
498 | IPC_SET_ARG4(call->data, a4); |
498 | IPC_SET_ARG5(call->data, a5); |
499 | IPC_SET_ARG5(call->data, a5); |
499 | 500 | ||
500 | send_call(irq, call); |
501 | send_call(irq, call); |
501 | } |
502 | } |
502 | spinlock_unlock(&irq->lock); |
503 | spinlock_unlock(&irq->lock); |
503 | } |
504 | } |
504 | 505 | ||
505 | /** @} |
506 | /** @} |
506 | */ |
507 | */ |
507 | 508 |