Rev 4192 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 4192 | Rev 4263 | ||
---|---|---|---|
/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */
/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to notification ipc
 * message).
 *
 * The structure of a notification message is as follows:
 * - METHOD: method as registered by the SYS_IPC_REGISTER_IRQ syscall
 * - ARG1: payload modified by a 'top-half' handler
 * - ARG2: payload modified by a 'top-half' handler
 * - ARG3: payload modified by a 'top-half' handler
 * - ARG4: payload modified by a 'top-half' handler
 * - ARG5: payload modified by a 'top-half' handler
 * - in_phone_hash: interrupt counter (may be needed to assure correct order
 *   in multithreaded drivers)
 *
 * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(),
 * ipc_irq_cleanup() and IRQ handlers:
 *
 * By always taking all of the uspace IRQ hash table lock, IRQ structure lock
 * and answerbox lock, we can rule out race conditions between the
 * registration functions and also the cleanup function. Thus the observer can
 * either see the IRQ structure present in both the hash table and the
 * answerbox list or absent in both. Views in which the IRQ structure would be
 * linked in the hash table but not in the answerbox list, or vice versa, are
 * not possible.
 *
 * By always taking the hash table lock and the IRQ structure lock, we can
 * rule out a scenario in which we would free up an IRQ structure, which is
 * still referenced by, for example, an IRQ handler. The locking scheme forces
 * us to lock the IRQ structure only after any progressing IRQs on that
 * structure are finished. Because we hold the hash table lock, we prevent new
 * IRQs from taking new references to the IRQ structure.
 */
70 | 70 | ||
71 | #include <arch.h> |
71 | #include <arch.h> |
72 | #include <mm/slab.h> |
72 | #include <mm/slab.h> |
73 | #include <errno.h> |
73 | #include <errno.h> |
74 | #include <ddi/irq.h> |
74 | #include <ddi/irq.h> |
75 | #include <ipc/ipc.h> |
75 | #include <ipc/ipc.h> |
76 | #include <ipc/irq.h> |
76 | #include <ipc/irq.h> |
77 | #include <syscall/copy.h> |
77 | #include <syscall/copy.h> |
78 | #include <console/console.h> |
78 | #include <console/console.h> |
79 | #include <print.h> |
79 | #include <print.h> |
80 | // explicitly enable irq |
80 | // explicitly enable irq |
81 | #include <arch/interrupt.h> |
81 | #include <arch/interrupt.h> |
82 | 82 | ||
83 | /** Free the top-half pseudocode. |
83 | /** Free the top-half pseudocode. |
84 | * |
84 | * |
85 | * @param code Pointer to the top-half pseudocode. |
85 | * @param code Pointer to the top-half pseudocode. |
86 | */ |
86 | */ |
87 | static void code_free(irq_code_t *code) |
87 | static void code_free(irq_code_t *code) |
88 | { |
88 | { |
89 | if (code) { |
89 | if (code) { |
90 | free(code->cmds); |
90 | free(code->cmds); |
91 | free(code); |
91 | free(code); |
92 | } |
92 | } |
93 | } |
93 | } |
94 | 94 | ||
95 | /** Copy the top-half pseudocode from userspace into the kernel. |
95 | /** Copy the top-half pseudocode from userspace into the kernel. |
96 | * |
96 | * |
97 | * @param ucode Userspace address of the top-half pseudocode. |
97 | * @param ucode Userspace address of the top-half pseudocode. |
98 | * |
98 | * |
99 | * @return Kernel address of the copied pseudocode. |
99 | * @return Kernel address of the copied pseudocode. |
100 | */ |
100 | */ |
101 | static irq_code_t *code_from_uspace(irq_code_t *ucode) |
101 | static irq_code_t *code_from_uspace(irq_code_t *ucode) |
102 | { |
102 | { |
103 | irq_code_t *code; |
103 | irq_code_t *code; |
104 | irq_cmd_t *ucmds; |
104 | irq_cmd_t *ucmds; |
105 | int rc; |
105 | int rc; |
106 | 106 | ||
107 | code = malloc(sizeof(*code), 0); |
107 | code = malloc(sizeof(*code), 0); |
108 | rc = copy_from_uspace(code, ucode, sizeof(*code)); |
108 | rc = copy_from_uspace(code, ucode, sizeof(*code)); |
109 | if (rc != 0) { |
109 | if (rc != 0) { |
110 | free(code); |
110 | free(code); |
111 | return NULL; |
111 | return NULL; |
112 | } |
112 | } |
113 | 113 | ||
114 | if (code->cmdcount > IRQ_MAX_PROG_SIZE) { |
114 | if (code->cmdcount > IRQ_MAX_PROG_SIZE) { |
115 | free(code); |
115 | free(code); |
116 | return NULL; |
116 | return NULL; |
117 | } |
117 | } |
118 | ucmds = code->cmds; |
118 | ucmds = code->cmds; |
119 | code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0); |
119 | code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0); |
120 | rc = copy_from_uspace(code->cmds, ucmds, |
120 | rc = copy_from_uspace(code->cmds, ucmds, |
121 | sizeof(code->cmds[0]) * code->cmdcount); |
121 | sizeof(code->cmds[0]) * code->cmdcount); |
122 | if (rc != 0) { |
122 | if (rc != 0) { |
123 | free(code->cmds); |
123 | free(code->cmds); |
124 | free(code); |
124 | free(code); |
125 | return NULL; |
125 | return NULL; |
126 | } |
126 | } |
127 | 127 | ||
128 | return code; |
128 | return code; |
129 | } |
129 | } |
130 | 130 | ||
131 | /** Register an answerbox as a receiving end for IRQ notifications. |
131 | /** Register an answerbox as a receiving end for IRQ notifications. |
132 | * |
132 | * |
133 | * @param box Receiving answerbox. |
133 | * @param box Receiving answerbox. |
134 | * @param inr IRQ number. |
134 | * @param inr IRQ number. |
135 | * @param devno Device number. |
135 | * @param devno Device number. |
136 | * @param method Method to be associated with the notification. |
136 | * @param method Method to be associated with the notification. |
137 | * @param ucode Uspace pointer to top-half pseudocode. |
137 | * @param ucode Uspace pointer to top-half pseudocode. |
138 | * |
138 | * |
139 | * @return EBADMEM, ENOENT or EEXISTS on failure or 0 on success. |
139 | * @return EBADMEM, ENOENT or EEXISTS on failure or 0 on success. |
140 | */ |
140 | */ |
141 | int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno, |
141 | int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno, |
142 | unative_t method, irq_code_t *ucode) |
142 | unative_t method, irq_code_t *ucode) |
143 | { |
143 | { |
144 | ipl_t ipl; |
144 | ipl_t ipl; |
145 | irq_code_t *code; |
145 | irq_code_t *code; |
146 | irq_t *irq; |
146 | irq_t *irq; |
- | 147 | link_t *hlp; |
|
147 | unative_t key[] = { |
148 | unative_t key[] = { |
148 | (unative_t) inr, |
149 | (unative_t) inr, |
149 | (unative_t) devno |
150 | (unative_t) devno |
150 | }; |
151 | }; |
151 | 152 | ||
152 | if (ucode) { |
153 | if (ucode) { |
153 | code = code_from_uspace(ucode); |
154 | code = code_from_uspace(ucode); |
154 | if (!code) |
155 | if (!code) |
155 | return EBADMEM; |
156 | return EBADMEM; |
156 | } else { |
157 | } else { |
157 | code = NULL; |
158 | code = NULL; |
158 | } |
159 | } |
159 | 160 | ||
160 | /* |
161 | /* |
161 | * Allocate and populate the IRQ structure. |
162 | * Allocate and populate the IRQ structure. |
162 | */ |
163 | */ |
163 | irq = malloc(sizeof(irq_t), 0); |
164 | irq = malloc(sizeof(irq_t), 0); |
164 | irq_initialize(irq); |
165 | irq_initialize(irq); |
165 | irq->devno = devno; |
166 | irq->devno = devno; |
166 | irq->inr = inr; |
167 | irq->inr = inr; |
167 | irq->claim = ipc_irq_top_half_claim; |
168 | irq->claim = ipc_irq_top_half_claim; |
168 | irq->handler = ipc_irq_top_half_handler; |
169 | irq->handler = ipc_irq_top_half_handler; |
169 | irq->notif_cfg.notify = true; |
170 | irq->notif_cfg.notify = true; |
170 | irq->notif_cfg.answerbox = box; |
171 | irq->notif_cfg.answerbox = box; |
171 | irq->notif_cfg.method = method; |
172 | irq->notif_cfg.method = method; |
172 | irq->notif_cfg.code = code; |
173 | irq->notif_cfg.code = code; |
173 | irq->notif_cfg.counter = 0; |
174 | irq->notif_cfg.counter = 0; |
174 | 175 | ||
175 | /* |
176 | /* |
176 | * Enlist the IRQ structure in the uspace IRQ hash table and the |
177 | * Enlist the IRQ structure in the uspace IRQ hash table and the |
177 | * answerbox's list. |
178 | * answerbox's list. |
178 | */ |
179 | */ |
179 | ipl = interrupts_disable(); |
180 | ipl = interrupts_disable(); |
180 | spinlock_lock(&irq_uspace_hash_table_lock); |
181 | spinlock_lock(&irq_uspace_hash_table_lock); |
- | 182 | hlp = hash_table_find(&irq_uspace_hash_table, key); |
|
- | 183 | if (hlp) { |
|
- | 184 | irq_t *hirq = hash_table_get_instance(hlp, irq_t, link); |
|
181 | spinlock_lock(&irq->lock); |
185 | /* hirq is locked */ |
182 | spinlock_lock(&box->irq_lock); |
186 | spinlock_unlock(&hirq->lock); |
183 | if (hash_table_find(&irq_uspace_hash_table, key)) { |
- | |
184 | code_free(code); |
187 | code_free(code); |
185 | spinlock_unlock(&box->irq_lock); |
- | |
186 | spinlock_unlock(&irq->lock); |
- | |
187 | spinlock_unlock(&irq_uspace_hash_table_lock); |
188 | spinlock_unlock(&irq_uspace_hash_table_lock); |
188 | free(irq); |
189 | free(irq); |
189 | interrupts_restore(ipl); |
190 | interrupts_restore(ipl); |
190 | return EEXISTS; |
191 | return EEXISTS; |
191 | } |
192 | } |
- | 193 | spinlock_lock(&irq->lock); /* not really necessary, but paranoid */ |
|
- | 194 | spinlock_lock(&box->irq_lock); |
|
192 | hash_table_insert(&irq_uspace_hash_table, key, &irq->link); |
195 | hash_table_insert(&irq_uspace_hash_table, key, &irq->link); |
193 | list_append(&irq->notif_cfg.link, &box->irq_head); |
196 | list_append(&irq->notif_cfg.link, &box->irq_head); |
194 | spinlock_unlock(&box->irq_lock); |
197 | spinlock_unlock(&box->irq_lock); |
195 | spinlock_unlock(&irq->lock); |
198 | spinlock_unlock(&irq->lock); |
196 | spinlock_unlock(&irq_uspace_hash_table_lock); |
199 | spinlock_unlock(&irq_uspace_hash_table_lock); |
197 | 200 | ||
198 | interrupts_restore(ipl); |
201 | interrupts_restore(ipl); |
199 | // explicitly enable irq |
202 | // explicitly enable irq |
200 | /* different byteorder? |
203 | /* different byteorder? |
201 | * trap_virtual_enable_irqs( 1 << ( irq->inr - 1 )); |
204 | * trap_virtual_enable_irqs( 1 << ( irq->inr - 1 )); |
202 | */ |
205 | */ |
203 | trap_virtual_enable_irqs( 1 << ( irq->inr + 7 )); |
206 | trap_virtual_enable_irqs( 1 << ( irq->inr + 7 )); |
204 | return EOK; |
207 | return EOK; |
205 | } |
208 | } |
206 | 209 | ||
207 | /** Unregister task from IRQ notification. |
210 | /** Unregister task from IRQ notification. |
208 | * |
211 | * |
209 | * @param box Answerbox associated with the notification. |
212 | * @param box Answerbox associated with the notification. |
210 | * @param inr IRQ number. |
213 | * @param inr IRQ number. |
211 | * @param devno Device number. |
214 | * @param devno Device number. |
212 | */ |
215 | */ |
213 | int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno) |
216 | int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno) |
214 | { |
217 | { |
215 | ipl_t ipl; |
218 | ipl_t ipl; |
216 | unative_t key[] = { |
219 | unative_t key[] = { |
217 | (unative_t) inr, |
220 | (unative_t) inr, |
218 | (unative_t) devno |
221 | (unative_t) devno |
219 | }; |
222 | }; |
220 | link_t *lnk; |
223 | link_t *lnk; |
221 | irq_t *irq; |
224 | irq_t *irq; |
222 | 225 | ||
223 | ipl = interrupts_disable(); |
226 | ipl = interrupts_disable(); |
224 | spinlock_lock(&irq_uspace_hash_table_lock); |
227 | spinlock_lock(&irq_uspace_hash_table_lock); |
225 | lnk = hash_table_find(&irq_uspace_hash_table, key); |
228 | lnk = hash_table_find(&irq_uspace_hash_table, key); |
226 | if (!lnk) { |
229 | if (!lnk) { |
227 | spinlock_unlock(&irq_uspace_hash_table_lock); |
230 | spinlock_unlock(&irq_uspace_hash_table_lock); |
228 | interrupts_restore(ipl); |
231 | interrupts_restore(ipl); |
229 | return ENOENT; |
232 | return ENOENT; |
230 | } |
233 | } |
231 | irq = hash_table_get_instance(lnk, irq_t, link); |
234 | irq = hash_table_get_instance(lnk, irq_t, link); |
232 | spinlock_lock(&irq->lock); |
235 | /* irq is locked */ |
233 | spinlock_lock(&box->irq_lock); |
236 | spinlock_lock(&box->irq_lock); |
234 | 237 | ||
235 | ASSERT(irq->notif_cfg.answerbox == box); |
238 | ASSERT(irq->notif_cfg.answerbox == box); |
236 | 239 | ||
237 | /* Free up the pseudo code and associated structures. */ |
240 | /* Free up the pseudo code and associated structures. */ |
238 | code_free(irq->notif_cfg.code); |
241 | code_free(irq->notif_cfg.code); |
239 | 242 | ||
240 | /* Remove the IRQ from the answerbox's list. */ |
243 | /* Remove the IRQ from the answerbox's list. */ |
241 | list_remove(&irq->notif_cfg.link); |
244 | list_remove(&irq->notif_cfg.link); |
242 | 245 | ||
- | 246 | /* |
|
- | 247 | * We need to drop the IRQ lock now because hash_table_remove() will try |
|
- | 248 | * to reacquire it. That basically violates the natural locking order, |
|
- | 249 | * but a deadlock in hash_table_remove() is prevented by the fact that |
|
- | 250 | * we already held the IRQ lock and didn't drop the hash table lock in |
|
- | 251 | * the meantime. |
|
- | 252 | */ |
|
- | 253 | spinlock_unlock(&irq->lock); |
|
- | 254 | ||
243 | /* Remove the IRQ from the uspace IRQ hash table. */ |
255 | /* Remove the IRQ from the uspace IRQ hash table. */ |
244 | hash_table_remove(&irq_uspace_hash_table, key, 2); |
256 | hash_table_remove(&irq_uspace_hash_table, key, 2); |
245 | 257 | ||
246 | spinlock_unlock(&irq_uspace_hash_table_lock); |
258 | spinlock_unlock(&irq_uspace_hash_table_lock); |
247 | spinlock_unlock(&irq->lock); |
- | |
248 | spinlock_unlock(&box->irq_lock); |
259 | spinlock_unlock(&box->irq_lock); |
249 | 260 | ||
250 | /* Free up the IRQ structure. */ |
261 | /* Free up the IRQ structure. */ |
251 | free(irq); |
262 | free(irq); |
252 | 263 | ||
253 | interrupts_restore(ipl); |
264 | interrupts_restore(ipl); |
254 | return EOK; |
265 | return EOK; |
255 | } |
266 | } |
256 | 267 | ||
257 | 268 | ||
258 | /** Disconnect all IRQ notifications from an answerbox. |
269 | /** Disconnect all IRQ notifications from an answerbox. |
259 | * |
270 | * |
260 | * This function is effective because the answerbox contains |
271 | * This function is effective because the answerbox contains |
261 | * list of all irq_t structures that are registered to |
272 | * list of all irq_t structures that are registered to |
262 | * send notifications to it. |
273 | * send notifications to it. |
263 | * |
274 | * |
264 | * @param box Answerbox for which we want to carry out the cleanup. |
275 | * @param box Answerbox for which we want to carry out the cleanup. |
265 | */ |
276 | */ |
266 | void ipc_irq_cleanup(answerbox_t *box) |
277 | void ipc_irq_cleanup(answerbox_t *box) |
267 | { |
278 | { |
268 | ipl_t ipl; |
279 | ipl_t ipl; |
269 | 280 | ||
270 | loop: |
281 | loop: |
271 | ipl = interrupts_disable(); |
282 | ipl = interrupts_disable(); |
272 | spinlock_lock(&irq_uspace_hash_table_lock); |
283 | spinlock_lock(&irq_uspace_hash_table_lock); |
273 | spinlock_lock(&box->irq_lock); |
284 | spinlock_lock(&box->irq_lock); |
274 | 285 | ||
275 | while (box->irq_head.next != &box->irq_head) { |
286 | while (box->irq_head.next != &box->irq_head) { |
276 | link_t *cur = box->irq_head.next; |
287 | link_t *cur = box->irq_head.next; |
277 | irq_t *irq; |
288 | irq_t *irq; |
278 | DEADLOCK_PROBE_INIT(p_irqlock); |
289 | DEADLOCK_PROBE_INIT(p_irqlock); |
279 | unative_t key[2]; |
290 | unative_t key[2]; |
280 | 291 | ||
281 | irq = list_get_instance(cur, irq_t, notif_cfg.link); |
292 | irq = list_get_instance(cur, irq_t, notif_cfg.link); |
282 | if (!spinlock_trylock(&irq->lock)) { |
293 | if (!spinlock_trylock(&irq->lock)) { |
283 | /* |
294 | /* |
284 | * Avoid deadlock by trying again. |
295 | * Avoid deadlock by trying again. |
285 | */ |
296 | */ |
286 | spinlock_unlock(&box->irq_lock); |
297 | spinlock_unlock(&box->irq_lock); |
287 | spinlock_unlock(&irq_uspace_hash_table_lock); |
298 | spinlock_unlock(&irq_uspace_hash_table_lock); |
288 | interrupts_restore(ipl); |
299 | interrupts_restore(ipl); |
289 | DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD); |
300 | DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD); |
290 | goto loop; |
301 | goto loop; |
291 | } |
302 | } |
292 | key[0] = irq->inr; |
303 | key[0] = irq->inr; |
293 | key[1] = irq->devno; |
304 | key[1] = irq->devno; |
294 | 305 | ||
295 | 306 | ||
296 | ASSERT(irq->notif_cfg.answerbox == box); |
307 | ASSERT(irq->notif_cfg.answerbox == box); |
297 | 308 | ||
298 | /* Unlist from the answerbox. */ |
309 | /* Unlist from the answerbox. */ |
299 | list_remove(&irq->notif_cfg.link); |
310 | list_remove(&irq->notif_cfg.link); |
300 | 311 | ||
301 | /* Remove from the hash table. */ |
- | |
302 | hash_table_remove(&irq_uspace_hash_table, key, 2); |
- | |
303 | - | ||
304 | /* Free up the pseudo code and associated structures. */ |
312 | /* Free up the pseudo code and associated structures. */ |
305 | code_free(irq->notif_cfg.code); |
313 | code_free(irq->notif_cfg.code); |
306 | 314 | ||
- | 315 | /* |
|
- | 316 | * We need to drop the IRQ lock now because hash_table_remove() |
|
- | 317 | * will try to reacquire it. That basically violates the natural |
|
- | 318 | * locking order, but a deadlock in hash_table_remove() is |
|
- | 319 | * prevented by the fact that we already held the IRQ lock and |
|
- | 320 | * didn't drop the hash table lock in the meantime. |
|
- | 321 | */ |
|
307 | spinlock_unlock(&irq->lock); |
322 | spinlock_unlock(&irq->lock); |
- | 323 | ||
- | 324 | /* Remove from the hash table. */ |
|
- | 325 | hash_table_remove(&irq_uspace_hash_table, key, 2); |
|
- | 326 | ||
308 | free(irq); |
327 | free(irq); |
309 | } |
328 | } |
310 | 329 | ||
311 | spinlock_unlock(&box->irq_lock); |
330 | spinlock_unlock(&box->irq_lock); |
312 | spinlock_unlock(&irq_uspace_hash_table_lock); |
331 | spinlock_unlock(&irq_uspace_hash_table_lock); |
313 | interrupts_restore(ipl); |
332 | interrupts_restore(ipl); |
314 | } |
333 | } |
315 | 334 | ||
316 | /** Add a call to the proper answerbox queue. |
335 | /** Add a call to the proper answerbox queue. |
317 | * |
336 | * |
318 | * Assume irq->lock is locked. |
337 | * Assume irq->lock is locked. |
319 | * |
338 | * |
320 | * @param irq IRQ structure referencing the target answerbox. |
339 | * @param irq IRQ structure referencing the target answerbox. |
321 | * @param call IRQ notification call. |
340 | * @param call IRQ notification call. |
322 | */ |
341 | */ |
323 | static void send_call(irq_t *irq, call_t *call) |
342 | static void send_call(irq_t *irq, call_t *call) |
324 | { |
343 | { |
325 | spinlock_lock(&irq->notif_cfg.answerbox->irq_lock); |
344 | spinlock_lock(&irq->notif_cfg.answerbox->irq_lock); |
326 | list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs); |
345 | list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs); |
327 | spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock); |
346 | spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock); |
328 | 347 | ||
329 | waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST); |
348 | waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST); |
330 | } |
349 | } |
331 | 350 | ||
332 | /** Apply the top-half pseudo code to find out whether to accept the IRQ or not. |
351 | /** Apply the top-half pseudo code to find out whether to accept the IRQ or not. |
333 | * |
352 | * |
334 | * @param irq IRQ structure. |
353 | * @param irq IRQ structure. |
335 | * |
354 | * |
336 | * @return IRQ_ACCEPT if the interrupt is accepted by the |
355 | * @return IRQ_ACCEPT if the interrupt is accepted by the |
337 | * pseudocode. IRQ_DECLINE otherwise. |
356 | * pseudocode. IRQ_DECLINE otherwise. |
338 | */ |
357 | */ |
339 | irq_ownership_t ipc_irq_top_half_claim(irq_t *irq) |
358 | irq_ownership_t ipc_irq_top_half_claim(irq_t *irq) |
340 | { |
359 | { |
341 | unsigned int i; |
360 | unsigned int i; |
342 | unative_t dstval; |
361 | unative_t dstval; |
343 | irq_code_t *code = irq->notif_cfg.code; |
362 | irq_code_t *code = irq->notif_cfg.code; |
344 | unative_t *scratch = irq->notif_cfg.scratch; |
363 | unative_t *scratch = irq->notif_cfg.scratch; |
345 | 364 | ||
346 | 365 | ||
347 | if (!irq->notif_cfg.notify) |
366 | if (!irq->notif_cfg.notify) |
348 | return IRQ_DECLINE; |
367 | return IRQ_DECLINE; |
349 | 368 | ||
350 | if (!code) |
369 | if (!code) |
351 | return IRQ_DECLINE; |
370 | return IRQ_DECLINE; |
352 | 371 | ||
353 | for (i = 0; i < code->cmdcount; i++) { |
372 | for (i = 0; i < code->cmdcount; i++) { |
354 | unsigned int srcarg = code->cmds[i].srcarg; |
373 | unsigned int srcarg = code->cmds[i].srcarg; |
355 | unsigned int dstarg = code->cmds[i].dstarg; |
374 | unsigned int dstarg = code->cmds[i].dstarg; |
356 | 375 | ||
357 | if (srcarg >= IPC_CALL_LEN) |
376 | if (srcarg >= IPC_CALL_LEN) |
358 | break; |
377 | break; |
359 | if (dstarg >= IPC_CALL_LEN) |
378 | if (dstarg >= IPC_CALL_LEN) |
360 | break; |
379 | break; |
361 | 380 | ||
362 | switch (code->cmds[i].cmd) { |
381 | switch (code->cmds[i].cmd) { |
363 | case CMD_PIO_READ_8: |
382 | case CMD_PIO_READ_8: |
364 | dstval = pio_read_8((ioport8_t *) code->cmds[i].addr); |
383 | dstval = pio_read_8((ioport8_t *) code->cmds[i].addr); |
365 | if (dstarg) |
384 | if (dstarg) |
366 | scratch[dstarg] = dstval; |
385 | scratch[dstarg] = dstval; |
367 | break; |
386 | break; |
368 | case CMD_PIO_READ_16: |
387 | case CMD_PIO_READ_16: |
369 | dstval = pio_read_16((ioport16_t *) code->cmds[i].addr); |
388 | dstval = pio_read_16((ioport16_t *) code->cmds[i].addr); |
370 | if (dstarg) |
389 | if (dstarg) |
371 | scratch[dstarg] = dstval; |
390 | scratch[dstarg] = dstval; |
372 | break; |
391 | break; |
373 | case CMD_PIO_READ_32: |
392 | case CMD_PIO_READ_32: |
374 | dstval = pio_read_32((ioport32_t *) code->cmds[i].addr); |
393 | dstval = pio_read_32((ioport32_t *) code->cmds[i].addr); |
375 | if (dstarg) |
394 | if (dstarg) |
376 | scratch[dstarg] = dstval; |
395 | scratch[dstarg] = dstval; |
377 | break; |
396 | break; |
378 | case CMD_PIO_WRITE_8: |
397 | case CMD_PIO_WRITE_8: |
379 | pio_write_8((ioport8_t *) code->cmds[i].addr, |
398 | pio_write_8((ioport8_t *) code->cmds[i].addr, |
380 | (uint8_t) code->cmds[i].value); |
399 | (uint8_t) code->cmds[i].value); |
381 | break; |
400 | break; |
382 | case CMD_PIO_WRITE_16: |
401 | case CMD_PIO_WRITE_16: |
383 | pio_write_16((ioport16_t *) code->cmds[i].addr, |
402 | pio_write_16((ioport16_t *) code->cmds[i].addr, |
384 | (uint16_t) code->cmds[i].value); |
403 | (uint16_t) code->cmds[i].value); |
385 | break; |
404 | break; |
386 | case CMD_PIO_WRITE_32: |
405 | case CMD_PIO_WRITE_32: |
387 | pio_write_32((ioport32_t *) code->cmds[i].addr, |
406 | pio_write_32((ioport32_t *) code->cmds[i].addr, |
388 | (uint32_t) code->cmds[i].value); |
407 | (uint32_t) code->cmds[i].value); |
389 | break; |
408 | break; |
390 | case CMD_BTEST: |
409 | case CMD_BTEST: |
391 | if (srcarg && dstarg) { |
410 | if (srcarg && dstarg) { |
392 | dstval = scratch[srcarg] & code->cmds[i].value; |
411 | dstval = scratch[srcarg] & code->cmds[i].value; |
393 | scratch[dstarg] = dstval; |
412 | scratch[dstarg] = dstval; |
394 | } |
413 | } |
395 | break; |
414 | break; |
396 | case CMD_PREDICATE: |
415 | case CMD_PREDICATE: |
397 | if (srcarg && !scratch[srcarg]) { |
416 | if (srcarg && !scratch[srcarg]) { |
398 | i += code->cmds[i].value; |
417 | i += code->cmds[i].value; |
399 | continue; |
418 | continue; |
400 | } |
419 | } |
401 | break; |
420 | break; |
402 | case CMD_ACCEPT: |
421 | case CMD_ACCEPT: |
403 | return IRQ_ACCEPT; |
422 | return IRQ_ACCEPT; |
404 | break; |
423 | break; |
405 | case CMD_DECLINE: |
424 | case CMD_DECLINE: |
406 | default: |
425 | default: |
407 | return IRQ_DECLINE; |
426 | return IRQ_DECLINE; |
408 | } |
427 | } |
409 | } |
428 | } |
410 | 429 | ||
411 | return IRQ_DECLINE; |
430 | return IRQ_DECLINE; |
412 | } |
431 | } |
413 | 432 | ||
414 | 433 | ||
415 | /* IRQ top-half handler. |
434 | /* IRQ top-half handler. |
416 | * |
435 | * |
417 | * We expect interrupts to be disabled and the irq->lock already held. |
436 | * We expect interrupts to be disabled and the irq->lock already held. |
418 | * |
437 | * |
419 | * @param irq IRQ structure. |
438 | * @param irq IRQ structure. |
420 | */ |
439 | */ |
421 | void ipc_irq_top_half_handler(irq_t *irq) |
440 | void ipc_irq_top_half_handler(irq_t *irq) |
422 | { |
441 | { |
423 | ASSERT(irq); |
442 | ASSERT(irq); |
424 | 443 | ||
425 | if (irq->notif_cfg.answerbox) { |
444 | if (irq->notif_cfg.answerbox) { |
426 | call_t *call; |
445 | call_t *call; |
427 | 446 | ||
428 | call = ipc_call_alloc(FRAME_ATOMIC); |
447 | call = ipc_call_alloc(FRAME_ATOMIC); |
429 | if (!call) |
448 | if (!call) |
430 | return; |
449 | return; |
431 | 450 | ||
432 | call->flags |= IPC_CALL_NOTIF; |
451 | call->flags |= IPC_CALL_NOTIF; |
433 | /* Put a counter to the message */ |
452 | /* Put a counter to the message */ |
434 | call->priv = ++irq->notif_cfg.counter; |
453 | call->priv = ++irq->notif_cfg.counter; |
435 | 454 | ||
436 | /* Set up args */ |
455 | /* Set up args */ |
437 | IPC_SET_METHOD(call->data, irq->notif_cfg.method); |
456 | IPC_SET_METHOD(call->data, irq->notif_cfg.method); |
438 | IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]); |
457 | IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]); |
439 | IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]); |
458 | IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]); |
440 | IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]); |
459 | IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]); |
441 | IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]); |
460 | IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]); |
442 | IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]); |
461 | IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]); |
443 | 462 | ||
444 | send_call(irq, call); |
463 | send_call(irq, call); |
445 | } |
464 | } |
446 | } |
465 | } |
447 | 466 | ||
448 | /** Send notification message. |
467 | /** Send notification message. |
449 | * |
468 | * |
450 | * @param irq IRQ structure. |
469 | * @param irq IRQ structure. |
451 | * @param a1 Driver-specific payload argument. |
470 | * @param a1 Driver-specific payload argument. |
452 | * @param a2 Driver-specific payload argument. |
471 | * @param a2 Driver-specific payload argument. |
453 | * @param a3 Driver-specific payload argument. |
472 | * @param a3 Driver-specific payload argument. |
454 | * @param a4 Driver-specific payload argument. |
473 | * @param a4 Driver-specific payload argument. |
455 | * @param a5 Driver-specific payload argument. |
474 | * @param a5 Driver-specific payload argument. |
456 | */ |
475 | */ |
457 | void ipc_irq_send_msg(irq_t *irq, unative_t a1, unative_t a2, unative_t a3, |
476 | void ipc_irq_send_msg(irq_t *irq, unative_t a1, unative_t a2, unative_t a3, |
458 | unative_t a4, unative_t a5) |
477 | unative_t a4, unative_t a5) |
459 | { |
478 | { |
460 | call_t *call; |
479 | call_t *call; |
461 | 480 | ||
462 | spinlock_lock(&irq->lock); |
481 | spinlock_lock(&irq->lock); |
463 | 482 | ||
464 | if (irq->notif_cfg.answerbox) { |
483 | if (irq->notif_cfg.answerbox) { |
465 | call = ipc_call_alloc(FRAME_ATOMIC); |
484 | call = ipc_call_alloc(FRAME_ATOMIC); |
466 | if (!call) { |
485 | if (!call) { |
467 | spinlock_unlock(&irq->lock); |
486 | spinlock_unlock(&irq->lock); |
468 | return; |
487 | return; |
469 | } |
488 | } |
470 | call->flags |= IPC_CALL_NOTIF; |
489 | call->flags |= IPC_CALL_NOTIF; |
471 | /* Put a counter to the message */ |
490 | /* Put a counter to the message */ |
472 | call->priv = ++irq->notif_cfg.counter; |
491 | call->priv = ++irq->notif_cfg.counter; |
473 | 492 | ||
474 | IPC_SET_METHOD(call->data, irq->notif_cfg.method); |
493 | IPC_SET_METHOD(call->data, irq->notif_cfg.method); |
475 | IPC_SET_ARG1(call->data, a1); |
494 | IPC_SET_ARG1(call->data, a1); |
476 | IPC_SET_ARG2(call->data, a2); |
495 | IPC_SET_ARG2(call->data, a2); |
477 | IPC_SET_ARG3(call->data, a3); |
496 | IPC_SET_ARG3(call->data, a3); |
478 | IPC_SET_ARG4(call->data, a4); |
497 | IPC_SET_ARG4(call->data, a4); |
479 | IPC_SET_ARG5(call->data, a5); |
498 | IPC_SET_ARG5(call->data, a5); |
480 | 499 | ||
481 | send_call(irq, call); |
500 | send_call(irq, call); |
482 | } |
501 | } |
483 | spinlock_unlock(&irq->lock); |
502 | spinlock_unlock(&irq->lock); |
484 | } |
503 | } |
485 | 504 | ||
/** @}
 */
488 | 507 |