Rev 1591 | Rev 1625 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1591 | Rev 1595 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2006 Ondrej Palkovsky |
2 | * Copyright (C) 2006 Ondrej Palkovsky |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** IRQ notification framework |
29 | /** IRQ notification framework |
30 | * |
30 | * |
31 | * This framework allows applications to register to receive a notification |
31 | * This framework allows applications to register to receive a notification |
32 | * when interrupt is detected. The application may provide a simple 'top-half' |
32 | * when interrupt is detected. The application may provide a simple 'top-half' |
33 | * handler as part of its registration, which can perform simple operations |
33 | * handler as part of its registration, which can perform simple operations |
34 | * (read/write port/memory, add information to notification ipc message). |
34 | * (read/write port/memory, add information to notification ipc message). |
35 | * |
35 | * |
36 | * The structure of a notification message is as follows: |
36 | * The structure of a notification message is as follows: |
37 | * - METHOD: IPC_M_INTERRUPT |
37 | * - METHOD: IPC_M_INTERRUPT |
38 | * - ARG1: interrupt number |
38 | * - ARG1: interrupt number |
39 | * - ARG2: payload modified by a 'top-half' handler |
39 | * - ARG2: payload modified by a 'top-half' handler |
40 | * - ARG3: interrupt counter (may be needed to assure correct order |
40 | * - ARG3: interrupt counter (may be needed to assure correct order |
41 | * in multithreaded drivers) |
41 | * in multithreaded drivers) |
42 | */ |
42 | */ |
43 | 43 | ||
44 | #include <arch.h> |
44 | #include <arch.h> |
45 | #include <mm/slab.h> |
45 | #include <mm/slab.h> |
46 | #include <errno.h> |
46 | #include <errno.h> |
47 | #include <ipc/ipc.h> |
47 | #include <ipc/ipc.h> |
48 | #include <ipc/irq.h> |
48 | #include <ipc/irq.h> |
49 | #include <atomic.h> |
49 | #include <atomic.h> |
50 | #include <syscall/copy.h> |
50 | #include <syscall/copy.h> |
51 | #include <console/console.h> |
51 | #include <console/console.h> |
52 | 52 | ||
/** Per-IRQ notification connection.
 *
 * One entry exists for every supported IRQ (hardware IRQs are offset by
 * IPC_IRQ_RESERVED_VIRTUAL when indexing the table).
 */
typedef struct {
	SPINLOCK_DECLARE(lock);	/**< Guards box and code of this entry. */
	answerbox_t *box;	/**< Receiving answerbox; NULL when unregistered. */
	irq_code_t *code;	/**< Optional 'top-half' program, or NULL. */
	atomic_t counter;	/**< Notification sequence counter (sent as ARG3). */
} ipc_irq_t;


/* Table of IRQ connections, allocated by ipc_irq_make_table(). */
static ipc_irq_t *irq_conns = NULL;
/* Number of entries in irq_conns (hardware IRQs + reserved virtual ones). */
static int irq_conns_size;
63 | 63 | ||
64 | #include <print.h> |
64 | #include <print.h> |
65 | /* Execute code associated with IRQ notification */ |
65 | /* Execute code associated with IRQ notification */ |
66 | static void code_execute(call_t *call, irq_code_t *code) |
66 | static void code_execute(call_t *call, irq_code_t *code) |
67 | { |
67 | { |
68 | int i; |
68 | int i; |
69 | 69 | ||
70 | if (!code) |
70 | if (!code) |
71 | return; |
71 | return; |
72 | 72 | ||
73 | for (i=0; i < code->cmdcount;i++) { |
73 | for (i=0; i < code->cmdcount;i++) { |
74 | switch (code->cmds[i].cmd) { |
74 | switch (code->cmds[i].cmd) { |
75 | case CMD_MEM_READ_1: |
75 | case CMD_MEM_READ_1: |
76 | IPC_SET_ARG2(call->data, *((__u8 *)code->cmds[i].addr)); |
76 | IPC_SET_ARG2(call->data, *((__u8 *)code->cmds[i].addr)); |
77 | break; |
77 | break; |
78 | case CMD_MEM_READ_2: |
78 | case CMD_MEM_READ_2: |
79 | IPC_SET_ARG2(call->data, *((__u16 *)code->cmds[i].addr)); |
79 | IPC_SET_ARG2(call->data, *((__u16 *)code->cmds[i].addr)); |
80 | break; |
80 | break; |
81 | case CMD_MEM_READ_4: |
81 | case CMD_MEM_READ_4: |
82 | IPC_SET_ARG2(call->data, *((__u32 *)code->cmds[i].addr)); |
82 | IPC_SET_ARG2(call->data, *((__u32 *)code->cmds[i].addr)); |
83 | break; |
83 | break; |
84 | case CMD_MEM_READ_8: |
84 | case CMD_MEM_READ_8: |
85 | IPC_SET_ARG2(call->data, *((__u64 *)code->cmds[i].addr)); |
85 | IPC_SET_ARG2(call->data, *((__u64 *)code->cmds[i].addr)); |
86 | break; |
86 | break; |
87 | case CMD_MEM_WRITE_1: |
87 | case CMD_MEM_WRITE_1: |
88 | *((__u8 *)code->cmds[i].addr) = code->cmds[i].value; |
88 | *((__u8 *)code->cmds[i].addr) = code->cmds[i].value; |
89 | break; |
89 | break; |
90 | case CMD_MEM_WRITE_2: |
90 | case CMD_MEM_WRITE_2: |
91 | *((__u16 *)code->cmds[i].addr) = code->cmds[i].value; |
91 | *((__u16 *)code->cmds[i].addr) = code->cmds[i].value; |
92 | break; |
92 | break; |
93 | case CMD_MEM_WRITE_4: |
93 | case CMD_MEM_WRITE_4: |
94 | *((__u32 *)code->cmds[i].addr) = code->cmds[i].value; |
94 | *((__u32 *)code->cmds[i].addr) = code->cmds[i].value; |
95 | break; |
95 | break; |
96 | case CMD_MEM_WRITE_8: |
96 | case CMD_MEM_WRITE_8: |
97 | *((__u64 *)code->cmds[i].addr) = code->cmds[i].value; |
97 | *((__u64 *)code->cmds[i].addr) = code->cmds[i].value; |
98 | break; |
98 | break; |
99 | #if defined(ia32) || defined(amd64) |
99 | #if defined(ia32) || defined(amd64) |
100 | case CMD_PORT_READ_1: |
100 | case CMD_PORT_READ_1: |
101 | IPC_SET_ARG2(call->data, inb((long)code->cmds[i].addr)); |
101 | IPC_SET_ARG2(call->data, inb((long)code->cmds[i].addr)); |
102 | break; |
102 | break; |
103 | case CMD_PORT_WRITE_1: |
103 | case CMD_PORT_WRITE_1: |
104 | outb((long)code->cmds[i].addr, code->cmds[i].value); |
104 | outb((long)code->cmds[i].addr, code->cmds[i].value); |
105 | break; |
105 | break; |
106 | #endif |
106 | #endif |
107 | #if defined(ia64) |
107 | #if defined(ia64) |
108 | case CMD_IA64_GETCHAR: |
108 | case CMD_IA64_GETCHAR: |
109 | IPC_SET_ARG2(call->data, _getc(&ski_uconsole)); |
109 | IPC_SET_ARG2(call->data, _getc(&ski_uconsole)); |
110 | break; |
110 | break; |
111 | #endif |
111 | #endif |
112 | default: |
112 | default: |
113 | break; |
113 | break; |
114 | } |
114 | } |
115 | } |
115 | } |
116 | } |
116 | } |
117 | 117 | ||
118 | static void code_free(irq_code_t *code) |
118 | static void code_free(irq_code_t *code) |
119 | { |
119 | { |
120 | if (code) { |
120 | if (code) { |
121 | free(code->cmds); |
121 | free(code->cmds); |
122 | free(code); |
122 | free(code); |
123 | } |
123 | } |
124 | } |
124 | } |
125 | 125 | ||
126 | static irq_code_t * code_from_uspace(irq_code_t *ucode) |
126 | static irq_code_t * code_from_uspace(irq_code_t *ucode) |
127 | { |
127 | { |
128 | irq_code_t *code; |
128 | irq_code_t *code; |
129 | irq_cmd_t *ucmds; |
129 | irq_cmd_t *ucmds; |
130 | int rc; |
130 | int rc; |
131 | 131 | ||
132 | code = malloc(sizeof(*code), 0); |
132 | code = malloc(sizeof(*code), 0); |
133 | rc = copy_from_uspace(code, ucode, sizeof(*code)); |
133 | rc = copy_from_uspace(code, ucode, sizeof(*code)); |
134 | if (rc != 0) { |
134 | if (rc != 0) { |
135 | free(code); |
135 | free(code); |
136 | return NULL; |
136 | return NULL; |
137 | } |
137 | } |
138 | 138 | ||
139 | if (code->cmdcount > IRQ_MAX_PROG_SIZE) { |
139 | if (code->cmdcount > IRQ_MAX_PROG_SIZE) { |
140 | free(code); |
140 | free(code); |
141 | return NULL; |
141 | return NULL; |
142 | } |
142 | } |
143 | ucmds = code->cmds; |
143 | ucmds = code->cmds; |
144 | code->cmds = malloc(sizeof(code->cmds[0]) * (code->cmdcount), 0); |
144 | code->cmds = malloc(sizeof(code->cmds[0]) * (code->cmdcount), 0); |
145 | rc = copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * (code->cmdcount)); |
145 | rc = copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * (code->cmdcount)); |
146 | if (rc != 0) { |
146 | if (rc != 0) { |
147 | free(code->cmds); |
147 | free(code->cmds); |
148 | free(code); |
148 | free(code); |
149 | return NULL; |
149 | return NULL; |
150 | } |
150 | } |
151 | 151 | ||
152 | return code; |
152 | return code; |
153 | } |
153 | } |
154 | 154 | ||
155 | /** Unregister task from irq */ |
155 | /** Unregister task from irq */ |
156 | void ipc_irq_unregister(answerbox_t *box, int irq) |
156 | void ipc_irq_unregister(answerbox_t *box, int irq) |
157 | { |
157 | { |
158 | ipl_t ipl; |
158 | ipl_t ipl; |
- | 159 | int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
|
159 | 160 | ||
160 | ipl = interrupts_disable(); |
161 | ipl = interrupts_disable(); |
161 | spinlock_lock(&irq_conns[irq].lock); |
162 | spinlock_lock(&irq_conns[mq].lock); |
162 | if (irq_conns[irq].box == box) { |
163 | if (irq_conns[mq].box == box) { |
163 | irq_conns[irq].box = NULL; |
164 | irq_conns[mq].box = NULL; |
164 | code_free(irq_conns[irq].code); |
165 | code_free(irq_conns[mq].code); |
165 | irq_conns[irq].code = NULL; |
166 | irq_conns[mq].code = NULL; |
166 | } |
167 | } |
167 | 168 | ||
168 | spinlock_unlock(&irq_conns[irq].lock); |
169 | spinlock_unlock(&irq_conns[mq].lock); |
169 | interrupts_restore(ipl); |
170 | interrupts_restore(ipl); |
170 | } |
171 | } |
171 | 172 | ||
172 | /** Register an answerbox as a receiving end of interrupts notifications */ |
173 | /** Register an answerbox as a receiving end of interrupts notifications */ |
173 | int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode) |
174 | int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode) |
174 | { |
175 | { |
175 | ipl_t ipl; |
176 | ipl_t ipl; |
176 | irq_code_t *code; |
177 | irq_code_t *code; |
- | 178 | int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
|
177 | 179 | ||
178 | ASSERT(irq_conns); |
180 | ASSERT(irq_conns); |
179 | 181 | ||
180 | if (ucode) { |
182 | if (ucode) { |
181 | code = code_from_uspace(ucode); |
183 | code = code_from_uspace(ucode); |
182 | if (!code) |
184 | if (!code) |
183 | return EBADMEM; |
185 | return EBADMEM; |
184 | } else |
186 | } else |
185 | code = NULL; |
187 | code = NULL; |
186 | 188 | ||
187 | ipl = interrupts_disable(); |
189 | ipl = interrupts_disable(); |
188 | spinlock_lock(&irq_conns[irq].lock); |
190 | spinlock_lock(&irq_conns[mq].lock); |
189 | 191 | ||
190 | if (irq_conns[irq].box) { |
192 | if (irq_conns[mq].box) { |
191 | spinlock_unlock(&irq_conns[irq].lock); |
193 | spinlock_unlock(&irq_conns[mq].lock); |
192 | interrupts_restore(ipl); |
194 | interrupts_restore(ipl); |
193 | code_free(code); |
195 | code_free(code); |
194 | return EEXISTS; |
196 | return EEXISTS; |
195 | } |
197 | } |
196 | irq_conns[irq].box = box; |
198 | irq_conns[mq].box = box; |
197 | irq_conns[irq].code = code; |
199 | irq_conns[mq].code = code; |
198 | atomic_set(&irq_conns[irq].counter, 0); |
200 | atomic_set(&irq_conns[mq].counter, 0); |
199 | spinlock_unlock(&irq_conns[irq].lock); |
201 | spinlock_unlock(&irq_conns[mq].lock); |
200 | interrupts_restore(ipl); |
202 | interrupts_restore(ipl); |
201 | 203 | ||
202 | return 0; |
204 | return 0; |
203 | } |
205 | } |
204 | 206 | ||
- | 207 | /** Add call to proper answerbox queue |
|
- | 208 | * |
|
- | 209 | * Assume irq_conns[mq].lock is locked */ |
|
- | 210 | static void send_call(int mq, call_t *call) |
|
- | 211 | { |
|
- | 212 | spinlock_lock(&irq_conns[mq].box->irq_lock); |
|
- | 213 | list_append(&call->link, &irq_conns[mq].box->irq_notifs); |
|
- | 214 | spinlock_unlock(&irq_conns[mq].box->irq_lock); |
|
- | 215 | ||
- | 216 | waitq_wakeup(&irq_conns[mq].box->wq, 0); |
|
- | 217 | } |
|
- | 218 | ||
- | 219 | /** Send notification message |
|
- | 220 | * |
|
- | 221 | */ |
|
- | 222 | void ipc_irq_send_msg(int irq, __native a2, __native a3) |
|
- | 223 | { |
|
- | 224 | call_t *call; |
|
- | 225 | int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
|
- | 226 | ||
- | 227 | spinlock_lock(&irq_conns[mq].lock); |
|
- | 228 | ||
- | 229 | if (irq_conns[mq].box) { |
|
- | 230 | call = ipc_call_alloc(FRAME_ATOMIC); |
|
- | 231 | if (!call) { |
|
- | 232 | spinlock_unlock(&irq_conns[mq].lock); |
|
- | 233 | return; |
|
- | 234 | } |
|
- | 235 | call->flags |= IPC_CALL_NOTIF; |
|
- | 236 | IPC_SET_METHOD(call->data, IPC_M_INTERRUPT); |
|
- | 237 | IPC_SET_ARG1(call->data, irq); |
|
- | 238 | IPC_SET_ARG2(call->data, a2); |
|
- | 239 | IPC_SET_ARG3(call->data, a3); |
|
- | 240 | ||
- | 241 | send_call(mq, call); |
|
- | 242 | } |
|
- | 243 | spinlock_unlock(&irq_conns[mq].lock); |
|
- | 244 | } |
|
- | 245 | ||
205 | /** Notify process that an irq had happend |
246 | /** Notify process that an irq had happend |
206 | * |
247 | * |
207 | * We expect interrupts to be disabled |
248 | * We expect interrupts to be disabled |
208 | */ |
249 | */ |
209 | void ipc_irq_send_notif(int irq) |
250 | void ipc_irq_send_notif(int irq) |
210 | { |
251 | { |
211 | call_t *call; |
252 | call_t *call; |
- | 253 | int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
|
212 | 254 | ||
213 | ASSERT(irq_conns); |
255 | ASSERT(irq_conns); |
214 | spinlock_lock(&irq_conns[irq].lock); |
256 | spinlock_lock(&irq_conns[mq].lock); |
215 | 257 | ||
216 | if (irq_conns[irq].box) { |
258 | if (irq_conns[mq].box) { |
217 | call = ipc_call_alloc(FRAME_ATOMIC); |
259 | call = ipc_call_alloc(FRAME_ATOMIC); |
218 | if (!call) { |
260 | if (!call) { |
219 | spinlock_unlock(&irq_conns[irq].lock); |
261 | spinlock_unlock(&irq_conns[mq].lock); |
220 | return; |
262 | return; |
221 | } |
263 | } |
222 | call->flags |= IPC_CALL_NOTIF; |
264 | call->flags |= IPC_CALL_NOTIF; |
223 | IPC_SET_METHOD(call->data, IPC_M_INTERRUPT); |
265 | IPC_SET_METHOD(call->data, IPC_M_INTERRUPT); |
224 | IPC_SET_ARG1(call->data, irq); |
266 | IPC_SET_ARG1(call->data, irq); |
225 | IPC_SET_ARG3(call->data, atomic_preinc(&irq_conns[irq].counter)); |
267 | IPC_SET_ARG3(call->data, atomic_preinc(&irq_conns[mq].counter)); |
226 | 268 | ||
227 | /* Execute code to handle irq */ |
269 | /* Execute code to handle irq */ |
228 | code_execute(call, irq_conns[irq].code); |
270 | code_execute(call, irq_conns[mq].code); |
229 | 271 | ||
230 | spinlock_lock(&irq_conns[irq].box->irq_lock); |
272 | send_call(mq, call); |
231 | list_append(&call->link, &irq_conns[irq].box->irq_notifs); |
- | |
232 | spinlock_unlock(&irq_conns[irq].box->irq_lock); |
- | |
233 | - | ||
234 | waitq_wakeup(&irq_conns[irq].box->wq, 0); |
- | |
235 | } |
273 | } |
236 | 274 | ||
237 | spinlock_unlock(&irq_conns[irq].lock); |
275 | spinlock_unlock(&irq_conns[mq].lock); |
238 | } |
276 | } |
239 | 277 | ||
240 | 278 | ||
241 | /** Initialize table of interrupt handlers */ |
279 | /** Initialize table of interrupt handlers |
- | 280 | * |
|
- | 281 | * @param irqcount Count of required hardware IRQs to be supported |
|
- | 282 | */ |
|
242 | void ipc_irq_make_table(int irqcount) |
283 | void ipc_irq_make_table(int irqcount) |
243 | { |
284 | { |
244 | int i; |
285 | int i; |
245 | 286 | ||
- | 287 | irqcount += IPC_IRQ_RESERVED_VIRTUAL; |
|
- | 288 | ||
246 | irq_conns_size = irqcount; |
289 | irq_conns_size = irqcount; |
247 | irq_conns = malloc(irqcount * (sizeof(*irq_conns)), 0); |
290 | irq_conns = malloc(irqcount * (sizeof(*irq_conns)), 0); |
248 | for (i=0; i < irqcount; i++) { |
291 | for (i=0; i < irqcount; i++) { |
249 | spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock"); |
292 | spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock"); |
250 | irq_conns[i].box = NULL; |
293 | irq_conns[i].box = NULL; |
251 | irq_conns[i].code = NULL; |
294 | irq_conns[i].code = NULL; |
252 | } |
295 | } |
253 | } |
296 | } |
254 | 297 | ||
255 | /** Disconnect all irq's notifications |
298 | /** Disconnect all irq's notifications |
256 | * |
299 | * |
257 | * TODO: It may be better to do some linked list, so that |
300 | * TODO: It may be better to do some linked list, so that |
258 | * we wouldn't need to go through whole array every cleanup |
301 | * we wouldn't need to go through whole array every cleanup |
259 | */ |
302 | */ |
260 | void ipc_irq_cleanup(answerbox_t *box) |
303 | void ipc_irq_cleanup(answerbox_t *box) |
261 | { |
304 | { |
262 | int i; |
305 | int i; |
263 | ipl_t ipl; |
306 | ipl_t ipl; |
264 | 307 | ||
265 | for (i=0; i < irq_conns_size; i++) { |
308 | for (i=0; i < irq_conns_size; i++) { |
266 | ipl = interrupts_disable(); |
309 | ipl = interrupts_disable(); |
267 | spinlock_lock(&irq_conns[i].lock); |
310 | spinlock_lock(&irq_conns[i].lock); |
268 | if (irq_conns[i].box == box) |
311 | if (irq_conns[i].box == box) |
269 | irq_conns[i].box = NULL; |
312 | irq_conns[i].box = NULL; |
270 | spinlock_unlock(&irq_conns[i].lock); |
313 | spinlock_unlock(&irq_conns[i].lock); |
271 | interrupts_restore(ipl); |
314 | interrupts_restore(ipl); |
272 | } |
315 | } |
273 | } |
316 | } |
274 | 317 |