/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */
/** @file
 */

/** IRQ notification framework
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - METHOD: interrupt number
 * - ARG1: payload modified by a 'top-half' handler
 * - ARG2: payload
 * - ARG3: payload
 * - in_phone_hash: interrupt counter (may be needed to assure correct
 *   ordering in multithreaded drivers)
 */
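/*
 * Illustrative sketch of a 'top-half' program (assumptions: the designated
 * initializers below match the irq_code_t/irq_cmd_t field layout implied by
 * code_execute() and code_from_uspace(); DEVICE_STATUS_REG is a hypothetical
 * MMIO address). Such a program, handed to ipc_irq_register(), would read a
 * device register and place the value into ARG1 of the notification message:
 *
 *     static irq_cmd_t status_cmds[] = {
 *         {
 *             .cmd = CMD_MEM_READ_1,              // read one byte
 *             .addr = (void *) DEVICE_STATUS_REG, // hypothetical device register
 *             .dstarg = 1                         // store the result into ARG1
 *         }
 *     };
 *
 *     static irq_code_t status_code = {
 *         .cmdcount = sizeof(status_cmds) / sizeof(status_cmds[0]),
 *         .cmds = status_cmds
 *     };
 */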

#include <arch.h>
#include <mm/slab.h>
#include <errno.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <atomic.h>
#include <syscall/copy.h>
#include <console/console.h>

typedef struct {
    SPINLOCK_DECLARE(lock);
    answerbox_t *box;
    irq_code_t *code;
    atomic_t counter;
} ipc_irq_t;


static ipc_irq_t *irq_conns = NULL;
static int irq_conns_size;

#include <print.h>
/* Execute code associated with IRQ notification */
static void code_execute(call_t *call, irq_code_t *code)
{
    int i;
    __native dstval = 0;

    if (!code)
        return;

    for (i = 0; i < code->cmdcount; i++) {
        switch (code->cmds[i].cmd) {
        case CMD_MEM_READ_1:
            dstval = *((__u8 *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_2:
            dstval = *((__u16 *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_4:
            dstval = *((__u32 *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_8:
            dstval = *((__u64 *)code->cmds[i].addr);
            break;
        case CMD_MEM_WRITE_1:
            *((__u8 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_2:
            *((__u16 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_4:
            *((__u32 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_8:
            *((__u64 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
#if defined(ia32) || defined(amd64)
        case CMD_PORT_READ_1:
            dstval = inb((long)code->cmds[i].addr);
            break;
        case CMD_PORT_WRITE_1:
            outb((long)code->cmds[i].addr, code->cmds[i].value);
            break;
#endif
#if defined(ia64)
        case CMD_IA64_GETCHAR:
            dstval = _getc(&ski_uconsole);
            break;
#endif
#if defined(ppc32)
        case CMD_PPC32_GETCHAR:
            dstval = cuda_get_scancode();
            break;
#endif
        default:
            break;
        }
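        /*
         * dstarg selects which payload argument of the notification
         * message (ARG1..ARG3) receives the value produced above;
         * zero means the result is discarded.
         */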
        if (code->cmds[i].dstarg && code->cmds[i].dstarg < 4) {
            call->data.args[code->cmds[i].dstarg] = dstval;
        }
    }
}

static void code_free(irq_code_t *code)
{
    if (code) {
        free(code->cmds);
        free(code);
    }
}

static irq_code_t * code_from_uspace(irq_code_t *ucode)
{
    irq_code_t *code;
    irq_cmd_t *ucmds;
    int rc;

    code = malloc(sizeof(*code), 0);
    rc = copy_from_uspace(code, ucode, sizeof(*code));
    if (rc != 0) {
        free(code);
        return NULL;
    }

    if (code->cmdcount > IRQ_MAX_PROG_SIZE) {
        free(code);
        return NULL;
    }
    ucmds = code->cmds;
    code->cmds = malloc(sizeof(code->cmds[0]) * (code->cmdcount), 0);
    rc = copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * (code->cmdcount));
    if (rc != 0) {
        free(code->cmds);
        free(code);
        return NULL;
    }

    return code;
}

/** Unregister a task from an IRQ. */
void ipc_irq_unregister(answerbox_t *box, int irq)
{
    ipl_t ipl;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);
    if (irq_conns[mq].box == box) {
        irq_conns[mq].box = NULL;
        code_free(irq_conns[mq].code);
        irq_conns[mq].code = NULL;
    }

    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);
}

/** Register an answerbox as a receiving end of interrupt notifications. */
int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode)
{
    ipl_t ipl;
    irq_code_t *code;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ASSERT(irq_conns);

    if (ucode) {
        code = code_from_uspace(ucode);
        if (!code)
            return EBADMEM;
    } else
        code = NULL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        spinlock_unlock(&irq_conns[mq].lock);
        interrupts_restore(ipl);
        code_free(code);
        return EEXISTS;
    }
    irq_conns[mq].box = box;
    irq_conns[mq].code = code;
    atomic_set(&irq_conns[mq].counter, 0);
    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);

    return 0;
}

/** Add a call to the proper answerbox queue.
 *
 * Assumes irq_conns[mq].lock is held.
 */
static void send_call(int mq, call_t *call)
{
    spinlock_lock(&irq_conns[mq].box->irq_lock);
    list_append(&call->link, &irq_conns[mq].box->irq_notifs);
    spinlock_unlock(&irq_conns[mq].box->irq_lock);

    waitq_wakeup(&irq_conns[mq].box->wq, 0);
}

/** Send a notification message. */
void ipc_irq_send_msg(int irq, __native a1, __native a2, __native a3)
{
    call_t *call;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            spinlock_unlock(&irq_conns[mq].lock);
            return;
        }
        call->flags |= IPC_CALL_NOTIF;
        IPC_SET_METHOD(call->data, irq);
        IPC_SET_ARG1(call->data, a1);
        IPC_SET_ARG2(call->data, a2);
        IPC_SET_ARG3(call->data, a3);
        /* Attach a counter to the message */
        call->private = atomic_preinc(&irq_conns[mq].counter);

        send_call(mq, call);
    }
    spinlock_unlock(&irq_conns[mq].lock);
}

/** Notify a task that an IRQ has occurred.
 *
 * Interrupts are expected to be disabled.
 */
void ipc_irq_send_notif(int irq)
{
    call_t *call;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ASSERT(irq_conns);
    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            spinlock_unlock(&irq_conns[mq].lock);
            return;
        }
        call->flags |= IPC_CALL_NOTIF;
        /* Attach a counter to the message */
        call->private = atomic_preinc(&irq_conns[mq].counter);
        /* Set up args */
        IPC_SET_METHOD(call->data, irq);

        /* Execute code to handle irq */
        code_execute(call, irq_conns[mq].code);

        send_call(mq, call);
    }

    spinlock_unlock(&irq_conns[mq].lock);
}


/** Initialize the table of interrupt handlers.
 *
 * @param irqcount Number of hardware IRQs to be supported.
 */
void ipc_irq_make_table(int irqcount)
{
    int i;

    irqcount += IPC_IRQ_RESERVED_VIRTUAL;
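    /*
     * The extra IPC_IRQ_RESERVED_VIRTUAL slots at the start of the table
     * are presumably reserved for software-generated (virtual)
     * notifications; hardware IRQ n is then stored at index
     * n + IPC_IRQ_RESERVED_VIRTUAL (the mq computation used above).
     */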
301
 
307
 
302
    irq_conns_size = irqcount;
308
    irq_conns_size = irqcount;
303
    irq_conns = malloc(irqcount * (sizeof(*irq_conns)), 0);
309
    irq_conns = malloc(irqcount * (sizeof(*irq_conns)), 0);
304
    for (i=0; i < irqcount; i++) {
310
    for (i=0; i < irqcount; i++) {
305
        spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock");
311
        spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock");
306
        irq_conns[i].box = NULL;
312
        irq_conns[i].box = NULL;
307
        irq_conns[i].code = NULL;
313
        irq_conns[i].code = NULL;
308
    }
314
    }
309
}
315
}
310
 
316
 
311
/** Disconnect all irq's notifications
317
/** Disconnect all irq's notifications
312
 *
318
 *
313
 * TODO: It may be better to do some linked list, so that
319
 * TODO: It may be better to do some linked list, so that
314
 *       we wouldn't need to go through whole array every cleanup
320
 *       we wouldn't need to go through whole array every cleanup
315
 */
321
 */
316
void ipc_irq_cleanup(answerbox_t *box)
322
void ipc_irq_cleanup(answerbox_t *box)
317
{
323
{
318
    int i;
324
    int i;
319
    ipl_t ipl;
325
    ipl_t ipl;
320
   
326
   
321
    for (i=0; i < irq_conns_size; i++) {
327
    for (i=0; i < irq_conns_size; i++) {
322
        ipl = interrupts_disable();
328
        ipl = interrupts_disable();
323
        spinlock_lock(&irq_conns[i].lock);
329
        spinlock_lock(&irq_conns[i].lock);
324
        if (irq_conns[i].box == box)
330
        if (irq_conns[i].box == box)
325
            irq_conns[i].box = NULL;
331
            irq_conns[i].box = NULL;
326
        spinlock_unlock(&irq_conns[i].lock);
332
        spinlock_unlock(&irq_conns[i].lock);
327
        interrupts_restore(ipl);
333
        interrupts_restore(ipl);
328
    }
334
    }
329
}
335
}
-
 
336
 
-
 
337
 /** @}
-
 
338
 */
-
 
339
 
330
 
340