/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */
/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - METHOD: interrupt number
 * - ARG1: payload modified by a 'top-half' handler
 * - ARG2: payload
 * - ARG3: payload
 * - in_phone_hash: interrupt counter (may be needed to assure correct order
 *         in multithreaded drivers)
 */

#include <arch.h>
#include <mm/slab.h>
#include <errno.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <atomic.h>
#include <syscall/copy.h>
#include <console/console.h>
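
/*
 * Illustrative sketch (not part of the original interface): a minimal
 * 'top-half' program of the kind interpreted by code_execute() below and
 * delivered via the notification message layout described in the file
 * header. The device register address is a hypothetical example value and
 * the initializers assume irq_cmd_t exposes the cmd/addr/value/dstarg
 * fields used throughout this file.
 */
static irq_cmd_t example_cmds[] = {
    {
        .cmd = CMD_MEM_READ_1,          /* read one byte of device memory */
        .addr = (void *) 0xdeadbee0,    /* hypothetical device register */
        .value = 0,
        .dstarg = 1                     /* store the byte into ARG1 of the notification */
    }
};

static irq_code_t example_code = {
    .cmdcount = 1,
    .cmds = example_cmds
};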

typedef struct {
    SPINLOCK_DECLARE(lock);
    answerbox_t *box;
    irq_code_t *code;
    atomic_t counter;
} ipc_irq_t;


static ipc_irq_t *irq_conns = NULL;
static int irq_conns_size;

#include <print.h>
/* Execute code associated with IRQ notification */
static void code_execute(call_t *call, irq_code_t *code)
{
    int i;
    __native dstval = 0;

    if (!code)
        return;

    for (i = 0; i < code->cmdcount; i++) {
        switch (code->cmds[i].cmd) {
        case CMD_MEM_READ_1:
            dstval = *((__u8 *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_2:
            dstval = *((__u16 *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_4:
            dstval = *((__u32 *)code->cmds[i].addr);
            break;
        case CMD_MEM_READ_8:
            dstval = *((__u64 *)code->cmds[i].addr);
            break;
        case CMD_MEM_WRITE_1:
            *((__u8 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_2:
            *((__u16 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_4:
            *((__u32 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
        case CMD_MEM_WRITE_8:
            *((__u64 *)code->cmds[i].addr) = code->cmds[i].value;
            break;
#if defined(ia32) || defined(amd64)
        case CMD_PORT_READ_1:
            dstval = inb((long)code->cmds[i].addr);
            break;
        case CMD_PORT_WRITE_1:
            outb((long)code->cmds[i].addr, code->cmds[i].value);
            break;
#endif
#if defined(ia64)
        case CMD_IA64_GETCHAR:
            dstval = _getc(&ski_uconsole);
            break;
#endif
#if defined(ppc32)
        case CMD_PPC32_GETCHAR:
            dstval = cuda_get_scancode();
            break;
#endif
        default:
            break;
        }
        if (code->cmds[i].dstarg && code->cmds[i].dstarg < 4) {
            call->data.args[code->cmds[i].dstarg] = dstval;
        }
    }
}

static void code_free(irq_code_t *code)
{
    if (code) {
        free(code->cmds);
        free(code);
    }
}

static irq_code_t * code_from_uspace(irq_code_t *ucode)
{
    irq_code_t *code;
    irq_cmd_t *ucmds;
    int rc;

    code = malloc(sizeof(*code), 0);
    rc = copy_from_uspace(code, ucode, sizeof(*code));
    if (rc != 0) {
        free(code);
        return NULL;
    }

    if (code->cmdcount > IRQ_MAX_PROG_SIZE) {
        free(code);
        return NULL;
    }
    ucmds = code->cmds;
    code->cmds = malloc(sizeof(code->cmds[0]) * (code->cmdcount), 0);
    rc = copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * (code->cmdcount));
    if (rc != 0) {
        free(code->cmds);
        free(code);
        return NULL;
    }

    return code;
}

/** Unregister task from irq */
void ipc_irq_unregister(answerbox_t *box, int irq)
{
    ipl_t ipl;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);
    if (irq_conns[mq].box == box) {
        irq_conns[mq].box = NULL;
        code_free(irq_conns[mq].code);
        irq_conns[mq].code = NULL;
    }

    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);
}

/** Register an answerbox as a receiving end of interrupt notifications */
int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode)
{
    ipl_t ipl;
    irq_code_t *code;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ASSERT(irq_conns);

    if (ucode) {
        code = code_from_uspace(ucode);
        if (!code)
            return EBADMEM;
    } else
        code = NULL;

    ipl = interrupts_disable();
    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        spinlock_unlock(&irq_conns[mq].lock);
        interrupts_restore(ipl);
        code_free(code);
        return EEXISTS;
    }
    irq_conns[mq].box = box;
    irq_conns[mq].code = code;
    atomic_set(&irq_conns[mq].counter, 0);
    spinlock_unlock(&irq_conns[mq].lock);
    interrupts_restore(ipl);

    return 0;
}
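
/*
 * Illustrative sketch (hypothetical, not part of the original file): how a
 * syscall handler could connect the calling task to IRQ notifications,
 * assuming TASK->answerbox refers to the caller's kernel answerbox. A real
 * entry point would also validate 'irq' against the platform's IRQ limit
 * before registering.
 */
static __native example_sys_ipc_register_irq(int irq, irq_code_t *ucode)
{
    return (__native) ipc_irq_register(&TASK->answerbox, irq, ucode);
}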

/** Add call to proper answerbox queue
 *
 * Assume irq_conns[mq].lock is locked */
static void send_call(int mq, call_t *call)
{
    spinlock_lock(&irq_conns[mq].box->irq_lock);
    list_append(&call->link, &irq_conns[mq].box->irq_notifs);
    spinlock_unlock(&irq_conns[mq].box->irq_lock);

    waitq_wakeup(&irq_conns[mq].box->wq, 0);
}

/** Send notification message
 *
 */
void ipc_irq_send_msg(int irq, __native a1, __native a2, __native a3)
{
    call_t *call;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            spinlock_unlock(&irq_conns[mq].lock);
            return;
        }
        call->flags |= IPC_CALL_NOTIF;
        IPC_SET_METHOD(call->data, irq);
        IPC_SET_ARG1(call->data, a1);
        IPC_SET_ARG2(call->data, a2);
        IPC_SET_ARG3(call->data, a3);
        /* Put a counter into the message */
        call->private = atomic_preinc(&irq_conns[mq].counter);

        send_call(mq, call);
    }
    spinlock_unlock(&irq_conns[mq].lock);
}

/** Notify task that an IRQ has occurred.
 *
 * We expect interrupts to be disabled.
 */
void ipc_irq_send_notif(int irq)
{
    call_t *call;
    int mq = irq + IPC_IRQ_RESERVED_VIRTUAL;

    ASSERT(irq_conns);
    spinlock_lock(&irq_conns[mq].lock);

    if (irq_conns[mq].box) {
        call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            spinlock_unlock(&irq_conns[mq].lock);
            return;
        }
        call->flags |= IPC_CALL_NOTIF;
        /* Put a counter into the message */
        call->private = atomic_preinc(&irq_conns[mq].counter);
        /* Set up args */
        IPC_SET_METHOD(call->data, irq);

        /* Execute code to handle irq */
        code_execute(call, irq_conns[mq].code);

        send_call(mq, call);
    }

    spinlock_unlock(&irq_conns[mq].lock);
}
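
/*
 * Illustrative sketch (hypothetical): an architecture-level interrupt routine
 * forwarding a device interrupt into this framework. ipc_irq_send_notif()
 * expects interrupts to be disabled, which is already the case in such a
 * handler.
 */
static void example_irq_handler(int irq)
{
    /* ... acknowledge the interrupt in the interrupt controller ... */
    ipc_irq_send_notif(irq);
}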


/** Initialize table of interrupt handlers
 *
 * @param irqcount Count of required hardware IRQs to be supported
 */
void ipc_irq_make_table(int irqcount)
{
    int i;

    irqcount += IPC_IRQ_RESERVED_VIRTUAL;

    irq_conns_size = irqcount;
    irq_conns = malloc(irqcount * (sizeof(*irq_conns)), 0);
    for (i = 0; i < irqcount; i++) {
        spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock");
        irq_conns[i].box = NULL;
        irq_conns[i].code = NULL;
    }
}
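
/*
 * Illustrative sketch (hypothetical): kernel IPC initialization would size
 * the table for the platform's hardware IRQs; the IRQ_COUNT constant used
 * here is an assumed architecture-supplied value.
 */
static void example_ipc_irq_init(void)
{
    ipc_irq_make_table(IRQ_COUNT);
}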

/** Disconnect all IRQ notifications of an answerbox
 *
 * @todo It may be better to use a linked list, so that we would not need
 *       to go through the whole array on every cleanup.
 */
void ipc_irq_cleanup(answerbox_t *box)
{
    int i;
    ipl_t ipl;

    for (i = 0; i < irq_conns_size; i++) {
        ipl = interrupts_disable();
        spinlock_lock(&irq_conns[i].lock);
        if (irq_conns[i].box == box)
            irq_conns[i].box = NULL;
        spinlock_unlock(&irq_conns[i].lock);
        interrupts_restore(ipl);
    }
}

/** @}
 */