Rev 1281 | Rev 1288 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1281 | Rev 1284 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2006 Ondrej Palkovsky |
2 | * Copyright (C) 2006 Ondrej Palkovsky |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
/** IRQ notification framework
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - METHOD: IPC_M_INTERRUPT
 * - ARG1: interrupt number
 * - ARG2: payload modified by a 'top-half' handler
 * - ARG3: interrupt counter (may be needed to ensure correct ordering
 *         in multithreaded drivers)
 */
- | 43 | ||
29 | #include <arch.h> |
44 | #include <arch.h> |
30 | #include <mm/slab.h> |
45 | #include <mm/slab.h> |
31 | #include <errno.h> |
46 | #include <errno.h> |
32 | #include <ipc/ipc.h> |
47 | #include <ipc/ipc.h> |
33 | #include <ipc/irq.h> |
48 | #include <ipc/irq.h> |
- | 49 | #include <atomic.h> |
|
34 | 50 | ||
35 | typedef struct { |
51 | typedef struct { |
36 | SPINLOCK_DECLARE(lock); |
52 | SPINLOCK_DECLARE(lock); |
37 | answerbox_t *box; |
53 | answerbox_t *box; |
38 | irq_code_t *code; |
54 | irq_code_t *code; |
- | 55 | atomic_t counter; |
|
39 | } ipc_irq_t; |
56 | } ipc_irq_t; |
40 | 57 | ||
41 | 58 | ||
42 | static ipc_irq_t *irq_conns = NULL; |
59 | static ipc_irq_t *irq_conns = NULL; |
43 | static int irq_conns_size; |
60 | static int irq_conns_size; |
44 | 61 | ||
45 | #include <print.h> |
62 | #include <print.h> |
46 | /* Execute code associated with IRQ notification */ |
63 | /* Execute code associated with IRQ notification */ |
47 | static void code_execute(call_t *call, irq_code_t *code) |
64 | static void code_execute(call_t *call, irq_code_t *code) |
48 | { |
65 | { |
49 | int i; |
66 | int i; |
50 | 67 | ||
51 | if (!code) |
68 | if (!code) |
52 | return; |
69 | return; |
53 | 70 | ||
54 | for (i=0; i < code->cmdcount;i++) { |
71 | for (i=0; i < code->cmdcount;i++) { |
55 | switch (code->cmds[i].cmd) { |
72 | switch (code->cmds[i].cmd) { |
56 | case CMD_MEM_READ_1: |
73 | case CMD_MEM_READ_1: |
57 | IPC_SET_ARG2(call->data, *((__u8 *)code->cmds[i].addr)); |
74 | IPC_SET_ARG2(call->data, *((__u8 *)code->cmds[i].addr)); |
58 | break; |
75 | break; |
59 | case CMD_MEM_READ_2: |
76 | case CMD_MEM_READ_2: |
60 | IPC_SET_ARG2(call->data, *((__u16 *)code->cmds[i].addr)); |
77 | IPC_SET_ARG2(call->data, *((__u16 *)code->cmds[i].addr)); |
61 | break; |
78 | break; |
62 | case CMD_MEM_READ_4: |
79 | case CMD_MEM_READ_4: |
63 | IPC_SET_ARG2(call->data, *((__u32 *)code->cmds[i].addr)); |
80 | IPC_SET_ARG2(call->data, *((__u32 *)code->cmds[i].addr)); |
64 | break; |
81 | break; |
65 | case CMD_MEM_READ_8: |
82 | case CMD_MEM_READ_8: |
66 | IPC_SET_ARG2(call->data, *((__u64 *)code->cmds[i].addr)); |
83 | IPC_SET_ARG2(call->data, *((__u64 *)code->cmds[i].addr)); |
67 | break; |
84 | break; |
68 | case CMD_MEM_WRITE_1: |
85 | case CMD_MEM_WRITE_1: |
69 | *((__u8 *)code->cmds[i].addr) = code->cmds[i].value; |
86 | *((__u8 *)code->cmds[i].addr) = code->cmds[i].value; |
70 | break; |
87 | break; |
71 | case CMD_MEM_WRITE_2: |
88 | case CMD_MEM_WRITE_2: |
72 | *((__u16 *)code->cmds[i].addr) = code->cmds[i].value; |
89 | *((__u16 *)code->cmds[i].addr) = code->cmds[i].value; |
73 | break; |
90 | break; |
74 | case CMD_MEM_WRITE_4: |
91 | case CMD_MEM_WRITE_4: |
75 | *((__u32 *)code->cmds[i].addr) = code->cmds[i].value; |
92 | *((__u32 *)code->cmds[i].addr) = code->cmds[i].value; |
76 | break; |
93 | break; |
77 | case CMD_MEM_WRITE_8: |
94 | case CMD_MEM_WRITE_8: |
78 | *((__u64 *)code->cmds[i].addr) = code->cmds[i].value; |
95 | *((__u64 *)code->cmds[i].addr) = code->cmds[i].value; |
79 | break; |
96 | break; |
- | 97 | #if defined(ia32) || defined(amd64) |
|
- | 98 | case CMD_PORT_READ_1: |
|
- | 99 | IPC_SET_ARG2(call->data, inb((long)code->cmds[i].addr)); |
|
- | 100 | break; |
|
- | 101 | case CMD_PORT_WRITE_1: |
|
- | 102 | outb((long)code->cmds[i].addr, code->cmds[i].value); |
|
- | 103 | break; |
|
- | 104 | #endif |
|
80 | default: |
105 | default: |
81 | break; |
106 | break; |
82 | } |
107 | } |
83 | } |
108 | } |
84 | } |
109 | } |
85 | 110 | ||
86 | static void code_free(irq_code_t *code) |
111 | static void code_free(irq_code_t *code) |
87 | { |
112 | { |
88 | if (code) { |
113 | if (code) { |
89 | free(code->cmds); |
114 | free(code->cmds); |
90 | free(code); |
115 | free(code); |
91 | } |
116 | } |
92 | } |
117 | } |
93 | 118 | ||
94 | static irq_code_t * code_from_uspace(irq_code_t *ucode) |
119 | static irq_code_t * code_from_uspace(irq_code_t *ucode) |
95 | { |
120 | { |
96 | irq_code_t *code; |
121 | irq_code_t *code; |
97 | irq_cmd_t *ucmds; |
122 | irq_cmd_t *ucmds; |
98 | 123 | ||
99 | code = malloc(sizeof(*code), 0); |
124 | code = malloc(sizeof(*code), 0); |
100 | copy_from_uspace(code, ucode, sizeof(*code)); |
125 | copy_from_uspace(code, ucode, sizeof(*code)); |
101 | 126 | ||
102 | if (code->cmdcount > IRQ_MAX_PROG_SIZE) { |
127 | if (code->cmdcount > IRQ_MAX_PROG_SIZE) { |
103 | free(code); |
128 | free(code); |
104 | return NULL; |
129 | return NULL; |
105 | } |
130 | } |
106 | ucmds = code->cmds; |
131 | ucmds = code->cmds; |
107 | code->cmds = malloc(sizeof(code->cmds[0]) * (code->cmdcount), 0); |
132 | code->cmds = malloc(sizeof(code->cmds[0]) * (code->cmdcount), 0); |
108 | copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * (code->cmdcount)); |
133 | copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * (code->cmdcount)); |
109 | 134 | ||
110 | return code; |
135 | return code; |
111 | } |
136 | } |
112 | 137 | ||
113 | /** Unregister task from irq */ |
138 | /** Unregister task from irq */ |
114 | void ipc_irq_unregister(answerbox_t *box, int irq) |
139 | void ipc_irq_unregister(answerbox_t *box, int irq) |
115 | { |
140 | { |
116 | ipl_t ipl; |
141 | ipl_t ipl; |
117 | 142 | ||
118 | ipl = interrupts_disable(); |
143 | ipl = interrupts_disable(); |
119 | spinlock_lock(&irq_conns[irq].lock); |
144 | spinlock_lock(&irq_conns[irq].lock); |
120 | if (irq_conns[irq].box == box) { |
145 | if (irq_conns[irq].box == box) { |
121 | irq_conns[irq].box = NULL; |
146 | irq_conns[irq].box = NULL; |
122 | code_free(irq_conns[irq].code); |
147 | code_free(irq_conns[irq].code); |
123 | irq_conns[irq].code = NULL; |
148 | irq_conns[irq].code = NULL; |
124 | } |
149 | } |
125 | 150 | ||
126 | spinlock_unlock(&irq_conns[irq].lock); |
151 | spinlock_unlock(&irq_conns[irq].lock); |
127 | interrupts_restore(ipl); |
152 | interrupts_restore(ipl); |
128 | } |
153 | } |
129 | 154 | ||
130 | /** Register an answerbox as a receiving end of interrupts notifications */ |
155 | /** Register an answerbox as a receiving end of interrupts notifications */ |
131 | int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode) |
156 | int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode) |
132 | { |
157 | { |
133 | ipl_t ipl; |
158 | ipl_t ipl; |
134 | irq_code_t *code; |
159 | irq_code_t *code; |
135 | 160 | ||
136 | ASSERT(irq_conns); |
161 | ASSERT(irq_conns); |
137 | 162 | ||
138 | if (ucode) { |
163 | if (ucode) { |
139 | code = code_from_uspace(ucode); |
164 | code = code_from_uspace(ucode); |
140 | if (!code) |
165 | if (!code) |
141 | return EBADMEM; |
166 | return EBADMEM; |
142 | } else |
167 | } else |
143 | code = NULL; |
168 | code = NULL; |
144 | 169 | ||
145 | ipl = interrupts_disable(); |
170 | ipl = interrupts_disable(); |
146 | spinlock_lock(&irq_conns[irq].lock); |
171 | spinlock_lock(&irq_conns[irq].lock); |
147 | 172 | ||
148 | if (irq_conns[irq].box) { |
173 | if (irq_conns[irq].box) { |
149 | spinlock_unlock(&irq_conns[irq].lock); |
174 | spinlock_unlock(&irq_conns[irq].lock); |
150 | interrupts_restore(ipl); |
175 | interrupts_restore(ipl); |
151 | code_free(code); |
176 | code_free(code); |
152 | return EEXISTS; |
177 | return EEXISTS; |
153 | } |
178 | } |
154 | irq_conns[irq].box = box; |
179 | irq_conns[irq].box = box; |
155 | irq_conns[irq].code = code; |
180 | irq_conns[irq].code = code; |
- | 181 | atomic_set(&irq_conns[irq].counter, 0); |
|
156 | spinlock_unlock(&irq_conns[irq].lock); |
182 | spinlock_unlock(&irq_conns[irq].lock); |
157 | interrupts_restore(ipl); |
183 | interrupts_restore(ipl); |
158 | 184 | ||
159 | return 0; |
185 | return 0; |
160 | } |
186 | } |
161 | 187 | ||
162 | /** Notify process that an irq had happend |
188 | /** Notify process that an irq had happend |
163 | * |
189 | * |
164 | * We expect interrupts to be disabled |
190 | * We expect interrupts to be disabled |
165 | */ |
191 | */ |
166 | void ipc_irq_send_notif(int irq) |
192 | void ipc_irq_send_notif(int irq) |
167 | { |
193 | { |
168 | call_t *call; |
194 | call_t *call; |
169 | 195 | ||
170 | ASSERT(irq_conns); |
196 | ASSERT(irq_conns); |
171 | spinlock_lock(&irq_conns[irq].lock); |
197 | spinlock_lock(&irq_conns[irq].lock); |
172 | 198 | ||
173 | if (irq_conns[irq].box) { |
199 | if (irq_conns[irq].box) { |
174 | call = ipc_call_alloc(FRAME_ATOMIC); |
200 | call = ipc_call_alloc(FRAME_ATOMIC); |
175 | call->flags |= IPC_CALL_NOTIF; |
201 | call->flags |= IPC_CALL_NOTIF; |
176 | IPC_SET_METHOD(call->data, IPC_M_INTERRUPT); |
202 | IPC_SET_METHOD(call->data, IPC_M_INTERRUPT); |
177 | IPC_SET_ARG1(call->data, irq); |
203 | IPC_SET_ARG1(call->data, irq); |
- | 204 | IPC_SET_ARG3(call->data, atomic_preinc(&irq_conns[irq].counter)); |
|
178 | 205 | ||
179 | /* Execute code to handle irq */ |
206 | /* Execute code to handle irq */ |
180 | code_execute(call, irq_conns[irq].code); |
207 | code_execute(call, irq_conns[irq].code); |
181 | 208 | ||
182 | spinlock_lock(&irq_conns[irq].box->irq_lock); |
209 | spinlock_lock(&irq_conns[irq].box->irq_lock); |
183 | list_append(&call->list, &irq_conns[irq].box->irq_notifs); |
210 | list_append(&call->list, &irq_conns[irq].box->irq_notifs); |
184 | spinlock_unlock(&irq_conns[irq].box->irq_lock); |
211 | spinlock_unlock(&irq_conns[irq].box->irq_lock); |
185 | 212 | ||
186 | waitq_wakeup(&irq_conns[irq].box->wq, 0); |
213 | waitq_wakeup(&irq_conns[irq].box->wq, 0); |
187 | } |
214 | } |
188 | 215 | ||
189 | spinlock_unlock(&irq_conns[irq].lock); |
216 | spinlock_unlock(&irq_conns[irq].lock); |
190 | } |
217 | } |
191 | 218 | ||
192 | 219 | ||
193 | /** Initialize table of interrupt handlers */ |
220 | /** Initialize table of interrupt handlers */ |
194 | void ipc_irq_make_table(int irqcount) |
221 | void ipc_irq_make_table(int irqcount) |
195 | { |
222 | { |
196 | int i; |
223 | int i; |
197 | 224 | ||
198 | irq_conns_size = irqcount; |
225 | irq_conns_size = irqcount; |
199 | irq_conns = malloc(irqcount * (sizeof(*irq_conns)), 0); |
226 | irq_conns = malloc(irqcount * (sizeof(*irq_conns)), 0); |
200 | for (i=0; i < irqcount; i++) { |
227 | for (i=0; i < irqcount; i++) { |
201 | spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock"); |
228 | spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock"); |
202 | irq_conns[i].box = NULL; |
229 | irq_conns[i].box = NULL; |
203 | irq_conns[i].code = NULL; |
230 | irq_conns[i].code = NULL; |
204 | } |
231 | } |
205 | } |
232 | } |
206 | 233 | ||
207 | /** Disconnect all irq's notifications |
234 | /** Disconnect all irq's notifications |
208 | * |
235 | * |
209 | * TODO: It may be better to do some linked list, so that |
236 | * TODO: It may be better to do some linked list, so that |
210 | * we wouldn't need to go through whole array every cleanup |
237 | * we wouldn't need to go through whole array every cleanup |
211 | */ |
238 | */ |
212 | void ipc_irq_cleanup(answerbox_t *box) |
239 | void ipc_irq_cleanup(answerbox_t *box) |
213 | { |
240 | { |
214 | int i; |
241 | int i; |
215 | ipl_t ipl; |
242 | ipl_t ipl; |
216 | 243 | ||
217 | for (i=0; i < irq_conns_size; i++) { |
244 | for (i=0; i < irq_conns_size; i++) { |
218 | ipl = interrupts_disable(); |
245 | ipl = interrupts_disable(); |
219 | spinlock_lock(&irq_conns[i].lock); |
246 | spinlock_lock(&irq_conns[i].lock); |
220 | if (irq_conns[i].box == box) |
247 | if (irq_conns[i].box == box) |
221 | irq_conns[i].box = NULL; |
248 | irq_conns[i].box = NULL; |
222 | spinlock_unlock(&irq_conns[i].lock); |
249 | spinlock_unlock(&irq_conns[i].lock); |
223 | interrupts_restore(ipl); |
250 | interrupts_restore(ipl); |
224 | } |
251 | } |
225 | } |
252 | } |
226 | 253 |