/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <mm/slab.h>
#include <errno.h>
#include <print.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>

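/** One IRQ notification connection.
 *
 * Binds an IRQ number to the answerbox that receives its notifications
 * and to an optional notification program run on every interrupt.
 */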
typedef struct {
	SPINLOCK_DECLARE(lock);
	answerbox_t *box;
	irq_code_t *code;
} ipc_irq_t;

static ipc_irq_t *irq_conns = NULL;
static int irq_conns_size;

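/*
 * A registering task may attach a small notification "program" (irq_code_t)
 * to its connection. Each irq_cmd_t describes a single memory access:
 * CMD_MEM_READ_* stores the value read into ARG2 of the notification call,
 * CMD_MEM_WRITE_* writes a constant to the given address. The interpreter
 * below runs this program in interrupt context.
 */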
/* Execute code associated with IRQ notification */
static void code_execute(call_t *call, irq_code_t *code)
{
	int i;

	if (!code)
		return;

	for (i = 0; i < code->cmdcount; i++) {
		switch (code->cmds[i].cmd) {
		case CMD_MEM_READ_1:
			IPC_SET_ARG2(call->data, *((__u8 *)code->cmds[i].addr));
			break;
		case CMD_MEM_READ_2:
			IPC_SET_ARG2(call->data, *((__u16 *)code->cmds[i].addr));
			break;
		case CMD_MEM_READ_4:
			IPC_SET_ARG2(call->data, *((__u32 *)code->cmds[i].addr));
			break;
		case CMD_MEM_READ_8:
			IPC_SET_ARG2(call->data, *((__u64 *)code->cmds[i].addr));
			break;
		case CMD_MEM_WRITE_1:
			*((__u8 *)code->cmds[i].addr) = code->cmds[i].value;
			break;
		case CMD_MEM_WRITE_2:
			*((__u16 *)code->cmds[i].addr) = code->cmds[i].value;
			break;
		case CMD_MEM_WRITE_4:
			*((__u32 *)code->cmds[i].addr) = code->cmds[i].value;
			break;
		case CMD_MEM_WRITE_8:
			*((__u64 *)code->cmds[i].addr) = code->cmds[i].value;
			break;
		default:
			break;
		}
	}
}

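/** Free a notification program and its command array. */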
static void code_free(irq_code_t *code)
{
	if (code) {
		free(code->cmds);
		free(code);
	}
}

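/** Copy an IRQ notification program from userspace into the kernel.
 *
 * Programs longer than IRQ_MAX_PROG_SIZE are rejected.
 *
 * @return Kernel copy of the program, or NULL on failure.
 */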
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	irq_code_t *code;
	irq_cmd_t *ucmds;

	code = malloc(sizeof(*code), 0);
	copy_from_uspace(code, ucode, sizeof(*code));

	if (code->cmdcount > IRQ_MAX_PROG_SIZE) {
		free(code);
		return NULL;
	}
	ucmds = code->cmds;
	code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	copy_from_uspace(code->cmds, ucmds, sizeof(code->cmds[0]) * code->cmdcount);

	return code;
}

/** Unregister a task from IRQ notifications. */
void ipc_irq_unregister(answerbox_t *box, int irq)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&irq_conns[irq].lock);
	if (irq_conns[irq].box == box) {
		irq_conns[irq].box = NULL;
		code_free(irq_conns[irq].code);
		irq_conns[irq].code = NULL;
	}

	spinlock_unlock(&irq_conns[irq].lock);
	interrupts_restore(ipl);
}

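/*
 * Illustrative only: a caller could describe a two-step notification
 * program that reads a one-byte status register and then acknowledges
 * the interrupt. STATUS_REG and ACK_REG are hypothetical device
 * addresses, not part of this code:
 *
 *	irq_cmd_t cmds[] = {
 *		{ .cmd = CMD_MEM_READ_1, .addr = (void *) STATUS_REG },
 *		{ .cmd = CMD_MEM_WRITE_1, .addr = (void *) ACK_REG, .value = 1 }
 *	};
 *	irq_code_t code = { .cmdcount = 2, .cmds = cmds };
 *
 * A pointer to such a structure is what arrives here as ucode.
 */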
/** Register an answerbox as a receiving end of interrupt notifications. */
int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode)
{
	ipl_t ipl;
	irq_code_t *code;

	ASSERT(irq_conns);

	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;

	ipl = interrupts_disable();
	spinlock_lock(&irq_conns[irq].lock);

	if (irq_conns[irq].box) {
		spinlock_unlock(&irq_conns[irq].lock);
		interrupts_restore(ipl);
		code_free(code);
		return EEXISTS;
	}
	irq_conns[irq].box = box;
	irq_conns[irq].code = code;
	spinlock_unlock(&irq_conns[irq].lock);
	interrupts_restore(ipl);

	return 0;
}

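/*
 * A note on context (assumption, not stated in this file): this is meant to
 * be called from the low-level interrupt path, so it only queues the
 * notification call on the answerbox and wakes its wait queue; the call is
 * picked up and answered later from task context.
 */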
/** Notify a task that an IRQ has happened.
 *
 * We expect interrupts to be disabled.
 */
void ipc_irq_send_notif(int irq)
{
	call_t *call;

	ASSERT(irq_conns);
	spinlock_lock(&irq_conns[irq].lock);

	if (irq_conns[irq].box) {
		call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call) {
			/* Atomic allocation can fail; drop the notification. */
			spinlock_unlock(&irq_conns[irq].lock);
			return;
		}
		call->flags |= IPC_CALL_NOTIF;
		IPC_SET_METHOD(call->data, IPC_M_INTERRUPT);
		IPC_SET_ARG1(call->data, irq);

		/* Execute code to handle irq */
		code_execute(call, irq_conns[irq].code);

		spinlock_lock(&irq_conns[irq].box->irq_lock);
		list_append(&call->list, &irq_conns[irq].box->irq_notifs);
		spinlock_unlock(&irq_conns[irq].box->irq_lock);

		waitq_wakeup(&irq_conns[irq].box->wq, 0);
	}

	spinlock_unlock(&irq_conns[irq].lock);
}

/** Initialize the table of IRQ notification connections. */
void ipc_irq_make_table(int irqcount)
{
	int i;

	irq_conns_size = irqcount;
	irq_conns = malloc(irqcount * sizeof(*irq_conns), 0);
	for (i = 0; i < irqcount; i++) {
		spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock");
		irq_conns[i].box = NULL;
		irq_conns[i].code = NULL;
	}
}

/** Disconnect all IRQ notifications for an answerbox.
 *
 * TODO: Keeping the connections on a linked list would avoid scanning
 * the whole array on every cleanup.
 */
void ipc_irq_cleanup(answerbox_t *box)
{
	int i;
	ipl_t ipl;

	for (i = 0; i < irq_conns_size; i++) {
		ipl = interrupts_disable();
		spinlock_lock(&irq_conns[i].lock);
		if (irq_conns[i].box == box) {
			irq_conns[i].box = NULL;
			code_free(irq_conns[i].code);
			irq_conns[i].code = NULL;
		}
		spinlock_unlock(&irq_conns[i].lock);
		interrupts_restore(ipl);
	}
}