#
# Copyright (c) 2005 Ondrej Palkovsky
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#define IREGISTER_SPACE 120

#define IOFFSET_RAX 0x0
#define IOFFSET_RBX 0x8
#define IOFFSET_RCX 0x10
#define IOFFSET_RDX 0x18
#define IOFFSET_RSI 0x20
#define IOFFSET_RDI 0x28
#define IOFFSET_R8 0x30
#define IOFFSET_R9 0x38
#define IOFFSET_R10 0x40
#define IOFFSET_R11 0x48
#define IOFFSET_R12 0x50
#define IOFFSET_R13 0x58
#define IOFFSET_R14 0x60
#define IOFFSET_R15 0x68
#define IOFFSET_RBP 0x70
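# The register save area holds 15 eight-byte slots (IOFFSET_RAX at 0x0
# through IOFFSET_RBP at 0x70), i.e. 15 * 8 = 120 bytes = IREGISTER_SPACE.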

# Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that the interrupt
# has no error word and 1 means that it comes with an error word.
#define ERROR_WORD_INTERRUPT_LIST 0x00027D00
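# 0x00027D00 has bits 8, 10, 11, 12, 13, 14 and 17 set, i.e. vectors
# #DF, #TS, #NP, #SS, #GP, #PF and #AC, which are the exceptions for which
# the CPU pushes an error word onto the stack.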

#include <arch/pm.h>
#include <arch/mm/page.h>

.text
.global interrupt_handlers
.global syscall_entry
.global panic_printf

panic_printf:
	movq $halt, (%rsp)
	jmp printf

.global cpuid
.global has_cpuid
.global get_cycle
.global read_efer_flag
.global set_efer_flag
.global memcpy
.global memcpy_from_uspace
.global memcpy_to_uspace
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address

#define MEMCPY_DST %rdi
#define MEMCPY_SRC %rsi
#define MEMCPY_SIZE %rdx
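# %rdi, %rsi and %rdx are the first three integer argument registers of the
# SysV AMD64 calling convention, so the routines below receive their
# (dst, src, size) arguments like ordinary C functions.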

/**
 * Copy memory from/to userspace.
 *
 * This is almost a conventional memcpy().
 * The difference is that there is a failover part
 * to which control is returned if a page fault
 * occurs during copy_from_uspace()
 * or copy_to_uspace().
 *
 * @param MEMCPY_DST	Destination address.
 * @param MEMCPY_SRC	Source address.
 * @param MEMCPY_SIZE	Number of bytes to copy.
 *
 * @return MEMCPY_SRC on success, 0 on failure.
 */
memcpy:
memcpy_from_uspace:
memcpy_to_uspace:
	movq MEMCPY_SRC, %rax

	movq MEMCPY_SIZE, %rcx
	shrq $3, %rcx			/* size / 8 */

	rep movsq			/* copy as much as possible word by word */

	movq MEMCPY_SIZE, %rcx
	andq $7, %rcx			/* size % 8 */
	jz 0f

	rep movsb			/* copy the rest byte by byte */

0:
	ret				/* return MEMCPY_SRC, success */

memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
	xorq %rax, %rax			/* return 0, failure */
	ret
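
# If a page fault occurs during memcpy_from_uspace() or memcpy_to_uspace(),
# control is transferred to the corresponding failover address above, so the
# copy returns 0 (failure) to the caller instead of completing.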
116 | |||
242 | palkovsky | 117 | ## Determine CPUID support |
118 | # |
||
119 | # Return 0 in EAX if CPUID is not support, 1 if supported. |
||
120 | # |
||
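# The test toggles the ID flag (bit 21 of RFLAGS); CPUID is supported
# exactly when the change sticks.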
has_cpuid:
	pushfq			# store flags
	popq %rax		# read flags
	movq %rax,%rdx		# copy flags
	btcl $21,%edx		# toggle the ID bit
	pushq %rdx
	popfq			# propagate the change into flags
	pushfq
	popq %rdx		# read flags
	andl $(1<<21),%eax	# interested only in the ID bit
	andl $(1<<21),%edx
	xorl %edx,%eax		# zero if not supported, nonzero if supported
	ret

cpuid:
	movq %rbx, %r10		# we have to preserve %rbx across function calls

	movl %edi,%eax		# load the command into %eax

	cpuid
	movl %eax,0(%rsi)
	movl %ebx,4(%rsi)
	movl %ecx,8(%rsi)
	movl %edx,12(%rsi)

	movq %r10, %rbx
	ret
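
# A C-side sketch of the interface implied by the code above (the type and
# field names are assumptions; only the register/offset layout is taken
# from the code):
#
#	typedef struct {
#		uint32_t cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
#	} cpu_info_t;
#
#	void cpuid(uint32_t cmd, cpu_info_t *info);
#
# cmd arrives in %edi and info in %rsi per the SysV AMD64 ABI; the four
# result words are stored at offsets 0, 4, 8 and 12 of *info.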
148 | |||
2018 | decky | 149 | get_cycle: |
242 | palkovsky | 150 | xorq %rax,%rax |
151 | rdtsc |
||
152 | ret |
||
251 | palkovsky | 153 | |
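# EFER is MSR 0xc0000080 (Extended Feature Enable Register).
# set_efer_flag sets the EFER bit whose index is passed in %edi;
# read_efer_flag returns the lower 32 bits of EFER in %eax.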
set_efer_flag:
	movq $0xc0000080, %rcx
	rdmsr
	btsl %edi, %eax
	wrmsr
	ret

read_efer_flag:
	movq $0xc0000080, %rcx
	rdmsr
	ret

# Save the general purpose registers into the interrupt state area on the
# stack. Only the scratch registers are saved unconditionally; the preserved
# registers (%rbx, %rbp, %r12 - %r15) are saved only with CONFIG_DEBUG_ALLREGS.
.macro save_all_gpr
	movq %rax, IOFFSET_RAX(%rsp)
	movq %rcx, IOFFSET_RCX(%rsp)
	movq %rdx, IOFFSET_RDX(%rsp)
	movq %rsi, IOFFSET_RSI(%rsp)
	movq %rdi, IOFFSET_RDI(%rsp)
	movq %r8, IOFFSET_R8(%rsp)
	movq %r9, IOFFSET_R9(%rsp)
	movq %r10, IOFFSET_R10(%rsp)
	movq %r11, IOFFSET_R11(%rsp)
#ifdef CONFIG_DEBUG_ALLREGS
	movq %rbx, IOFFSET_RBX(%rsp)
	movq %rbp, IOFFSET_RBP(%rsp)
	movq %r12, IOFFSET_R12(%rsp)
	movq %r13, IOFFSET_R13(%rsp)
	movq %r14, IOFFSET_R14(%rsp)
	movq %r15, IOFFSET_R15(%rsp)
#endif
.endm
186 | |||
799 | palkovsky | 187 | .macro restore_all_gpr |
188 | movq IOFFSET_RAX(%rsp), %rax |
||
189 | movq IOFFSET_RCX(%rsp), %rcx |
||
190 | movq IOFFSET_RDX(%rsp), %rdx |
||
191 | movq IOFFSET_RSI(%rsp), %rsi |
||
192 | movq IOFFSET_RDI(%rsp), %rdi |
||
193 | movq IOFFSET_R8(%rsp), %r8 |
||
194 | movq IOFFSET_R9(%rsp), %r9 |
||
195 | movq IOFFSET_R10(%rsp), %r10 |
||
196 | movq IOFFSET_R11(%rsp), %r11 |
||
1094 | palkovsky | 197 | #ifdef CONFIG_DEBUG_ALLREGS |
198 | movq IOFFSET_RBX(%rsp), %rbx |
||
199 | movq IOFFSET_RBP(%rsp), %rbp |
||
799 | palkovsky | 200 | movq IOFFSET_R12(%rsp), %r12 |
201 | movq IOFFSET_R13(%rsp), %r13 |
||
202 | movq IOFFSET_R14(%rsp), %r14 |
||
203 | movq IOFFSET_R15(%rsp), %r15 |
||
1094 | palkovsky | 204 | #endif |
224 | palkovsky | 205 | .endm |

#ifdef CONFIG_DEBUG_ALLREGS
# define INTERRUPT_ALIGN 256
#else
# define INTERRUPT_ALIGN 128
#endif
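# With CONFIG_DEBUG_ALLREGS each handler saves and restores six extra
# registers and therefore needs more room, hence the larger alignment.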
212 | |||
224 | palkovsky | 213 | ## Declare interrupt handlers |
214 | # |
||
215 | # Declare interrupt handlers for n interrupt |
||
216 | # vectors starting at vector i. |
||
217 | # |
||
1021 | jermar | 218 | # The handlers call exc_dispatch(). |
224 | palkovsky | 219 | # |
220 | .macro handler i n |
||
221 | |||
	/*
	 * Choose between version with error code and version without error
	 * code. Both versions have to be of the same size. amd64 assembly is,
	 * however, a little bit tricky. For instance, subq $0x80, %rsp and
	 * subq $0x78, %rsp can result in two instructions with different
	 * op-code lengths.
	 * Therefore we align the interrupt handlers.
	 */
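	/*
	 * For vectors that push an error word, the CPU has already put 8
	 * bytes on the stack, so only IREGISTER_SPACE is reserved here; the
	 * remaining vectors reserve IREGISTER_SPACE+8 so that both kinds of
	 * handler end up with an identically sized frame, released below by
	 * a single addq $(IREGISTER_SPACE+8).
	 */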

	.iflt \i-32
		.if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
			/*
			 * Version with error word.
			 */
			subq $IREGISTER_SPACE, %rsp
		.else
			/*
			 * Version without error word.
			 */
			subq $(IREGISTER_SPACE+8), %rsp
		.endif
	.else
		/*
		 * Version without error word.
		 */
		subq $(IREGISTER_SPACE+8), %rsp
	.endif

	save_all_gpr
	cld

	movq $(\i), %rdi	# %rdi - first parameter
	movq %rsp, %rsi		# %rsi - pointer to istate
	call exc_dispatch	# exc_dispatch(i, istate)

	restore_all_gpr
	# the +8 also skips the error word
	addq $(IREGISTER_SPACE+8), %rsp
	iretq

	.align INTERRUPT_ALIGN
	.if (\n-\i)-1
		handler "(\i+1)",\n
	.endif
.endm

.align INTERRUPT_ALIGN
interrupt_handlers:
h_start:
	handler 0 IDT_ITEMS
h_end:

## Low-level syscall handler
#
# Registers on entry:
#
# @param rcx		Userspace return address.
# @param r11		Userspace RFLAGS.
#
# @param rax		Syscall number.
# @param rdi		1st syscall argument.
# @param rsi		2nd syscall argument.
# @param rdx		3rd syscall argument.
# @param r10		4th syscall argument. Used instead of RCX because the
#			SYSCALL instruction clobbers it.
# @param r8		5th syscall argument.
# @param r9		6th syscall argument.
#
# @return		Return value is in rax.
#
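# Note that SYSCALL does not switch stacks, so the handler has to load the
# per-thread kernel stack pointer itself from %gs:8 (reachable after swapgs)
# before it can enable interrupts.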
syscall_entry:
	swapgs			# Switch to hidden gs
	#
	# %gs:0 - scratch space for this thread's user RSP
	# %gs:8 - address to be used as this thread's kernel RSP
	#
	movq %rsp, %gs:0	# Save this thread's user RSP
	movq %gs:8, %rsp	# Set this thread's kernel RSP
	swapgs			# Switch back to remain consistent
	sti

	pushq %rcx
	pushq %r11

	movq %r10, %rcx		# Copy the 4th argument where it is expected
	pushq %rax
	call syscall_handler
	addq $8, %rsp

	popq %r11
	popq %rcx

	cli
	swapgs
	movq %gs:0, %rsp	# Restore the user RSP
	swapgs

	sysretq

.data
.global interrupt_handler_size

interrupt_handler_size: .quad (h_end-h_start)/IDT_ITEMS
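
# Presumably the IDT setup code uses interrupt_handler_size to locate the
# i-th handler as interrupt_handlers + i * interrupt_handler_size; the
# .align INTERRUPT_ALIGN above keeps every generated handler the same size.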