Subversion Repositories HelenOS
Rev 3274 → Rev 3890
#
# Copyright (c) 2005 Ondrej Palkovsky
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#define IREGISTER_SPACE	72

#define IOFFSET_RAX	0x0
#define IOFFSET_RCX	0x8
#define IOFFSET_RDX	0x10
#define IOFFSET_RSI	0x18
#define IOFFSET_RDI	0x20
#define IOFFSET_R8	0x28
#define IOFFSET_R9	0x30
#define IOFFSET_R10	0x38
#define IOFFSET_R11	0x40
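
/*
 * Illustrative note (not part of the original source): the offsets above
 * describe where save_all_gpr stores each register relative to %rsp. In C
 * terms the save area corresponds roughly to the sketch below; the struct
 * and field names are assumptions, the authoritative istate definition
 * lives in the HelenOS arch headers.
 *
 *	#include <stdint.h>
 *
 *	typedef struct {
 *		uint64_t rax;	// IOFFSET_RAX = 0x00
 *		uint64_t rcx;	// IOFFSET_RCX = 0x08
 *		uint64_t rdx;	// IOFFSET_RDX = 0x10
 *		uint64_t rsi;	// IOFFSET_RSI = 0x18
 *		uint64_t rdi;	// IOFFSET_RDI = 0x20
 *		uint64_t r8;	// IOFFSET_R8  = 0x28
 *		uint64_t r9;	// IOFFSET_R9  = 0x30
 *		uint64_t r10;	// IOFFSET_R10 = 0x38
 *		uint64_t r11;	// IOFFSET_R11 = 0x40
 *	} saved_gpr_t;		// 9 * 8 = 72 bytes = IREGISTER_SPACE
 */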

# Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that the interrupt
# has no error word and 1 means an interrupt with an error word
#define ERROR_WORD_INTERRUPT_LIST 0x00027D00
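# (0x00027D00 has bits 8, 10 - 14 and 17 set, i.e. vectors #DF, #TS, #NP,
# #SS, #GP, #PF and #AC, the amd64 exceptions that push an error code.)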

#include <arch/pm.h>
#include <arch/mm/page.h>

.text
.global interrupt_handlers
.global syscall_entry
.global panic_printf

panic_printf:
	movq $halt, (%rsp)
	jmp printf
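# (The movq above overwrites panic_printf's own return address on the stack
# with the address of halt, so when the tail-called printf returns, control
# falls into halt instead of returning to the caller.)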

.global cpuid
.global has_cpuid
.global get_cycle
.global read_efer_flag
.global set_efer_flag
.global memsetb
.global memsetw
.global memcpy
.global memcpy_from_uspace
.global memcpy_to_uspace
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address

# Wrapper for generic memsetb
memsetb:
	jmp _memsetb

# Wrapper for generic memsetw
memsetw:
	jmp _memsetw

#define MEMCPY_DST	%rdi
#define MEMCPY_SRC	%rsi
#define MEMCPY_SIZE	%rdx

/**
 * Copy memory from/to userspace.
 *
 * This is almost a conventional memcpy().
 * The difference is that there is a failover part
 * to which control is returned from a page fault if
 * the page fault occurs during copy_from_uspace()
 * or copy_to_uspace().
 *
 * @param MEMCPY_DST	Destination address.
 * @param MEMCPY_SRC	Source address.
 * @param MEMCPY_SIZE	Number of bytes to copy.
 *
 * @return MEMCPY_DST on success, 0 on failure.
 */
memcpy:
memcpy_from_uspace:
memcpy_to_uspace:
	movq MEMCPY_DST, %rax

	movq MEMCPY_SIZE, %rcx
	shrq $3, %rcx			/* size / 8 */

	rep movsq			/* copy as much as possible word by word */

	movq MEMCPY_SIZE, %rcx
	andq $7, %rcx			/* size % 8 */
	jz 0f

	rep movsb			/* copy the rest byte by byte */

0:
	ret				/* return MEMCPY_DST, success */

memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
	xorq %rax, %rax			/* return 0, failure */
	ret
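
/*
 * Illustrative sketch (not part of the original source): a plain C
 * approximation of the copy loop above. The kernel routine additionally
 * relies on the page fault handler to resume a faulting userspace copy at
 * the failover label, which makes it return 0 instead of the destination.
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static void *memcpy_sketch(void *dst, const void *src, size_t size)
 *	{
 *		uint64_t *d8 = dst;
 *		const uint64_t *s8 = src;
 *		for (size_t i = 0; i < size / 8; i++)	// word by word
 *			*d8++ = *s8++;
 *
 *		uint8_t *d1 = (uint8_t *) d8;
 *		const uint8_t *s1 = (const uint8_t *) s8;
 *		for (size_t i = 0; i < size % 8; i++)	// remaining bytes
 *			*d1++ = *s1++;
 *
 *		return dst;	// %rax holds MEMCPY_DST on success
 *	}
 */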

## Determine CPUID support
#
# Return 0 in EAX if CPUID is not supported, nonzero if it is supported.
#
has_cpuid:
	pushfq			# store flags
	popq %rax		# read flags
	movq %rax,%rdx		# copy flags
	btcl $21,%edx		# flip the ID bit
	pushq %rdx
	popfq			# propagate the change into flags
	pushfq
	popq %rdx		# read flags
	andl $(1<<21),%eax	# interested only in ID bit
	andl $(1<<21),%edx
	xorl %edx,%eax		# 0 if not supported, nonzero if supported
	ret
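
/*
 * Illustrative sketch (not part of the original source): the same EFLAGS
 * ID-bit probe expressed as GCC-style inline assembly in C. If the ID bit
 * (bit 21) can be toggled, the CPUID instruction is available.
 *
 *	#include <stdint.h>
 *
 *	static int has_cpuid_sketch(void)
 *	{
 *		uint64_t before, after;
 *		asm volatile (
 *			"pushfq\n\t"
 *			"popq %0\n\t"			// read RFLAGS
 *			"movq %0, %1\n\t"
 *			"xorq $(1 << 21), %1\n\t"	// flip the ID bit
 *			"pushq %1\n\t"
 *			"popfq\n\t"			// write it back
 *			"pushfq\n\t"
 *			"popq %1"			// read RFLAGS again
 *			: "=&r" (before), "=&r" (after));
 *		return ((before ^ after) & (1 << 21)) != 0;
 *	}
 */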

cpuid:
	movq %rbx, %r10  # we have to preserve rbx across function calls

	movl %edi,%eax	# load the command into %eax

	cpuid
	movl %eax,0(%rsi)
	movl %ebx,4(%rsi)
	movl %ecx,8(%rsi)
	movl %edx,12(%rsi)

	movq %r10, %rbx
	ret
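
/*
 * Illustrative sketch (not part of the original source): how the two
 * routines above might be used from C. The cpuid routine takes the leaf
 * number in %edi and a pointer in %rsi and stores %eax, %ebx, %ecx and
 * %edx at offsets 0, 4, 8 and 12. The struct and prototype names below
 * are assumptions for illustration.
 *
 *	#include <stdint.h>
 *
 *	typedef struct {
 *		uint32_t cpuid_eax;
 *		uint32_t cpuid_ebx;
 *		uint32_t cpuid_ecx;
 *		uint32_t cpuid_edx;
 *	} cpu_info_t;
 *
 *	extern uint64_t has_cpuid(void);
 *	extern void cpuid(uint32_t cmd, cpu_info_t *info);
 *
 *	static int cpu_has_nx(void)
 *	{
 *		cpu_info_t info;
 *		if (!has_cpuid())
 *			return 0;
 *		cpuid(0x80000001, &info);		// extended feature leaf
 *		return (info.cpuid_edx >> 20) & 1;	// NX bit
 *	}
 */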

get_cycle:
	xorq %rax,%rax
	rdtsc
	ret
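# (rdtsc places the counter in %edx:%eax, so the value returned in %rax here
# is only the low 32 bits of the time-stamp counter.)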

set_efer_flag:
	movq $0xc0000080, %rcx
	rdmsr
	btsl %edi, %eax
	wrmsr
	ret

read_efer_flag:
	movq $0xc0000080, %rcx
	rdmsr
	ret
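
/*
 * Illustrative note (not part of the original source): MSR 0xc0000080 is
 * the AMD64 Extended Feature Enable Register (EFER). set_efer_flag sets a
 * single bit given by its position in %edi; read_efer_flag returns the low
 * 32 bits of EFER, since rdmsr leaves them in %eax. A hedged C usage
 * sketch, with the architectural bit positions spelled out:
 *
 *	#include <stdint.h>
 *
 *	extern void set_efer_flag(int flag);
 *	extern uint64_t read_efer_flag(void);
 *
 *	enum {
 *		EFER_SCE = 0,	// SYSCALL/SYSRET enable
 *		EFER_LME = 8,	// long mode enable
 *		EFER_LMA = 10,	// long mode active (read-only status)
 *		EFER_NXE = 11	// no-execute enable
 *	};
 *
 *	static void enable_syscall_sketch(void)
 *	{
 *		// illustrative call; the real initialization lives elsewhere
 *		set_efer_flag(EFER_SCE);
 *	}
 */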

# Push all volatile general purpose registers on stack
.macro save_all_gpr
	movq %rax, IOFFSET_RAX(%rsp)
	movq %rcx, IOFFSET_RCX(%rsp)
	movq %rdx, IOFFSET_RDX(%rsp)
	movq %rsi, IOFFSET_RSI(%rsp)
	movq %rdi, IOFFSET_RDI(%rsp)
	movq %r8, IOFFSET_R8(%rsp)
	movq %r9, IOFFSET_R9(%rsp)
	movq %r10, IOFFSET_R10(%rsp)
	movq %r11, IOFFSET_R11(%rsp)
.endm

.macro restore_all_gpr
	movq IOFFSET_RAX(%rsp), %rax
	movq IOFFSET_RCX(%rsp), %rcx
	movq IOFFSET_RDX(%rsp), %rdx
	movq IOFFSET_RSI(%rsp), %rsi
	movq IOFFSET_RDI(%rsp), %rdi
	movq IOFFSET_R8(%rsp), %r8
	movq IOFFSET_R9(%rsp), %r9
	movq IOFFSET_R10(%rsp), %r10
	movq IOFFSET_R11(%rsp), %r11
.endm

#define INTERRUPT_ALIGN 128

## Declare interrupt handlers
#
# Declare interrupt handlers for n interrupt
# vectors starting at vector i.
#
# The handlers call exc_dispatch().
#
.macro handler i n

	/*
	 * Choose between version with error code and version without error
	 * code. Both versions have to be of the same size. amd64 assembly is,
	 * however, a little bit tricky. For instance, subq $0x80, %rsp and
	 * subq $0x78, %rsp can result in two instructions with different
	 * op-code lengths.
	 * Therefore we align the interrupt handlers.
	 */

	.iflt \i-32
		.if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
			/*
			 * Version with error word.
			 */
			subq $IREGISTER_SPACE, %rsp
		.else
			/*
			 * Version without error word.
			 */
			subq $(IREGISTER_SPACE+8), %rsp
		.endif
	.else
		/*
		 * Version without error word.
		 */
		subq $(IREGISTER_SPACE+8), %rsp
	.endif

	save_all_gpr
	cld

	movq $(\i), %rdi   	# %rdi - first parameter
	movq %rsp, %rsi   	# %rsi - pointer to istate
	call exc_dispatch 	# exc_dispatch(i, istate)

	restore_all_gpr
	# $8 = Skip error word
	addq $(IREGISTER_SPACE+8), %rsp
	iretq

	.align INTERRUPT_ALIGN
	.if (\n-\i)-1
	handler "(\i+1)",\n
	.endif
.endm

.align INTERRUPT_ALIGN
interrupt_handlers:
h_start:
	handler 0 IDT_ITEMS
h_end:

## Low-level syscall handler
#
# Registers on entry:
#
# @param rcx		Userspace return address.
# @param r11		Userspace RFLAGS.
#
# @param rax		Syscall number.
# @param rdi		1st syscall argument.
# @param rsi		2nd syscall argument.
# @param rdx		3rd syscall argument.
# @param r10		4th syscall argument. Used instead of RCX because the
#			SYSCALL instruction clobbers it.
# @param r8		5th syscall argument.
# @param r9		6th syscall argument.
#
# @return		Return value is in rax.
#
syscall_entry:
	swapgs			# Switch to hidden gs
	#
	# %gs:0			Scratch space for this thread's user RSP
	# %gs:8			Address to be used as this thread's kernel RSP
	#
	movq %rsp, %gs:0	# Save this thread's user RSP
	movq %gs:8, %rsp	# Set this thread's kernel RSP
	swapgs			# Switch back to remain consistent
	sti

	pushq %rcx
	pushq %r11

	movq %r10, %rcx		# Copy the 4th argument where it is expected
	pushq %rax
	call syscall_handler
	addq $8, %rsp

	popq %r11
	popq %rcx

	cli
	swapgs
	movq %gs:0, %rsp	# Restore the user RSP
	swapgs

	sysretq
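
/*
 * Illustrative sketch (not part of the original source): a userspace stub
 * matching the register convention documented above. The function name is
 * an assumption; the actual HelenOS libc stub may differ.
 *
 *	#include <stdint.h>
 *
 *	static inline uint64_t syscall6_sketch(uint64_t id, uint64_t a1,
 *	    uint64_t a2, uint64_t a3, uint64_t a4, uint64_t a5, uint64_t a6)
 *	{
 *		register uint64_t r10 asm("r10") = a4;	// 4th argument goes in r10
 *		register uint64_t r8 asm("r8") = a5;
 *		register uint64_t r9 asm("r9") = a6;
 *		uint64_t ret;
 *
 *		asm volatile ("syscall"
 *		    : "=a" (ret)
 *		    : "a" (id), "D" (a1), "S" (a2), "d" (a3),
 *		      "r" (r10), "r" (r8), "r" (r9)
 *		    : "rcx", "r11", "memory");	// SYSCALL clobbers rcx and r11
 *		return ret;
 *	}
 */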

.data
.global interrupt_handler_size

interrupt_handler_size: .quad (h_end-h_start)/IDT_ITEMS
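
/*
 * Illustrative sketch (not part of the original source): how the exported
 * interrupt_handlers table and the interrupt_handler_size stride might be
 * used to fill in the IDT. idt_entry_set_offset is a hypothetical helper
 * and IDT_ITEMS is an assumed value; the real setup code lives in the C
 * part of the kernel.
 *
 *	#include <stdint.h>
 *
 *	#define IDT_ITEMS	64	// assumption for illustration
 *
 *	extern uint8_t interrupt_handlers[];
 *	extern uint64_t interrupt_handler_size;
 *
 *	// hypothetical helper that writes the offset into IDT entry i
 *	extern void idt_entry_set_offset(unsigned int i, uintptr_t offset);
 *
 *	void idt_setup_sketch(void)
 *	{
 *		for (unsigned int i = 0; i < IDT_ITEMS; i++) {
 *			uintptr_t addr = (uintptr_t) interrupt_handlers
 *			    + i * interrupt_handler_size;
 *			idt_entry_set_offset(i, addr);
 *		}
 *	}
 */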