Subversion Repositories HelenOS


Rev 3022 → Rev 4055
#
# Copyright (c) 2005 Ondrej Palkovsky
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#define IREGISTER_SPACE	72

#define IOFFSET_RAX	0x0
#define IOFFSET_RCX	0x8
#define IOFFSET_RDX	0x10
#define IOFFSET_RSI	0x18
#define IOFFSET_RDI	0x20
#define IOFFSET_R8	0x28
#define IOFFSET_R9	0x30
#define IOFFSET_R10	0x38
#define IOFFSET_R11	0x40
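#
# Note: the nine offsets above pack the registers saved by save_all_gpr (see
# below) back to back, so IREGISTER_SPACE = 9 * 8 = 72 bytes is exactly the
# size of that save area.
#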

# Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that the interrupt
# has no error word and 1 means an interrupt with an error word.
#define ERROR_WORD_INTERRUPT_LIST 0x00027D00
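#
# For reference, the amd64 vectors that push an error word are 8 (#DF),
# 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP), 14 (#PF) and 17 (#AC), so the mask
# works out to:
#
#   (1 << 8) | (1 << 10) | (1 << 11) | (1 << 12) | (1 << 13) | (1 << 14) | (1 << 17)
#     = 0x100 + 0x400 + 0x800 + 0x1000 + 0x2000 + 0x4000 + 0x20000 = 0x00027D00
#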

#include <arch/pm.h>
#include <arch/mm/page.h>

.text
.global interrupt_handlers
.global syscall_entry
.global panic_printf

panic_printf:
	movq $halt, (%rsp)
	jmp printf
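# panic_printf() is reached by a call, so (%rsp) holds the return address;
# overwriting it with $halt before tail-jumping to printf makes printf()
# "return" straight into halt once the panic message has been printed.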

.global cpuid
.global has_cpuid
.global get_cycle
.global read_efer_flag
.global set_efer_flag
.global memsetb
.global memsetw
.global memcpy
.global memcpy_from_uspace
.global memcpy_to_uspace
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address

# Wrapper for generic memsetb
memsetb:
	jmp _memsetb

# Wrapper for generic memsetw
memsetw:
	jmp _memsetw
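# The two wrappers above are plain tail jumps: the System V AMD64 argument
# registers (%rdi, %rsi, %rdx) are left untouched, so the generic C
# implementations _memsetb and _memsetw see exactly the arguments that the
# caller passed to memsetb/memsetw.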
#define MEMCPY_DST	%rdi
#define MEMCPY_SRC	%rsi
#define MEMCPY_SIZE	%rdx

/**
 * Copy memory from/to userspace.
 *
 * This is almost a conventional memcpy().
 * The difference is that there is a failover part
 * to which control is returned from a page fault if
 * the page fault occurs during copy_from_uspace()
 * or copy_to_uspace().
 *
 * @param MEMCPY_DST	Destination address.
 * @param MEMCPY_SRC	Source address.
 * @param MEMCPY_SIZE	Number of bytes to copy.
 *
 * @return		MEMCPY_DST on success, 0 on failure.
 */
memcpy:
memcpy_from_uspace:
memcpy_to_uspace:
	movq MEMCPY_DST, %rax

	movq MEMCPY_SIZE, %rcx
	shrq $3, %rcx			/* size / 8 */

	rep movsq			/* copy as much as possible word by word */

	movq MEMCPY_SIZE, %rcx
	andq $7, %rcx			/* size % 8 */
	jz 0f

	rep movsb			/* copy the rest byte by byte */

0:
	ret				/* return MEMCPY_DST, success */

memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
	xorq %rax, %rax			/* return 0, failure */
	ret
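# How the failover is meant to work (a sketch; the C-side hooks live outside
# this file): if a page fault taken inside memcpy_from_uspace() or
# memcpy_to_uspace() cannot be serviced, the page fault handler is expected to
# rewrite the saved instruction pointer of the interrupted context to the
# corresponding failover address and return from the exception. Execution then
# resumes at the failover label, which clears %rax so the C caller sees 0
# (failure); no other cleanup is needed because the copy keeps all of its
# state in registers and uses no stack locals.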

## Determine CPUID support
#
# Return 0 in EAX if CPUID is not supported, a non-zero value if it is.
#
has_cpuid:
	pushfq			# store flags
	popq %rax		# read flags
	movq %rax,%rdx		# copy flags
	btcl $21,%edx		# swap the ID bit
	pushq %rdx
	popfq			# propagate the change into flags
	pushfq
	popq %rdx		# read flags
	andl $(1<<21),%eax	# interested only in the ID bit
	andl $(1<<21),%edx
	xorl %edx,%eax		# 0 if not supported, non-zero if supported
	ret
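# The test above relies on an architectural property: RFLAGS bit 21 (ID) can
# be toggled by software if and only if the CPU implements the cpuid
# instruction. Note that the "supported" value left in %eax is the toggled ID
# bit itself (0x200000), not literally 1, so C callers should only test the
# result for being non-zero.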

cpuid:
	movq %rbx, %r10		# we have to preserve rbx across function calls

	movl %edi,%eax		# load the command into %eax

	cpuid
	movl %eax,0(%rsi)
	movl %ebx,4(%rsi)
	movl %ecx,8(%rsi)
	movl %edx,12(%rsi)

	movq %r10, %rbx
	ret
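# Assumed C-side view of the wrapper above (a sketch only; the real type and
# its field names are defined elsewhere in the kernel):
#
#	typedef struct {
#		uint32_t cpuid_eax;	/* stored at  0(%rsi) */
#		uint32_t cpuid_ebx;	/* stored at  4(%rsi) */
#		uint32_t cpuid_ecx;	/* stored at  8(%rsi) */
#		uint32_t cpuid_edx;	/* stored at 12(%rsi) */
#	} cpu_info_t;
#
#	void cpuid(uint32_t cmd, cpu_info_t *info);
#
# %rbx is parked in %r10 because the cpuid instruction clobbers %ebx, which is
# a callee-saved register in the System V AMD64 ABI.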

get_cycle:
	rdtsc				# TSC -> %edx:%eax
	shlq $32, %rdx
	orq %rdx, %rax			# return the full 64-bit cycle count in %rax
	ret

set_efer_flag:
	movq $0xc0000080, %rcx
	rdmsr
	btsl %edi, %eax
	wrmsr
	ret

read_efer_flag:
	movq $0xc0000080, %rcx
	rdmsr
	ret
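# MSR 0xc0000080 is the Extended Feature Enable Register (EFER). The flag
# argument in %edi is a bit index; per the AMD64 manual, bit 0 is SCE (enable
# SYSCALL/SYSRET), bit 8 is LME, bit 10 is LMA and bit 11 is NXE. Since
# rdmsr/wrmsr operate on %edx:%eax, set_efer_flag writes the upper half back
# exactly as it was read.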

# Push all volatile general purpose registers on stack
.macro save_all_gpr
	movq %rax, IOFFSET_RAX(%rsp)
	movq %rcx, IOFFSET_RCX(%rsp)
	movq %rdx, IOFFSET_RDX(%rsp)
	movq %rsi, IOFFSET_RSI(%rsp)
	movq %rdi, IOFFSET_RDI(%rsp)
	movq %r8, IOFFSET_R8(%rsp)
	movq %r9, IOFFSET_R9(%rsp)
	movq %r10, IOFFSET_R10(%rsp)
	movq %r11, IOFFSET_R11(%rsp)
.endm

.macro restore_all_gpr
	movq IOFFSET_RAX(%rsp), %rax
	movq IOFFSET_RCX(%rsp), %rcx
	movq IOFFSET_RDX(%rsp), %rdx
	movq IOFFSET_RSI(%rsp), %rsi
	movq IOFFSET_RDI(%rsp), %rdi
	movq IOFFSET_R8(%rsp), %r8
	movq IOFFSET_R9(%rsp), %r9
	movq IOFFSET_R10(%rsp), %r10
	movq IOFFSET_R11(%rsp), %r11
.endm

#define INTERRUPT_ALIGN 128

## Declare interrupt handlers
#
# Declare interrupt handlers for n interrupt
# vectors starting at vector i.
#
# The handlers call exc_dispatch().
#
.macro handler i n

	/*
	 * Choose between version with error code and version without error
	 * code. Both versions have to be of the same size. amd64 assembly is,
	 * however, a little bit tricky. For instance, subq $0x80, %rsp and
	 * subq $0x78, %rsp can result in two instructions with different
	 * op-code lengths.
	 * Therefore we align the interrupt handlers.
	 */

	.iflt \i-32
		.if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
			/*
			 * Version with error word.
			 */
			subq $IREGISTER_SPACE, %rsp
		.else
			/*
			 * Version without error word.
			 */
			subq $(IREGISTER_SPACE+8), %rsp
		.endif
	.else
		/*
		 * Version without error word.
		 */
		subq $(IREGISTER_SPACE+8), %rsp
	.endif

	save_all_gpr
	cld

	movq $(\i), %rdi	# %rdi - first parameter
	movq %rsp, %rsi		# %rsi - pointer to istate
	call exc_dispatch	# exc_dispatch(i, istate)

	restore_all_gpr
	# $8 = skip the error word
	addq $(IREGISTER_SPACE+8), %rsp
	iretq

	.align INTERRUPT_ALIGN
	.if (\n-\i)-1
	handler "(\i+1)",\n
	.endif
.endm
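# Stack layout handed to exc_dispatch() by the macro above (derived from the
# IOFFSET_* constants; the C-side istate structure is assumed to mirror it):
#
#	%rsp + 0x00 .. 0x40	saved %rax, %rcx, %rdx, %rsi, %rdi, %r8 - %r11
#	%rsp + 0x48		error word pushed by the CPU, or the 8 dummy
#				bytes reserved when the vector has none
#	%rsp + 0x50 .. 0x70	%rip, %cs, %rflags, %rsp, %ss pushed by the CPU
#				and consumed again by iretq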

.align INTERRUPT_ALIGN
interrupt_handlers:
h_start:
	handler 0 IDT_ITEMS
h_end:

## Low-level syscall handler
#
# Registers on entry:
#
# @param rcx		Userspace return address.
# @param r11		Userspace RFLAGS.
#
# @param rax		Syscall number.
# @param rdi		1st syscall argument.
# @param rsi		2nd syscall argument.
# @param rdx		3rd syscall argument.
# @param r10		4th syscall argument. Used instead of RCX because the
#			SYSCALL instruction clobbers it.
# @param r8		5th syscall argument.
# @param r9		6th syscall argument.
#
# @return		Return value is in rax.
#
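# Background for the code below: the SYSCALL instruction itself stores the
# userspace return address in %rcx and the userspace RFLAGS in %r11 (which is
# why both are preserved around the C call, and why userspace passes the 4th
# argument in %r10), and SYSRETQ restores them from the same two registers on
# the way back. The syscall number pushed from %rax becomes the 7th,
# stack-passed argument of syscall_handler() under the System V AMD64 ABI;
# the exact C prototype is defined elsewhere in the kernel.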
syscall_entry:
	swapgs			# Switch to hidden gs
	#
	# %gs:0			Scratch space for this thread's user RSP
	# %gs:8			Address to be used as this thread's kernel RSP
	#
	movq %rsp, %gs:0	# Save this thread's user RSP
	movq %gs:8, %rsp	# Set this thread's kernel RSP
	swapgs			# Switch back to remain consistent
	sti

	pushq %rcx
	pushq %r11

	movq %r10, %rcx		# Copy the 4th argument where it is expected
	pushq %rax
	call syscall_handler
	addq $8, %rsp

	popq %r11
	popq %rcx

	cli
	swapgs
	movq %gs:0, %rsp	# Restore the user RSP
	swapgs

	sysretq

.data
.global interrupt_handler_size

interrupt_handler_size: .quad (h_end-h_start)/IDT_ITEMS
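# Every expansion of the handler macro above is padded to INTERRUPT_ALIGN
# bytes, so dividing the size of the whole table by IDT_ITEMS yields the
# stride that the IDT setup code (assumed to live in the C part of the port)
# uses to locate the entry point of each individual handler.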