#
# Copyright (c) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#include <arch/register.h>
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <mm/asid.h>

#define RR_MASK (0xFFFFFFFF00000002)
#define RID_SHIFT	8
#define PS_SHIFT	2
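
# The KERNEL_TRANSLATION_* values are the pre-formatted TLB entries pinned into
# the translation registers below via itr.i/itr.d; the 0x...671 variants appear
# to select an uncacheable memory attribute for the device and firmware ranges.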
#define KERNEL_TRANSLATION_I	0x0010000000000661
#define KERNEL_TRANSLATION_D	0x0010000000000661
#define KERNEL_TRANSLATION_VIO	0x0010000000000671
#define KERNEL_TRANSLATION_IO	0x00100FFFFC000671
#define KERNEL_TRANSLATION_FW	0x00100000F0000671

.section K_TEXT_START, "ax"

.global kernel_image_start

stack0:
kernel_image_start:
	.auto

	# Identify self(CPU) in OS structures by ID / EID
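	# cr64 is cr.lid, the Local ID register; the CPU's id/eid pair sits in
	# bits 16..31, so shifting right by 16 yields a unique 16-bit index into
	# the 64 KiB cpu_by_id_eid_list table. Translation is still off at this
	# point, which is presumably why the table address is masked down to its
	# low 32 bits (its physical form).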

	mov r9 = cr64
	mov r10 = 1
	movl r12 = 0xffffffff
	movl r8 = cpu_by_id_eid_list
	and r8 = r8, r12
	shr r9 = r9, 16
	add r8 = r8, r9
	st1 [r8] = r10

	mov psr.l = r0
	srlz.i
	srlz.d

	# Fill TR.i and TR.d using Region Register #VRN_KERNEL
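	# First program the kernel region register with RID_KERNEL and the kernel
	# page size, then load cr.ifa (virtual address) and cr.itir (page size)
	# and pin the kernel mapping into itr[0] (instruction) and dtr[0] (data).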

	movl r8 = (VRN_KERNEL << VRN_SHIFT)
	mov r9 = rr[r8]

	movl r10 = (RR_MASK)
	and r9 = r10, r9
	movl r10 = ((RID_KERNEL << RID_SHIFT) | (KERNEL_PAGE_WIDTH << PS_SHIFT))
	or  r9 = r10, r9

	mov rr[r8] = r9

	movl r8 = (VRN_KERNEL << VRN_SHIFT)
	mov cr.ifa = r8

	mov r11 = cr.itir ;;
	movl r10 = (KERNEL_PAGE_WIDTH << PS_SHIFT);;
	or r10 = r10, r11 ;;
	mov cr.itir = r10;;

	movl r10 = (KERNEL_TRANSLATION_I)
	itr.i itr[r0] = r10
	movl r10 = (KERNEL_TRANSLATION_D)
	itr.d dtr[r0] = r10
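
	# Pin a data translation for the virtual I/O window (VIO_OFFSET) into dtr[1].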
	movl r7 = 1
	movl r8 = (VRN_KERNEL << VRN_SHIFT) | VIO_OFFSET
	mov cr.ifa = r8
	movl r10 = (KERNEL_TRANSLATION_VIO)
	itr.d dtr[r7] = r10
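
	# Re-program the page-size field of cr.itir (bits 2..7, hence the ~0xfc
	# mask) for IO_PAGE_WIDTH and pin the memory-mapped I/O range (IO_OFFSET)
	# into dtr[2].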
	mov r11 = cr.itir ;;
	movl r10 = ~0xfc;;
	and r10 = r10, r11 ;;
	movl r11 = (IO_PAGE_WIDTH << PS_SHIFT);;
	or r10 = r10, r11 ;;
	mov cr.itir = r10;;

	movl r7 = 2
	movl r8 = (VRN_KERNEL << VRN_SHIFT) | IO_OFFSET
	mov cr.ifa = r8
	movl r10 = (KERNEL_TRANSLATION_IO)
	itr.d dtr[r7] = r10

	# Set up the mapping for the firmware area (also SAPIC)

	mov r11 = cr.itir ;;
	movl r10 = ~0xfc;;
	and r10 = r10, r11 ;;
	movl r11 = (FW_PAGE_WIDTH << PS_SHIFT);;
	or r10 = r10, r11 ;;
	mov cr.itir = r10;;

	movl r7 = 3
	movl r8 = (VRN_KERNEL << VRN_SHIFT) | FW_OFFSET
	mov cr.ifa = r8
	movl r10 = (KERNEL_TRANSLATION_FW)
	itr.d dtr[r7] = r10

	# Initialize PSR
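	# PSR.dt/PSR.rt/PSR.it enable data, register-stack and instruction address
	# translation and PSR.ic re-enables interruption collection. The new PSR and
	# the target address (paging_start) are staged in cr.ipsr/cr.iip and take
	# effect at the rfi below.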

	movl r10 = (PSR_DT_MASK | PSR_RT_MASK | PSR_IT_MASK | PSR_IC_MASK)  /* Enable paging */
	mov r9 = psr

	or r10 = r10, r9
	mov cr.ipsr = r10
	mov cr.ifs = r0
	movl r8 = paging_start
	mov cr.iip = r8
	srlz.d
	srlz.i

	.explicit

	/*
	 * Return From Interrupt is the only way to
	 * fill the upper half word of PSR.
	 */
	rfi;;


.global paging_start
paging_start:

	/*
	 * Now we are paging.
	 */

	# Switch to register bank 1
	bsw.1

	# Am I BSP or AP?
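	# bsp_started is still zero on the first pass, so p3 selects the BSP path
	# (initialize the hardcoded_* variables, call arch_pre_main and main_bsp)
	# while p2 selects the AP path (branch to main_ap).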
	movl r20 = bsp_started;;
	ld8 r20 = [r20];;
	cmp.eq p3, p2 = r20, r0;;

	# Initialize register stack
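	# ar.rsc = 0 puts the RSE into enforced lazy mode, ar.bspstore points the
	# register backing store at the base of the kernel region, and loadrs
	# (with the rsc.loadrs field cleared) discards any stale stacked registers.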
	mov ar.rsc = r0
	movl r8 = (VRN_KERNEL << VRN_SHIFT) ;;
	mov ar.bspstore = r8
	loadrs

	# Initialize memory stack to some sane value
	movl r12 = stack0 ;;
	add r12 = -16, r12	/* allocate a scratch area on the stack */

	# Initialize gp (Global Pointer) register
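	# r20 keeps the kernel-virtual form of the value that arrived in r1 (it is
	# stored into bootinfo below on the BSP), while gp itself is pointed at the
	# kernel load address.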
	movl r20 = (VRN_KERNEL << VRN_SHIFT);;
	or r20 = r20,r1;;
	movl r1 = _hardcoded_load_address

	/*
	 * Initialize hardcoded_* variables. BSP only.
	 */
(p3)	movl r14 = _hardcoded_ktext_size
(p3)	movl r15 = _hardcoded_kdata_size
(p3)	movl r16 = _hardcoded_load_address ;;
(p3)	addl r17 = @gprel(hardcoded_ktext_size), gp
(p3)	addl r18 = @gprel(hardcoded_kdata_size), gp
(p3)	addl r19 = @gprel(hardcoded_load_address), gp
(p3)	addl r21 = @gprel(bootinfo), gp
	;;
(p3)	st8 [r17] = r14
(p3)	st8 [r18] = r15
(p3)	st8 [r19] = r16
(p3)	st8 [r21] = r20

	ssm (1 << 19) ;; /* Disable f32 - f127 */
	srlz.i
	srlz.d ;;
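
	# APs (p2) branch off to main_ap and never return; the BSP falls through,
	# flags itself in bsp_started and continues to arch_pre_main and main_bsp.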
(p2)	movl r18 = main_ap ;;
(p2)	mov b1 = r18 ;;
(p2)	br.call.sptk.many b0 = b1

	# Mark that BSP is on
	mov r20 = 1;;
	movl r21 = bsp_started;;
	st8 [r21] = r20;;

	br.call.sptk.many b0 = arch_pre_main

	movl r18 = main_bsp ;;
	mov b1 = r18 ;;
	br.call.sptk.many b0 = b1

0:
	br 0b
.align 4096

kernel_image_ap_start:
	.auto

	# Identify self(CPU) in OS structures by ID / EID

	mov r9 = cr64
	mov r10 = 1
	movl r12 = 0xffffffff
	movl r8 = cpu_by_id_eid_list
	and r8 = r8, r12
	shr r9 = r9, 16
	add r8 = r8, r9
	st1 [r8] = r10

	# Wait for wakeup synchro signal (#3 in cpu_by_id_eid_list)
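	# Spin until the wakeup value 3 shows up in this CPU's slot (presumably
	# written by the BSP during SMP startup), then jump to kernel_image_start;
	# addresses are again masked to 32 bits because the AP still runs with
	# translation disabled.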
kernel_image_ap_start_loop:
	movl r11 = kernel_image_ap_start_loop
	and r11 = r11, r12
	mov b1 = r11

	ld1 r20 = [r8];;
	movl r21 = 3;;
	cmp.eq p2, p3 = r20, r21;;
(p3)	br.call.sptk.many b0 = b1

	movl r11 = kernel_image_start
	and r11 = r11, r12
	mov b1 = r11
	br.call.sptk.many b0 = b1


.align 16
.global bsp_started
bsp_started:
.space 8

.align 4096
.global cpu_by_id_eid_list
cpu_by_id_eid_list:
.space 65536