Subversion Repositories HelenOS

Rev

Rev 3782 | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 3782 Rev 4605
1
#
1
#
2
# Copyright (c) 2005 Jakub Jermar
2
# Copyright (c) 2005 Jakub Jermar
3
# All rights reserved.
3
# All rights reserved.
4
#
4
#
5
# Redistribution and use in source and binary forms, with or without
5
# Redistribution and use in source and binary forms, with or without
6
# modification, are permitted provided that the following conditions
6
# modification, are permitted provided that the following conditions
7
# are met:
7
# are met:
8
#
8
#
9
# - Redistributions of source code must retain the above copyright
9
# - Redistributions of source code must retain the above copyright
10
#   notice, this list of conditions and the following disclaimer.
10
#   notice, this list of conditions and the following disclaimer.
11
# - Redistributions in binary form must reproduce the above copyright
11
# - Redistributions in binary form must reproduce the above copyright
12
#   notice, this list of conditions and the following disclaimer in the
12
#   notice, this list of conditions and the following disclaimer in the
13
#   documentation and/or other materials provided with the distribution.
13
#   documentation and/or other materials provided with the distribution.
14
# - The name of the author may not be used to endorse or promote products
14
# - The name of the author may not be used to endorse or promote products
15
#   derived from this software without specific prior written permission.
15
#   derived from this software without specific prior written permission.
16
#
16
#
17
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
#
27
#
28
 
28
 
29
#include <arch/register.h>
29
#include <arch/register.h>
30
#include <arch/mm/page.h>
30
#include <arch/mm/page.h>
31
#include <arch/mm/asid.h>
31
#include <arch/mm/asid.h>
32
#include <mm/asid.h>
32
#include <mm/asid.h>
33
 
33
 
34
#define RR_MASK (0xFFFFFFFF00000002)
34
#define RR_MASK (0xFFFFFFFF00000002)
35
#define RID_SHIFT	8
35
#define RID_SHIFT	8
36
#define PS_SHIFT	2
36
#define PS_SHIFT	2
37
 
37
 
38
#define KERNEL_TRANSLATION_I	0x0010000000000661
38
#define KERNEL_TRANSLATION_I	0x0010000000000661
39
#define KERNEL_TRANSLATION_D	0x0010000000000661
39
#define KERNEL_TRANSLATION_D	0x0010000000000661
40
#define KERNEL_TRANSLATION_VIO	0x0010000000000671
40
#define KERNEL_TRANSLATION_VIO	0x0010000000000671
41
#define KERNEL_TRANSLATION_IO	0x00100FFFFC000671 
41
#define KERNEL_TRANSLATION_IO	0x00100FFFFC000671 
42
#define KERNEL_TRANSLATION_FW	0x00100000F0000671 
42
#define KERNEL_TRANSLATION_FW	0x00100000F0000671 
43
 
43
 
44
.section K_TEXT_START, "ax"
44
.section K_TEXT_START, "ax"
45
 
45
 
46
.global kernel_image_start
46
.global kernel_image_start
47
 
47
 
48
stack0:
48
stack0:
49
kernel_image_start:
49
kernel_image_start:
50
	.auto
50
	.auto
51
 
51
 
52
#ifdef CONFIG_SMP
52
#ifdef CONFIG_SMP
53
	# Identify this CPU in the OS structures by ID / EID
53
	# Identify this CPU in the OS structures by ID / EID
54
 
54
 
55
	mov r9 = cr64
55
	mov r9 = cr64
56
	mov r10 = 1
56
	mov r10 = 1
57
	movl r12 = 0xffffffff
57
	movl r12 = 0xffffffff
58
	movl r8 = cpu_by_id_eid_list
58
	movl r8 = cpu_by_id_eid_list
59
	and r8 = r8, r12
59
	and r8 = r8, r12
60
	shr r9 = r9, 16
60
	shr r9 = r9, 16
61
	add r8 = r8, r9
61
	add r8 = r8, r9
62
	st1 [r8] = r10
62
	st1 [r8] = r10
63
#endif
63
#endif
64
 
64
 
65
	mov psr.l = r0
65
	mov psr.l = r0
66
	srlz.i
66
	srlz.i
67
	srlz.d
67
	srlz.d
68
 
68
 
69
	# Fill TR.i and TR.d using Region Register #VRN_KERNEL
69
	# Fill TR.i and TR.d using Region Register #VRN_KERNEL
70
 
70
 
71
	movl r8 = (VRN_KERNEL << VRN_SHIFT)
71
	movl r8 = (VRN_KERNEL << VRN_SHIFT)
72
	mov r9 = rr[r8]
72
	mov r9 = rr[r8]
73
 
73
 
74
	movl r10 = (RR_MASK)
74
	movl r10 = (RR_MASK)
75
	and r9 = r10, r9
75
	and r9 = r10, r9
76
	movl r10 = ((RID_KERNEL << RID_SHIFT) | (KERNEL_PAGE_WIDTH << PS_SHIFT))
76
	movl r10 = ((RID_KERNEL << RID_SHIFT) | (KERNEL_PAGE_WIDTH << PS_SHIFT))
77
	or  r9 = r10, r9
77
	or  r9 = r10, r9
78
 
78
 
79
	mov rr[r8] = r9
79
	mov rr[r8] = r9
80
 
80
 
81
	movl r8 = (VRN_KERNEL << VRN_SHIFT)
81
	movl r8 = (VRN_KERNEL << VRN_SHIFT)
82
	mov cr.ifa = r8
82
	mov cr.ifa = r8
83
 
83
 
84
	mov r11 = cr.itir
84
	mov r11 = cr.itir
85
	movl r10 = (KERNEL_PAGE_WIDTH << PS_SHIFT)
85
	movl r10 = (KERNEL_PAGE_WIDTH << PS_SHIFT)
86
	or r10 = r10, r11
86
	or r10 = r10, r11
87
	mov cr.itir = r10
87
	mov cr.itir = r10
88
 
88
 
89
	movl r10 = (KERNEL_TRANSLATION_I)
89
	movl r10 = (KERNEL_TRANSLATION_I)
90
	itr.i itr[r0] = r10
90
	itr.i itr[r0] = r10
91
	movl r10 = (KERNEL_TRANSLATION_D)
91
	movl r10 = (KERNEL_TRANSLATION_D)
92
	itr.d dtr[r0] = r10
92
	itr.d dtr[r0] = r10
93
 
93
 
94
	movl r7 = 1
94
	movl r7 = 1
95
	movl r8 = (VRN_KERNEL << VRN_SHIFT) | VIO_OFFSET
95
	movl r8 = (VRN_KERNEL << VRN_SHIFT) | VIO_OFFSET
96
	mov cr.ifa = r8
96
	mov cr.ifa = r8
97
	movl r10 = (KERNEL_TRANSLATION_VIO)
97
	movl r10 = (KERNEL_TRANSLATION_VIO)
98
	itr.d dtr[r7] = r10
98
	itr.d dtr[r7] = r10
99
 
99
 
100
	mov r11 = cr.itir
100
	mov r11 = cr.itir
101
	movl r10 = ~0xfc
101
	movl r10 = ~0xfc
102
	and r10 = r10, r11
102
	and r10 = r10, r11
103
	movl r11 = (IO_PAGE_WIDTH << PS_SHIFT)
103
	movl r11 = (IO_PAGE_WIDTH << PS_SHIFT)
104
	or r10 = r10, r11
104
	or r10 = r10, r11
105
	mov cr.itir = r10
105
	mov cr.itir = r10
106
 
106
 
107
	movl r7 = 2
107
	movl r7 = 2
108
	movl r8 = (VRN_KERNEL << VRN_SHIFT) | IO_OFFSET
108
	movl r8 = (VRN_KERNEL << VRN_SHIFT) | IO_OFFSET
109
	mov cr.ifa = r8
109
	mov cr.ifa = r8
110
	movl r10 = (KERNEL_TRANSLATION_IO)
110
	movl r10 = (KERNEL_TRANSLATION_IO)
111
	itr.d dtr[r7] = r10
111
	itr.d dtr[r7] = r10
112
 
112
 
113
	# Set up the mapping for the firmware area (also SAPIC)
113
	# Set up the mapping for the firmware area (also SAPIC)
114
 
114
 
115
	mov r11 = cr.itir
115
	mov r11 = cr.itir
116
	movl r10 = ~0xfc
116
	movl r10 = ~0xfc
117
	and r10 = r10, r11
117
	and r10 = r10, r11
118
	movl r11 = (FW_PAGE_WIDTH << PS_SHIFT)
118
	movl r11 = (FW_PAGE_WIDTH << PS_SHIFT)
119
	or r10 = r10, r11
119
	or r10 = r10, r11
120
	mov cr.itir = r10
120
	mov cr.itir = r10
121
 
121
 
122
	movl r7 = 3
122
	movl r7 = 3
123
	movl r8 = (VRN_KERNEL << VRN_SHIFT) | FW_OFFSET
123
	movl r8 = (VRN_KERNEL << VRN_SHIFT) | FW_OFFSET
124
	mov cr.ifa = r8
124
	mov cr.ifa = r8
125
	movl r10 = (KERNEL_TRANSLATION_FW)
125
	movl r10 = (KERNEL_TRANSLATION_FW)
126
	itr.d dtr[r7] = r10
126
	itr.d dtr[r7] = r10
127
 
127
 
128
	# Initialize PSR
128
	# Initialize PSR
129
 
129
 
130
	movl r10 = (PSR_DT_MASK | PSR_RT_MASK | PSR_IT_MASK | PSR_IC_MASK)  /* Enable paging */
130
	movl r10 = (PSR_DT_MASK | PSR_RT_MASK | PSR_IT_MASK | PSR_IC_MASK)  /* Enable paging */
131
	mov r9 = psr
131
	mov r9 = psr
132
 
132
 
133
	or r10 = r10, r9
133
	or r10 = r10, r9
134
	mov cr.ipsr = r10
134
	mov cr.ipsr = r10
135
	mov cr.ifs = r0
135
	mov cr.ifs = r0
136
	movl r8 = paging_start
136
	movl r8 = paging_start
137
	mov cr.iip = r8
137
	mov cr.iip = r8
138
	srlz.d
138
	srlz.d
139
	srlz.i
139
	srlz.i
140
 
140
 
141
	.explicit
141
	.explicit
142
 
142
 
143
	/*
143
	/*
144
	 * Return From Interrupt is the only way to
144
	 * Return From Interrupt is the only way to
145
	 * fill the upper half of the PSR.
145
	 * fill the upper half of the PSR.
146
	 */
146
	 */
147
	rfi ;;
147
	rfi ;;
148
 
148
 
149
 
149
 
150
.global paging_start
150
.global paging_start
151
paging_start:
151
paging_start:
152
 
152
 
153
	/*
153
	/*
154
	 * Now we are paging.
154
	 * Now we are paging.
155
	 */
155
	 */
156
 
156
 
157
	# Switch to register bank 1
157
	# Switch to register bank 1
158
	bsw.1
158
	bsw.1
159
 
159
 
160
#ifdef CONFIG_SMP
160
#ifdef CONFIG_SMP
161
	# Am I BSP or AP?
161
	# Am I BSP or AP?
162
	movl r20 = bsp_started ;;
162
	movl r20 = bsp_started ;;
163
	ld8 r20 = [r20] ;;
163
	ld8 r20 = [r20] ;;
164
	cmp.eq p3, p2 = r20, r0 ;;
164
	cmp.eq p3, p2 = r20, r0 ;;
165
#else
165
#else
166
	cmp.eq p3, p2 = r0, r0 ;;	/* you are BSP */
166
	cmp.eq p3, p2 = r0, r0 ;;	/* you are BSP */
167
#endif	/* CONFIG_SMP */
167
#endif	/* CONFIG_SMP */
168
	
168
	
169
	# Initialize register stack
169
	# Initialize register stack
170
	mov ar.rsc = r0
170
	mov ar.rsc = r0
171
	movl r8 = (VRN_KERNEL << VRN_SHIFT) ;;
171
	movl r8 = (VRN_KERNEL << VRN_SHIFT) ;;
172
	mov ar.bspstore = r8
172
	mov ar.bspstore = r8
173
	loadrs
173
	loadrs
174
 
174
 
175
	# Initialize memory stack to some sane value
175
	# Initialize memory stack to some sane value
176
	movl r12 = stack0 ;;
176
	movl r12 = stack0 ;;
177
	add r12 = -16, r12	/* allocate a scratch area on the stack */
177
	add r12 = -16, r12	/* allocate a scratch area on the stack */
178
 
178
 
179
	# Initialize gp (Global Pointer) register
179
	# Initialize gp (Global Pointer) register
180
	movl r20 = (VRN_KERNEL << VRN_SHIFT);;
180
	movl r20 = (VRN_KERNEL << VRN_SHIFT);;
181
	or r20 = r20,r1;;
181
	or r20 = r20,r1;;
182
	movl r1 = _hardcoded_load_address
182
	movl r1 = _hardcoded_load_address
183
	
183
	
184
	/*
184
	/*
185
	 * Initialize the hardcoded_* variables. Only the BSP does this.
185
	 * Initialize the hardcoded_* variables. Only the BSP does this.
186
	 */
186
	 */
187
(p3)	movl r14 = _hardcoded_ktext_size
187
(p3)	movl r14 = _hardcoded_ktext_size
188
(p3)	movl r15 = _hardcoded_kdata_size
188
(p3)	movl r15 = _hardcoded_kdata_size
189
(p3)	movl r16 = _hardcoded_load_address ;;
189
(p3)	movl r16 = _hardcoded_load_address ;;
190
(p3)	addl r17 = @gprel(hardcoded_ktext_size), gp
190
(p3)	addl r17 = @gprel(hardcoded_ktext_size), gp
191
(p3)	addl r18 = @gprel(hardcoded_kdata_size), gp
191
(p3)	addl r18 = @gprel(hardcoded_kdata_size), gp
192
(p3)	addl r19 = @gprel(hardcoded_load_address), gp
192
(p3)	addl r19 = @gprel(hardcoded_load_address), gp
193
(p3)	addl r21 = @gprel(bootinfo), gp
193
(p3)	addl r21 = @gprel(bootinfo), gp
194
	;;
194
	;;
195
(p3)	st8 [r17] = r14
195
(p3)	st8 [r17] = r14
196
(p3)	st8 [r18] = r15
196
(p3)	st8 [r18] = r15
197
(p3)	st8 [r19] = r16
197
(p3)	st8 [r19] = r16
198
(p3)	st8 [r21] = r20
198
(p3)	st8 [r21] = r20
199
 
199
 
200
	ssm (1 << 19) ;; /* Disable f32 - f127 */
200
	ssm (1 << 19) ;; /* Disable f32 - f127 */
201
	srlz.i
201
	srlz.i
202
	srlz.d ;;
202
	srlz.d ;;
203
 
203
 
204
#ifdef CONFIG_SMP
204
#ifdef CONFIG_SMP
205
(p2)	movl r18 = main_ap ;;
205
(p2)	movl r18 = main_ap ;;
206
(p2)   	mov b1 = r18 ;;
206
(p2)   	mov b1 = r18 ;;
207
(p2)	br.call.sptk.many b0 = b1
207
(p2)	br.call.sptk.many b0 = b1
208
 
208
 
209
	# Mark that BSP is on
209
	# Mark that BSP is on
210
	mov r20 = 1 ;;
210
	mov r20 = 1 ;;
211
	movl r21 = bsp_started ;;
211
	movl r21 = bsp_started ;;
212
	st8 [r21] = r20 ;;
212
	st8 [r21] = r20 ;;
213
#endif
213
#endif
214
 
214
 
215
	br.call.sptk.many b0 = arch_pre_main
215
	br.call.sptk.many b0 = arch_pre_main
216
 
216
 
217
	movl r18 = main_bsp ;;
217
	movl r18 = main_bsp ;;
218
	mov b1 = r18 ;;
218
	mov b1 = r18 ;;
219
	br.call.sptk.many b0 = b1
219
	br.call.sptk.many b0 = b1
220
 
220
 
221
0:
221
0:
222
	br 0b
222
	br 0b
223
 
223
 
224
#ifdef CONFIG_SMP
224
#ifdef CONFIG_SMP
225
 
225
 
226
.align 4096
226
.align 4096
227
kernel_image_ap_start:
227
kernel_image_ap_start:
228
	.auto
228
	.auto
229
 
229
 
230
	# Identify this CPU in the OS structures by ID / EID
230
	# Identify this CPU in the OS structures by ID / EID
231
 
231
 
232
	mov r9 = cr64
232
	mov r9 = cr64
233
	mov r10 = 1
233
	mov r10 = 1
234
	movl r12 = 0xffffffff
234
	movl r12 = 0xffffffff
235
	movl r8 = cpu_by_id_eid_list
235
	movl r8 = cpu_by_id_eid_list
236
	and r8 = r8, r12
236
	and r8 = r8, r12
237
	shr r9 = r9, 16
237
	shr r9 = r9, 16
238
	add r8 = r8, r9
238
	add r8 = r8, r9
239
	st1 [r8] = r10
239
	st1 [r8] = r10
240
	
240
	
241
	# Wait for wakeup synchro signal (#3 in cpu_by_id_eid_list)
241
	# Wait for wakeup synchro signal (#3 in cpu_by_id_eid_list)
242
	
242
	
243
kernel_image_ap_start_loop:
243
kernel_image_ap_start_loop:
244
	movl r11 = kernel_image_ap_start_loop
244
	movl r11 = kernel_image_ap_start_loop
245
	and r11 = r11, r12
245
	and r11 = r11, r12
246
   	mov b1 = r11 
246
   	mov b1 = r11 
247
 
247
 
248
	ld1 r20 = [r8] ;;
248
	ld1 r20 = [r8] ;;
249
	movl r21 = 3 ;;
249
	movl r21 = 3 ;;
250
	cmp.eq p2, p3 = r20, r21 ;;
250
	cmp.eq p2, p3 = r20, r21 ;;
251
(p3)	br.call.sptk.many b0 = b1
251
(p3)	br.call.sptk.many b0 = b1
252
 
252
 
253
	movl r11 = kernel_image_start
253
	movl r11 = kernel_image_start
254
	and r11 = r11, r12
254
	and r11 = r11, r12
255
	mov b1 = r11 
255
	mov b1 = r11 
256
	br.call.sptk.many b0 = b1
256
	br.call.sptk.many b0 = b1
257
 
257
 
258
.align 16
258
.align 16
259
.global bsp_started
259
.global bsp_started
260
bsp_started:
260
bsp_started:
261
.space 8
261
.space 8
262
 
262
 
263
.align 4096
263
.align 4096
264
.global cpu_by_id_eid_list
264
.global cpu_by_id_eid_list
265
cpu_by_id_eid_list:
265
cpu_by_id_eid_list:
266
.space 65536
266
.space 65536
267
 
267
 
268
#endif	/* CONFIG_SMP */
268
#endif	/* CONFIG_SMP */
269
 
269