#
# Copyright (c) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#include <arch/arch.h>
#include <arch/regdef.h>
#include <arch/boot/boot.h>
#include <arch/stack.h>

#include <arch/mm/mmu.h>
#include <arch/mm/tlb.h>
#include <arch/mm/tte.h>

#ifdef CONFIG_SMP
#include <arch/context_offset.h>
#endif

.register %g2, #scratch
.register %g3, #scratch

.section K_TEXT_START, "ax"

#define BSP_FLAG	1

/*
 * 2^PHYSMEM_ADDR_SIZE is the size of the physical address space on
 * a given processor.
 */
#if defined (US2)
    #define PHYSMEM_ADDR_SIZE	41
#elif defined (US3)
    #define PHYSMEM_ADDR_SIZE	43
#endif

/*
 * Here is where the kernel is passed control from the boot loader.
 *
 * The registers are expected to be in this state:
 * - %o0 starting address of physical memory + bootstrap processor flag
 * 	bits 63...1:	physical memory starting address / 2
 *	bit 0:		non-zero on BSP processor, zero on AP processors
 * - %o1 bootinfo structure address (BSP only)
 * - %o2 bootinfo structure size (BSP only)
 *
 * Moreover, we depend on boot having established the following environment:
 * - TLBs are on
 * - identity mapping for the kernel image
 */

.global kernel_image_start
kernel_image_start:
	mov BSP_FLAG, %l0
	and %o0, %l0, %l7			! l7 <= bootstrap processor?
	andn %o0, %l0, %l6			! l6 <= start of physical memory

	! Get bits (PHYSMEM_ADDR_SIZE - 1):13 of physmem_base.
	srlx %l6, 13, %l5

	! l5 <= physmem_base[(PHYSMEM_ADDR_SIZE - 1):13]
	sllx %l5, 13 + (63 - (PHYSMEM_ADDR_SIZE - 1)), %l5
	srlx %l5, 63 - (PHYSMEM_ADDR_SIZE - 1), %l5
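
	/*
	 * In other words, %l5 is %l6 masked down to bits
	 * (PHYSMEM_ADDR_SIZE - 1):13. For example, on US2, where
	 * PHYSMEM_ADDR_SIZE is 41, the srlx above clears bits 12:0, the
	 * sllx by 13 + (63 - 40) = 36 shifts everything above bit 40 out
	 * of the register, and the final srlx by 63 - 40 = 23 returns the
	 * field to its original position, ready to be OR-ed into TTE data
	 * words below.
	 */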

	/*
	 * Set up the basic runtime environment.
	 */

	wrpr %g0, NWINDOWS - 2, %cansave	! set maximum saveable windows
	wrpr %g0, 0, %canrestore		! get rid of windows we will
						! never need again
	wrpr %g0, 0, %otherwin			! make sure the window state is
						! consistent
	wrpr %g0, NWINDOWS - 1, %cleanwin	! prevent needless clean_window
						! traps for kernel

	wrpr %g0, 0, %wstate			! use the default spill/fill trap
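
	/*
	 * Note that SPARC V9 requires CANSAVE + CANRESTORE + OTHERWIN =
	 * NWINDOWS - 2 at all times; with CANRESTORE and OTHERWIN zeroed
	 * above, CANSAVE thus holds its maximum legal value.
	 */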

	wrpr %g0, 0, %tl			! TL = 0, primary context
						! register is used

	wrpr %g0, PSTATE_PRIV_BIT, %pstate	! disable interrupts and disable
						! 32-bit address masking

	wrpr %g0, 0, %pil			! initialize %pil

	/*
	 * Switch to the kernel trap table.
	 */
	sethi %hi(trap_table), %g1
	wrpr %g1, %lo(trap_table), %tba

	/*
	 * Take over the DMMU by installing a locked TTE entry that
	 * identity-maps the first 4M of memory.
	 *
	 * In the case of the DMMU, no FLUSH instructions need to be issued.
	 * Because of that, the old DTLB contents can be demapped pretty
	 * straightforwardly and without causing any traps.
	 */

	wr %g0, ASI_DMMU, %asi

#define SET_TLB_DEMAP_CMD(r1, context_id) \
	set (TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) | (context_id << \
		TLB_DEMAP_CONTEXT_SHIFT), %r1

	! demap context 0
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
	stxa %g0, [%g1] ASI_DMMU_DEMAP
	membar #Sync
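
	/*
	 * The demap operation is encoded entirely in the store address:
	 * the virtual address written to the demap ASI selects the demap
	 * type and context, while the store data itself (%g0 here) is
	 * ignored.
	 */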

#define SET_TLB_TAG(r1, context) \
	set VMA | (context << TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1

	! write DTLB tag
	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi
	membar #Sync

#ifdef CONFIG_VIRT_IDX_DCACHE
#define TTE_LOW_DATA(imm) 	(TTE_CP | TTE_CV | TTE_P | LMA | (imm))
#else /* CONFIG_VIRT_IDX_DCACHE */
#define TTE_LOW_DATA(imm) 	(TTE_CP | TTE_P | LMA | (imm))
#endif /* CONFIG_VIRT_IDX_DCACHE */

#define SET_TLB_DATA(r1, r2, imm) \
	set TTE_LOW_DATA(imm), %r1; \
	or %r1, %l5, %r1; \
	mov PAGESIZE_4M, %r2; \
	sllx %r2, TTE_SIZE_SHIFT, %r2; \
	or %r1, %r2, %r1; \
	mov 1, %r2; \
	sllx %r2, TTE_V_SHIFT, %r2; \
	or %r1, %r2, %r1;

	! write DTLB data and install the kernel mapping
	SET_TLB_DATA(g1, g2, TTE_L | TTE_W)	! use non-global mapping
	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG
	membar #Sync
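
	/*
	 * The macro above composes a complete TTE data word in its first
	 * register argument: the attribute bits from TTE_LOW_DATA(imm),
	 * the physical base taken from %l5, the 4M page size field and
	 * the valid bit.
	 */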

	/*
	 * Because we cannot use global mappings (we want to have separate
	 * 64-bit address spaces for both the kernel and userspace), we
	 * prepare the identity mapping in context 1 as well. This step is
	 * required by the code installing the ITLB mapping.
	 */
	! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi
	membar #Sync

	! write DTLB data and install the kernel mapping in context 1
	SET_TLB_DATA(g1, g2, TTE_W)		! use non-global mapping
	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG
	membar #Sync
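
	/*
	 * A TTE with the global bit set would match in every context and
	 * thus defeat the separate kernel and userspace address spaces,
	 * which is why all of these mappings are non-global and the
	 * identity mapping has to be repeated per context.
	 */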

	/*
	 * Now it is time to take over the IMMU. Unfortunately, it cannot be
	 * done as easily as the DMMU, because the IMMU is mapping the code
	 * it executes.
	 *
	 * [ Note that brave experiments with disabling the IMMU and using
	 * the DMMU approach failed after a dozen desperate days with only
	 * little success. ]
	 *
	 * The approach used here is inspired by OpenBSD. First, the kernel
	 * creates an IMMU mapping for itself in context 1 (MEM_CONTEXT_TEMP)
	 * and switches to it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
	 * afterwards and replaced with the kernel's permanent mapping.
	 * Finally, the kernel switches back to context 0 and demaps
	 * context 1.
	 *
	 * Moreover, the IMMU requires use of the FLUSH instructions. But
	 * that is OK because we always use operands with addresses already
	 * mapped by the taken-over DTLB.
	 */

	set kernel_image_start, %g5

	! write ITLB tag of context 1
	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
	mov VA_DMMU_TAG_ACCESS, %g2
	stxa %g1, [%g2] ASI_IMMU
	flush %g5

	! write ITLB data and install the temporary mapping in context 1
	SET_TLB_DATA(g1, g2, 0)			! use non-global mapping
	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
	flush %g5

	! switch to context 1
	mov MEM_CONTEXT_TEMP, %g1
	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
	flush %g5

	! demap context 0
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
	stxa %g0, [%g1] ASI_IMMU_DEMAP
	flush %g5

	! write ITLB tag of context 0
	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
	mov VA_DMMU_TAG_ACCESS, %g2
	stxa %g1, [%g2] ASI_IMMU
	flush %g5

	! write ITLB data and install the permanent kernel mapping in context 0
	SET_TLB_DATA(g1, g2, TTE_L)		! use non-global mapping
	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
	flush %g5

	! enter nucleus - using context 0
	wrpr %g0, 1, %tl
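
	/*
	 * With TL > 0, instruction fetches are translated using the
	 * nucleus context instead of the primary context, so the ITLB
	 * entries of the primary context can now be manipulated safely
	 * even though this very code runs through the IMMU.
	 */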

	! demap context 1
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_PRIMARY)
	stxa %g0, [%g1] ASI_IMMU_DEMAP
	flush %g5

	! set context 0 in the primary context register
	stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
	flush %g5

	! leave nucleus - using primary context, i.e. context 0
	wrpr %g0, 0, %tl

	brz %l7, 1f				! skip if you are not the bootstrap CPU
	nop

	/*
	 * Save physmem_base for use by the mm subsystem.
	 * %l6 contains the starting physical address.
	 */
	sethi %hi(physmem_base), %l4
	stx %l6, [%l4 + %lo(physmem_base)]

	/*
	 * Precompute the kernel 8K TLB data template.
	 * %l5 contains starting physical address
	 * bits [(PHYSMEM_ADDR_SIZE - 1):13].
	 */
	sethi %hi(kernel_8k_tlb_data_template), %l4
	ldx [%l4 + %lo(kernel_8k_tlb_data_template)], %l3
	or %l3, %l5, %l3
	stx %l3, [%l4 + %lo(kernel_8k_tlb_data_template)]

	/*
	 * Flush D-Cache.
	 */
	call dcache_flush
	nop

	/*
	 * So far, we have not touched the stack.
	 * It is a good idea to set the kernel stack to a known state now.
	 */
	sethi %hi(temporary_boot_stack), %sp
	or %sp, %lo(temporary_boot_stack), %sp
	sub %sp, STACK_BIAS, %sp
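
	/*
	 * The V9 ABI keeps %sp biased: the register holds the real stack
	 * address minus STACK_BIAS (2047 in the 64-bit ABI), and code
	 * that uses the stack adds the bias back into its offsets.
	 */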

	sethi %hi(bootinfo), %o0
	call memcpy				! copy bootinfo
	or %o0, %lo(bootinfo), %o0
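	! Note: the or above executes in the call's delay slot, so %o0 is
	! complete before memcpy runs; %o1 and %o2 still hold the bootinfo
	! source address and size passed in by the boot loader, which makes
	! them memcpy's remaining arguments.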

	call arch_pre_main
	nop

	call main_bsp
	nop

	/* Not reached. */

0:
	ba 0b
	nop


	/*
	 * Read MID from the processor.
	 */
1:
	ldxa [%g0] ASI_UPA_CONFIG, %g1
	srlx %g1, UPA_CONFIG_MID_SHIFT, %g1
	and %g1, UPA_CONFIG_MID_MASK, %g1
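
	/*
	 * The MID identifies this processor on the bus; on SMP
	 * configurations, the BSP wakes APs one at a time by publishing
	 * the MID of the chosen processor in waking_up_mid below.
	 */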

#ifdef CONFIG_SMP
	/*
	 * Active loop for APs until the BSP picks them up. A processor
	 * cannot leave the loop until the global variable 'waking_up_mid'
	 * equals its MID.
	 */
	set waking_up_mid, %g2
2:
	ldx [%g2], %g3
	cmp %g3, %g1
	bne 2b
	nop

	/*
	 * Configure the stack for the AP.
	 * The AP is expected to use the stack saved
	 * in the ctx global variable.
	 */
	set ctx, %g1
	add %g1, OFFSET_SP, %g1
	ldx [%g1], %o6

	call main_ap
	nop

	/* Not reached. */
#endif

0:
	ba 0b
	nop


.section K_DATA_START, "aw", @progbits

/*
 * Create a small stack to be used by the bootstrap processor. It is going
 * to be used only for a very limited period of time, but we switch to it
 * anyway, just to be sure we are properly initialized.
 */

#define INITIAL_STACK_SIZE	1024

.align STACK_ALIGNMENT
	.space INITIAL_STACK_SIZE
.align STACK_ALIGNMENT
temporary_boot_stack:
	.space STACK_WINDOW_SAVE_AREA_SIZE


.data

.align 8
.global physmem_base		! copy of the physical memory base address
physmem_base:
	.quad 0

/*
 * This variable is used by the fast_data_MMU_miss trap handler. At runtime,
 * it is further modified to reflect the starting address of physical memory.
 */
.global kernel_8k_tlb_data_template
kernel_8k_tlb_data_template:
#ifdef CONFIG_VIRT_IDX_DCACHE
	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
		TTE_CV | TTE_P | TTE_W)
#else /* CONFIG_VIRT_IDX_DCACHE */
	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
		TTE_P | TTE_W)
#endif /* CONFIG_VIRT_IDX_DCACHE */