Subversion Repositories HelenOS

Rev 4614
#
# Copyright (c) 2005 Jakub Jermar
# Copyright (c) 2008 Pavel Rimsky
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#include <arch/arch.h>
#include <arch/stack.h>
#include <arch/context_offset.h>
#include <arch/sun4v/regdef.h>
#include <arch/sun4v/hypercall.h>
#include <arch/sun4v/arch.h>
#include <arch/sun4v/cpu.h>
#include <arch/mm/pagesize.h>
#include <arch/mm/sun4v/tte.h>
#include <arch/mm/sun4v/mmu.h>
#include <arch/mm/sun4v/tlb.h>

.register %g2, #scratch
.register %g3, #scratch

.section K_TEXT_START, "ax"

#define BSP_FLAG		1
#define PHYSMEM_ADDR_SIZE	56

/*
 * Flags set in the TTE data entry mapping the kernel.
 */
#ifdef CONFIG_VIRT_IDX_DCACHE
	#define TTE_FLAGS \
		(1 << TTE_V_SHIFT) \
		| (1 << TTE_EP_SHIFT) \
		| (1 << TTE_CP_SHIFT) \
		| (1 << TTE_CV_SHIFT) \
		| (1 << TTE_P_SHIFT) \
		| (1 << TTE_W_SHIFT)
#else
	#define TTE_FLAGS \
		(1 << TTE_V_SHIFT) \
		| (1 << TTE_EP_SHIFT) \
		| (1 << TTE_CP_SHIFT) \
		| (1 << TTE_P_SHIFT) \
		| (1 << TTE_W_SHIFT)
#endif

/*
 * Fills a register with a TTE Data item. The item will map the given virtual
 * address to a real address which will be computed by adding the starting
 * address of the physical memory to the virtual address.
 *
 * parameters:
 *	addr:			virtual address to be mapped
 *	rphysmem_start:		register containing the starting address of the
 *				physical memory
 *	rtmp1:			a register to be used as temporary
 *	rtmp2:			a register to be used as temporary
 *	rd:			register where the result will be saved
 */
#define TTE_DATA(addr, rphysmem_start, rtmp1, rtmp2, rd) \
	setx TTE_FLAGS | PAGESIZE_4M, rtmp1, rd; \
	add rd, rphysmem_start, rd; \
	setx (addr), rtmp1, rtmp2; \
	add rd, rtmp2, rd;
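/*
 * The arithmetic the macro performs, restated as a minimal C sketch for
 * clarity. This is not kernel code: tte_data_4m() is an illustrative name,
 * while TTE_FLAGS and PAGESIZE_4M are the constants used above.
 *
 *	static inline uint64_t tte_data_4m(uint64_t vaddr, uint64_t physmem_start)
 *	{
 *		// valid, privileged, writable 4M TTE whose real address is
 *		// the virtual address offset by the physical memory base
 *		return (TTE_FLAGS | PAGESIZE_4M) + physmem_start + vaddr;
 *	}
 */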

/*
 * Here is where the kernel is passed control from the boot loader.
 *
 * The registers are expected to be in this state:
 * - %o0 starting address of physical memory + bootstrap processor flag
 *	bits 63...1:	physical memory starting address / 2
 *	bit 0:		non-zero on BSP processor, zero on AP processors
 * - %o1 bootinfo structure address (BSP only)
 * - %o2 bootinfo structure size (BSP only)
 *
 * Moreover, we depend on boot having established the following environment:
 * - TLBs are on
 * - identity mapping for the kernel image
 */
.global kernel_image_start
kernel_image_start:
	mov BSP_FLAG, %l0
	and %o0, %l0, %l7			! l7 <= bootstrap processor?
	andn %o0, %l0, %l6			! l6 <= start of physical memory
	or %o1, %g0, %l1
	or %o2, %g0, %l2

	! Get bits (PHYSMEM_ADDR_SIZE - 1):13 of physmem_base.
	srlx %l6, 13, %l5

	! l5 <= physmem_base[(PHYSMEM_ADDR_SIZE - 1):13]
	sllx %l5, 13 + (63 - (PHYSMEM_ADDR_SIZE - 1)), %l5
	srlx %l5, 63 - (PHYSMEM_ADDR_SIZE - 1), %l5
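	/*
	 * The same decoding as a hedged C sketch (illustrative names only):
	 * bit 0 of %o0 is the BSP flag, the remaining bits are the physical
	 * memory base, and the shift pair masks that base down to bits
	 * (PHYSMEM_ADDR_SIZE - 1):13.
	 *
	 *	static void decode_boot_arg(uint64_t o0, uint64_t *bsp,
	 *	    uint64_t *physmem_start, uint64_t *physmem_bits)
	 *	{
	 *		*bsp = o0 & BSP_FLAG;
	 *		*physmem_start = o0 & ~(uint64_t) BSP_FLAG;
	 *
	 *		uint64_t v = *physmem_start >> 13;
	 *		v <<= 13 + (63 - (PHYSMEM_ADDR_SIZE - 1));	// drop bits above 55
	 *		v >>= 63 - (PHYSMEM_ADDR_SIZE - 1);		// put bit 13 back in place
	 *		*physmem_bits = v;	// == physmem_start & 0x00ffffffffffe000
	 *	}
	 */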

	/*
	 * Setup basic runtime environment.
	 */
	wrpr %g0, NWINDOWS - 2, %cansave	! set maximum saveable windows
	wrpr %g0, 0, %canrestore		! get rid of windows we will
						! never need again
	wrpr %g0, 0, %otherwin			! make sure the window state is
						! consistent
	wrpr %g0, NWINDOWS - 1, %cleanwin	! prevent needless clean_window
						! traps for kernel

	wrpr %g0, 0, %wstate			! use default spill/fill trap

	wrpr %g0, 0, %tl			! TL = 0, primary context
						! register is used

	wrpr %g0, PSTATE_PRIV_BIT, %pstate	! disable interrupts and disable
						! 32-bit address masking

	wrpr %g0, 0, %pil			! initialize %pil
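	/*
	 * Why NWINDOWS - 2: SPARC V9 keeps CANSAVE + CANRESTORE + OTHERWIN
	 * equal to NWINDOWS - 2, so with no live windows on entry, handing
	 * everything to CANSAVE is the consistent assignment. A sketch of the
	 * check (the NWINDOWS value below is illustrative; the kernel takes it
	 * from its headers):
	 *
	 *	enum { NWINDOWS = 8 };
	 *	unsigned cansave = NWINDOWS - 2, canrestore = 0, otherwin = 0;
	 *	assert(cansave + canrestore + otherwin == NWINDOWS - 2);
	 */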

	/*
	 * Switch to kernel trap table.
	 */
	sethi %hi(trap_table), %g1
	wrpr %g1, %lo(trap_table), %tba

	/* Explicitly switch to hypervisor API 1.1. */
	mov 1, %o0
	mov 1, %o1
	mov 1, %o2
	mov 0, %o3
	mov 0, %o4
	mov 0, %o5
	ta 0xff
	nop

	/*
	 * Take over the MMU.
	 */

	! map kernel in context 1
	set kernel_image_start, %o0				! virt. address
	set 1, %o1						! context
	TTE_DATA(kernel_image_start, %l5, %g2, %g3, %o2)	! TTE data
	set MMU_FLAG_DTLB | MMU_FLAG_ITLB, %o3			! MMU flags
	__HYPERCALL_HYPERFAST(MMU_MAP_ADDR)

	! switch to context 1
	set 1, %o0
	set VA_PRIMARY_CONTEXT_REG, %o1
	stxa %o0, [%o1] ASI_PRIMARY_CONTEXT_REG

	! demap all in context 0
	set 0, %o0						! reserved
	set 0, %o1						! reserved
	set 0, %o2						! context
	set MMU_FLAG_DTLB | MMU_FLAG_ITLB, %o3			! MMU flags
	__HYPERCALL_FAST(MMU_DEMAP_CTX)

	! install permanent mapping for kernel in context 0
	set kernel_image_start, %o0				! virtual address
	set 0, %o1						! context
	TTE_DATA(kernel_image_start, %l5, %g2, %g3, %o2)	! TTE data
	set MMU_FLAG_DTLB | MMU_FLAG_ITLB, %o3			! MMU flags
	__HYPERCALL_FAST(MMU_MAP_PERM_ADDR)

	! switch to context 0
	mov 0, %o0
	set VA_PRIMARY_CONTEXT_REG, %o1
	stxa %o0, [%o1] ASI_PRIMARY_CONTEXT_REG

	! demap all in context 1 (cleanup)
	set 0, %o0						! reserved
	set 0, %o1						! reserved
	set 1, %o2						! context
	set MMU_FLAG_DTLB | MMU_FLAG_ITLB, %o3			! MMU flags
	__HYPERCALL_FAST(MMU_DEMAP_CTX)
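	/*
	 * The bounce through context 1 lets the loader's context 0 mappings be
	 * dropped while the kernel keeps running from a valid translation. As
	 * a hedged pseudo-C sketch (the function names merely stand in for the
	 * hypercalls and the primary context register write above):
	 *
	 *	mmu_map_addr(kernel_image_start, 1, tte, MMU_FLAG_DTLB | MMU_FLAG_ITLB);
	 *	set_primary_context(1);		// run via the temporary mapping
	 *	mmu_demap_ctx(0, MMU_FLAG_DTLB | MMU_FLAG_ITLB);
	 *	mmu_map_perm_addr(kernel_image_start, 0, tte, MMU_FLAG_DTLB | MMU_FLAG_ITLB);
	 *	set_primary_context(0);		// back to the permanent mapping
	 *	mmu_demap_ctx(1, MMU_FLAG_DTLB | MMU_FLAG_ITLB);
	 */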

	/*
	 * Set CPUID.
	 */
	__HYPERCALL_FAST(CPU_MYID)
	mov SCRATCHPAD_CPUID, %g1
	stxa %o1, [%g1] ASI_SCRATCHPAD

	/*
	 * Set MMU fault status area for the current CPU.
	 */
	set mmu_fsas, %o0			! o0 <= addr. of fault status areas array
	add %o0, %l6, %o0			! kernel address to real address
	mulx %o1, MMU_FSA_SIZE, %g1		! g1 <= offset of current CPU's fault status area
	add %g1, %o0, %o0			! o0 <= FSA of the current CPU
	mov SCRATCHPAD_MMU_FSA, %g1
	stxa %o0, [%g1] ASI_SCRATCHPAD		! remember MMU fault status area to speed up miss handler
	__HYPERCALL_FAST(MMU_FAULT_AREA_CONF)
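	/*
	 * The address handed to MMU_FAULT_AREA_CONF is a real address: the
	 * kernel address of mmu_fsas converted by adding the start of physical
	 * memory (%l6), plus this CPU's slot. The same arithmetic as a C
	 * sketch (illustrative, not kernel code):
	 *
	 *	static uint64_t cpu_fsa_real_addr(uint64_t mmu_fsas_va,
	 *	    uint64_t physmem_start, uint64_t myid)
	 *	{
	 *		return mmu_fsas_va + physmem_start + myid * MMU_FSA_SIZE;
	 *	}
	 */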

	! on APs skip executing the following code
	cmp %l7, 0
	be 1f
	nop

	/*
	 * Save physmem_base for use by the mm subsystem.
	 * %l6 contains starting physical address
	 */
	sethi %hi(physmem_base), %l4
	stx %l6, [%l4 + %lo(physmem_base)]

	/*
	 * Store a template of a TTE Data entry for kernel mappings.
	 * This template will be used from the kernel MMU miss handler.
	 */
	!TTE_DATA(0, %l5, %g2, %g3, %g1)
	setx TTE_FLAGS | PAGESIZE_8K, %g2, %g1
	add %g1, %l5, %g1
	set kernel_8k_tlb_data_template, %g4
	stx %g1, [%g4]

	/*
	 * So far, we have not touched the stack.
	 * It is a good idea to set the kernel stack to a known state now.
	 */
	sethi %hi(temporary_boot_stack), %sp
	or %sp, %lo(temporary_boot_stack), %sp
	sub %sp, STACK_BIAS, %sp
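	/*
	 * STACK_BIAS is the SPARC V9 64-bit ABI stack bias (2047), presumably
	 * provided via arch/stack.h included above: %sp holds a biased value
	 * and the real frame sits at %sp + STACK_BIAS. A hedged C
	 * illustration:
	 *
	 *	uintptr_t top = (uintptr_t) &temporary_boot_stack;
	 *	uintptr_t sp = top - STACK_BIAS;	// the value loaded into %sp above
	 *	// frame accesses then go through sp + STACK_BIAS
	 */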

	or %l1, %g0, %o1
	or %l2, %g0, %o2
	sethi %hi(bootinfo), %o0
	call memcpy				! copy bootinfo
	or %o0, %lo(bootinfo), %o0
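	/*
	 * The final or sits in the call's delay slot, so %o0 (the destination)
	 * is completed before memcpy runs. In effect (illustrative C; the real
	 * bootinfo type lives in the sparc64 boot headers):
	 *
	 *	memcpy(&bootinfo, loader_bootinfo, bootinfo_size);	// %l1, %l2
	 */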

	call arch_pre_main
	nop

	call main_bsp
	nop

	/* Not reached. */

0:
	ba 0b
	nop

1:

#ifdef CONFIG_SMP

	/*
	 * Enable the MMU for the AP; the hypervisor resumes execution
	 * at mmu_enabled with translation turned on.
	 */

	mov	1, %o0			! MMU enable flag
	set	mmu_enabled, %o1
	mov	MMU_ENABLE, %o5		! MMU enable HV call
	ta	0x80			! call HV

	mmu_enabled:

	/*
	 * Configure stack for the AP.
	 * The AP is expected to use the stack saved
	 * in the ctx global variable.
	 */
	set ctx, %g1
	add %g1, OFFSET_SP, %g1
	ldx [%g1], %o6

	call main_ap
	nop
#endif
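	/*
	 * OFFSET_SP, presumably provided by arch/context_offset.h included
	 * above, is the byte offset of the saved stack pointer within the ctx
	 * structure, so the three instructions amount to (illustrative C, not
	 * the kernel's own wakeup code):
	 *
	 *	uintptr_t sp = *(uint64_t *) ((char *) &ctx + OFFSET_SP);
	 */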

	/* Not reached. */
0:
	ba 0b
	nop

.section K_DATA_START, "aw", @progbits

#define INITIAL_STACK_SIZE		1024

.align STACK_ALIGNMENT
	.space INITIAL_STACK_SIZE
.align STACK_ALIGNMENT
temporary_boot_stack:
	.space STACK_WINDOW_SAVE_AREA_SIZE

.data

.align 8
.global physmem_base		! copy of the physical memory base address
physmem_base:
	.quad 0

.global kernel_8k_tlb_data_template
kernel_8k_tlb_data_template:
	.quad 0

/* MMU fault status areas for all CPUs */
.align MMU_FSA_ALIGNMENT
.global mmu_fsas
mmu_fsas:
	.space (MMU_FSA_SIZE * MAX_NUM_STRANDS)
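/*
 * These symbols are consumed from C elsewhere in the kernel; roughly (a hedged
 * sketch, the authoritative declarations live in the sparc64 headers):
 *
 *	extern uint64_t physmem_base;			// written by the boot code above
 *	extern uint64_t kernel_8k_tlb_data_template;	// used by the MMU miss handler
 *	extern uint8_t mmu_fsas[];			// MMU_FSA_SIZE * MAX_NUM_STRANDS bytes
 */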
326