#
# Copyright (c) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#include <arch/arch.h>
#include <arch/regdef.h>
#include <arch/boot/boot.h>
#include <arch/stack.h>

#include <arch/mm/mmu.h>
#include <arch/mm/tlb.h>
#include <arch/mm/tte.h>

#ifdef CONFIG_SMP
#include <arch/context_offset.h>
#endif

.register %g2, #scratch
.register %g3, #scratch

.section K_TEXT_START, "ax"

#define BSP_FLAG	1

/*
 * Here is where the kernel is passed control from the boot loader.
 *
 * The registers are expected to be in this state:
 * - %o0 starting address of physical memory + bootstrap processor flag
 * 	bits 63...1:	physical memory starting address / 2
 *	bit 0:		non-zero on BSP processor, zero on AP processors
 * - %o1 bootinfo structure address (BSP only)
 * - %o2 bootinfo structure size (BSP only)
 *
 * Moreover, we depend on boot having established the following environment:
 * - TLBs are on
 * - identity mapping for the kernel image
 */

.global kernel_image_start
kernel_image_start:
	mov BSP_FLAG, %l0
	and %o0, %l0, %l7			! l7 <= bootstrap processor?
	andn %o0, %l0, %l6			! l6 <= start of physical memory

	! Get bits 40:13 of physmem_base.
	srlx %l6, 13, %l5
	sllx %l5, 13 + (63 - 40), %l5
	srlx %l5, 63 - 40, %l5			! l5 <= physmem_base[40:13]
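
	/*
	 * The three shifts above clear bits 12..0 and 63..41 of the physical
	 * memory base, leaving only bits [40:13] in their original positions.
	 * This is where the physical address field of a TTE data word lives,
	 * so %l5 can simply be OR-ed into the TLB data words prepared below.
	 */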
 
	/*
	 * Set up the basic runtime environment.
	 */
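
	/*
	 * The window register values below follow the SPARC V9 invariant
	 * CANSAVE + CANRESTORE + OTHERWIN = NWINDOWS - 2, which is why
	 * %cansave gets NWINDOWS - 2 while %canrestore and %otherwin are
	 * zeroed.
	 */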
 
	wrpr %g0, NWINDOWS - 2, %cansave	! set maximum saveable windows
	wrpr %g0, 0, %canrestore		! get rid of windows we will
						! never need again
	wrpr %g0, 0, %otherwin			! make sure the window state is
						! consistent
	wrpr %g0, NWINDOWS - 1, %cleanwin	! prevent needless clean_window
						! traps for kernel

	wrpr %g0, 0, %tl			! TL = 0, primary context
						! register is used

	wrpr %g0, PSTATE_PRIV_BIT, %pstate	! disable interrupts and disable
						! 32-bit address masking

	wrpr %g0, 0, %pil			! initialize %pil

	/*
	 * Switch to kernel trap table.
	 */
	sethi %hi(trap_table), %g1
	wrpr %g1, %lo(trap_table), %tba

	/*
	 * Take over the DMMU by installing a locked TTE entry identically
	 * mapping the first 4M of memory.
	 *
	 * In the case of the DMMU, no FLUSH instructions need to be issued.
	 * Because of that, the old DTLB contents can be demapped pretty
	 * straightforwardly and without causing any traps.
	 */

	wr %g0, ASI_DMMU, %asi

#define SET_TLB_DEMAP_CMD(r1, context_id) \
	set (TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) | (context_id << \
		TLB_DEMAP_CONTEXT_SHIFT), %r1

	! demap context 0
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
	stxa %g0, [%g1] ASI_DMMU_DEMAP
	membar #Sync

#define SET_TLB_TAG(r1, context) \
	set VMA | (context << TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1

	! write DTLB tag
	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi
	membar #Sync

#ifdef CONFIG_VIRT_IDX_DCACHE
#define TTE_LOW_DATA(imm) 	(TTE_CP | TTE_CV | TTE_P | LMA | (imm))
#else /* CONFIG_VIRT_IDX_DCACHE */
#define TTE_LOW_DATA(imm) 	(TTE_CP | TTE_P | LMA | (imm))
#endif /* CONFIG_VIRT_IDX_DCACHE */

#define SET_TLB_DATA(r1, r2, imm) \
	set TTE_LOW_DATA(imm), %r1; \
	or %r1, %l5, %r1; \
	mov PAGESIZE_4M, %r2; \
	sllx %r2, TTE_SIZE_SHIFT, %r2; \
	or %r1, %r2, %r1; \
	mov 1, %r2; \
	sllx %r2, TTE_V_SHIFT, %r2; \
	or %r1, %r2, %r1;
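
/*
 * SET_TLB_DATA assembles a complete TTE data word in %r1: the low attribute
 * bits supplied by TTE_LOW_DATA plus the caller's extra flags, the physical
 * base bits [40:13] taken from %l5, the 4M page size field and, finally, the
 * valid bit.
 */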
 
	! write DTLB data and install the kernel mapping
	SET_TLB_DATA(g1, g2, TTE_L | TTE_W)	! use non-global mapping
	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG
	membar #Sync

	/*
	 * Because we want the kernel and userspace to have separate, full
	 * 64-bit address spaces, we cannot use global mappings. Therefore
	 * we prepare the identity mapping also in context 1. This step is
	 * required by the code installing the ITLB mapping.
	 */
	! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi
	membar #Sync

	! write DTLB data and install the kernel mapping in context 1
	SET_TLB_DATA(g1, g2, TTE_W)			! use non-global mapping
	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG
	membar #Sync

	/*
	 * Now it is time to take over the IMMU. Unfortunately, it cannot be
	 * done as easily as the DMMU, because the IMMU is mapping the very
	 * code it executes.
	 *
	 * [ Note that brave experiments with disabling the IMMU and using the
	 * DMMU approach failed after a dozen desperate days with only little
	 * success. ]
	 *
	 * The approach used here is inspired by OpenBSD. First, the kernel
	 * creates an IMMU mapping for itself in context 1 (MEM_CONTEXT_TEMP)
	 * and switches to it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
	 * afterwards and replaced with the permanent kernel mapping. Finally,
	 * the kernel switches back to context 0 and demaps context 1.
	 *
	 * Moreover, the IMMU requires use of the FLUSH instructions. That is
	 * OK, because we always use operands with addresses already mapped by
	 * the taken-over DTLB.
	 */

	set kernel_image_start, %g5

	! write ITLB tag of context 1
	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
	mov VA_DMMU_TAG_ACCESS, %g2
	stxa %g1, [%g2] ASI_IMMU
	flush %g5

	! write ITLB data and install the temporary mapping in context 1
	SET_TLB_DATA(g1, g2, 0)			! use non-global mapping
	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
	flush %g5

	! switch to context 1
	mov MEM_CONTEXT_TEMP, %g1
	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
	flush %g5

	! demap context 0
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
	stxa %g0, [%g1] ASI_IMMU_DEMAP
	flush %g5

	! write ITLB tag of context 0
	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
	mov VA_DMMU_TAG_ACCESS, %g2
	stxa %g1, [%g2] ASI_IMMU
	flush %g5

	! write ITLB data and install the permanent kernel mapping in context 0
	SET_TLB_DATA(g1, g2, TTE_L)		! use non-global mapping
	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
	flush %g5
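
	/*
	 * Note: while %tl is nonzero, instruction fetches are translated
	 * using the nucleus context rather than the primary context. That is
	 * what makes it safe to demap context 1 and rewrite the primary
	 * context register below without pulling the mapping from under the
	 * currently executing code.
	 */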
 
	! enter nucleus - using context 0
	wrpr %g0, 1, %tl

	! demap context 1
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_PRIMARY)
	stxa %g0, [%g1] ASI_IMMU_DEMAP
	flush %g5

	! set context 0 in the primary context register
	stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
	flush %g5

	! leave nucleus - using primary context, i.e. context 0
	wrpr %g0, 0, %tl

	brz %l7, 1f				! skip if you are not the bootstrap CPU
	nop

	/*
	 * Save physmem_base for use by the mm subsystem.
	 * %l6 contains the starting physical address.
	 */
	sethi %hi(physmem_base), %l4
	stx %l6, [%l4 + %lo(physmem_base)]

	/*
	 * Precompute the kernel 8K TLB data template.
	 * %l5 contains the starting physical address bits [40:13].
	 */
	sethi %hi(kernel_8k_tlb_data_template), %l4
	ldx [%l4 + %lo(kernel_8k_tlb_data_template)], %l3
	or %l3, %l5, %l3
	stx %l3, [%l4 + %lo(kernel_8k_tlb_data_template)]

	/*
	 * Flush D-Cache.
	 */
	call dcache_flush
	nop

	/*
	 * So far, we have not touched the stack.
	 * It is a good idea to set the kernel stack to a known state now.
	 */
	sethi %hi(temporary_boot_stack), %sp
	or %sp, %lo(temporary_boot_stack), %sp
	sub %sp, STACK_BIAS, %sp
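
	/*
	 * Note: per the SPARC V9 ABI, %sp is biased; the current frame is
	 * addressed as %sp + STACK_BIAS (the bias is 2047 in the V9 ABI),
	 * so the register window save area of this frame starts right at
	 * temporary_boot_stack.
	 */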
 
	sethi %hi(bootinfo), %o0
	call memcpy				! copy bootinfo
	or %o0, %lo(bootinfo), %o0
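
	/*
	 * The instruction in the delay slot above completes memcpy's first
	 * argument (%o0) before the call takes effect; the second and third
	 * arguments are taken from %o1 (bootinfo address) and %o2 (bootinfo
	 * size), which are expected to still hold the values passed in by
	 * the boot loader.
	 */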
 
	call arch_pre_main
	nop

	call main_bsp
	nop

	/* Not reached. */

0:
	ba 0b
	nop


	/*
	 * Read MID from the processor.
	 */
1:
	ldxa [%g0] ASI_UPA_CONFIG, %g1
	srlx %g1, UPA_CONFIG_MID_SHIFT, %g1
	and %g1, UPA_CONFIG_MID_MASK, %g1

#ifdef CONFIG_SMP
	/*
	 * Busy loop for APs until the BSP picks them up. A processor cannot
	 * leave the loop until the global variable 'waking_up_mid' equals its
	 * MID.
	 */
	set waking_up_mid, %g2
2:
	ldx [%g2], %g3
	cmp %g3, %g1
	bne 2b
	nop

	/*
	 * Configure stack for the AP.
	 * The AP is expected to use the stack saved
	 * in the ctx global variable.
	 */
	set ctx, %g1
	add %g1, OFFSET_SP, %g1
	ldx [%g1], %o6

	call main_ap
	nop

	/* Not reached. */
#endif

0:
	ba 0b
	nop


.section K_DATA_START, "aw", @progbits

/*
 * Create a small stack to be used by the bootstrap processor. It is going to
 * be used only for a very limited period of time, but we switch to it anyway,
 * just to be sure we are properly initialized.
 */

#define INITIAL_STACK_SIZE	1024

.align STACK_ALIGNMENT
	.space INITIAL_STACK_SIZE
.align STACK_ALIGNMENT
temporary_boot_stack:
	.space STACK_WINDOW_SAVE_AREA_SIZE
 

.data

.align 8
.global physmem_base		! copy of the physical memory base address
physmem_base:
	.quad 0

/*
 * This variable is used by the fast_data_MMU_miss trap handler. At runtime, it
 * is further modified to reflect the starting address of physical memory.
 */
.global kernel_8k_tlb_data_template
kernel_8k_tlb_data_template:
#ifdef CONFIG_VIRT_IDX_DCACHE
	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
		 TTE_CV | TTE_P | TTE_W)
#else /* CONFIG_VIRT_IDX_DCACHE */
	.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
		TTE_P | TTE_W)
#endif /* CONFIG_VIRT_IDX_DCACHE */