Subversion Repositories: HelenOS
Diff of Rev 1903 against Rev 1905 (viewer options: go to most recent revision, only display areas with differences, ignore whitespace, details, blame, last modification, view log, RSS feed).
1
#
1
#
2
# Copyright (C) 2005 Jakub Jermar
2
# Copyright (C) 2005 Jakub Jermar
3
# All rights reserved.
3
# All rights reserved.
4
#
4
#
5
# Redistribution and use in source and binary forms, with or without
5
# Redistribution and use in source and binary forms, with or without
6
# modification, are permitted provided that the following conditions
6
# modification, are permitted provided that the following conditions
7
# are met:
7
# are met:
8
#
8
#
9
# - Redistributions of source code must retain the above copyright
9
# - Redistributions of source code must retain the above copyright
10
#   notice, this list of conditions and the following disclaimer.
10
#   notice, this list of conditions and the following disclaimer.
11
# - Redistributions in binary form must reproduce the above copyright
11
# - Redistributions in binary form must reproduce the above copyright
12
#   notice, this list of conditions and the following disclaimer in the
12
#   notice, this list of conditions and the following disclaimer in the
13
#   documentation and/or other materials provided with the distribution.
13
#   documentation and/or other materials provided with the distribution.
14
# - The name of the author may not be used to endorse or promote products
14
# - The name of the author may not be used to endorse or promote products
15
#   derived from this software without specific prior written permission.
15
#   derived from this software without specific prior written permission.
16
#
16
#
17
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
#
27
#
28
 
28
 
29
#include <arch/arch.h>
29
#include <arch/arch.h>
30
#include <arch/regdef.h>
30
#include <arch/regdef.h>
31
#include <arch/boot/boot.h>
31
#include <arch/boot/boot.h>
32
 
32
 
33
#include <arch/mm/mmu.h>
33
#include <arch/mm/mmu.h>
34
#include <arch/mm/tlb.h>
34
#include <arch/mm/tlb.h>
35
#include <arch/mm/tte.h>
35
#include <arch/mm/tte.h>
36
 
36
 
37
#ifdef CONFIG_SMP
37
#ifdef CONFIG_SMP
38
#include <arch/context_offset.h>
38
#include <arch/context_offset.h>
39
#endif
39
#endif
40
 
40
 
41
.register %g2, #scratch
41
.register %g2, #scratch
42
.register %g3, #scratch
42
.register %g3, #scratch
43
 
43
 
44
.section K_TEXT_START, "ax"
44
.section K_TEXT_START, "ax"
45
 
45
 
46
/*
46
/*
47
 * Here is where the kernel is passed control
47
 * Here is where the kernel is passed control
48
 * from the boot loader.
48
 * from the boot loader.
49
 * 
49
 * 
50
 * The registers are expected to be in this state:
50
 * The registers are expected to be in this state:
51
 * - %o0 non-zero for the bootstrap processor, zero for application/secondary processors
51
 * - %o0 non-zero for the bootstrap processor, zero for application/secondary processors
52
 * - %o1 bootinfo structure address
52
 * - %o1 bootinfo structure address
53
 * - %o2 bootinfo structure size
53
 * - %o2 bootinfo structure size
54
 *
54
 *
55
 * Moreover, we depend on boot having established the
55
 * Moreover, we depend on boot having established the
56
 * following environment:
56
 * following environment:
57
 * - TLBs are on
57
 * - TLBs are on
58
 * - identity mapping for the kernel image
58
 * - identity mapping for the kernel image
59
 * - identity mapping for memory stack
59
 * - identity mapping for memory stack
60
 */
60
 */
61
 
61
 
62
.global kernel_image_start
62
.global kernel_image_start
63
kernel_image_start:
63
kernel_image_start:
64
	mov %o0, %l7
64
	mov %o0, %l7
65
 
65
 
66
	/*
66
	/*
67
	 * Setup basic runtime environment.
67
	 * Setup basic runtime environment.
68
	 */
68
	 */
69
 
69
 
70
	flushw					! flush all but the active register window
70
	flushw					! flush all but the active register window
71
 
71
 
72
	wrpr %g0, 0, %tl			! TL = 0, primary context register is used
72
	wrpr %g0, 0, %tl			! TL = 0, primary context register is used
73
 
73
 
74
	wrpr %g0, PSTATE_PRIV_BIT, %pstate	! Disable interrupts and disable 32-bit address masking.
74
	wrpr %g0, PSTATE_PRIV_BIT, %pstate	! Disable interrupts and disable 32-bit address masking.
75
 
75
 
76
	wrpr %g0, 0, %pil			! intialize %pil
76
	wrpr %g0, 0, %pil			! intialize %pil
77
 
77
 
78
	/*
78
	/*
79
	 * Copy the bootinfo structure passed from the boot loader
79
	 * Copy the bootinfo structure passed from the boot loader
80
	 * to the kernel bootinfo structure.
80
	 * to the kernel bootinfo structure.
81
	 */
81
	 */
82
	brz %l7, 0f				! skip if you are not the bootstrap CPU
82
	brz %l7, 0f				! skip if you are not the bootstrap CPU
-
 
83
	nop
-
 
84
	
83
	sethi %hi(bootinfo), %o0
85
	sethi %hi(bootinfo), %o0
84
	call memcpy
86
	call memcpy
85
	or %o0, %lo(bootinfo), %o0
87
	or %o0, %lo(bootinfo), %o0
86
0:
88
0:
87
 
89
 
88
	/*
90
	/*
89
	 * Switch to kernel trap table.
91
	 * Switch to kernel trap table.
90
	 */
92
	 */
91
	sethi %hi(trap_table), %g1
93
	sethi %hi(trap_table), %g1
92
	wrpr %g1, %lo(trap_table), %tba
94
	wrpr %g1, %lo(trap_table), %tba
93
 
95
 
94
	/* 
96
	/* 
95
	 * Take over the DMMU by installing global locked
97
	 * Take over the DMMU by installing global locked
96
	 * TTE entry identically mapping the first 4M
98
	 * TTE entry identically mapping the first 4M
97
	 * of memory.
99
	 * of memory.
98
	 *
100
	 *
99
	 * In case of DMMU, no FLUSH instructions need to be
101
	 * In case of DMMU, no FLUSH instructions need to be
100
	 * issued. Because of that, the old DTLB contents can
102
	 * issued. Because of that, the old DTLB contents can
101
	 * be demapped pretty straightforwardly and without
103
	 * be demapped pretty straightforwardly and without
102
	 * causing any traps.
104
	 * causing any traps.
103
	 */
105
	 */
104
 
106
 
105
	wr %g0, ASI_DMMU, %asi
107
	wr %g0, ASI_DMMU, %asi
106
 
108
 
107
#define SET_TLB_DEMAP_CMD(r1, context_id) \
109
#define SET_TLB_DEMAP_CMD(r1, context_id) \
108
	set (TLB_DEMAP_CONTEXT<<TLB_DEMAP_TYPE_SHIFT) | (context_id<<TLB_DEMAP_CONTEXT_SHIFT), %r1
110
	set (TLB_DEMAP_CONTEXT<<TLB_DEMAP_TYPE_SHIFT) | (context_id<<TLB_DEMAP_CONTEXT_SHIFT), %r1
109
	
111
	
110
	! demap context 0
112
	! demap context 0
111
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
113
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
112
	stxa %g0, [%g1] ASI_DMMU_DEMAP			
114
	stxa %g0, [%g1] ASI_DMMU_DEMAP			
113
	membar #Sync
115
	membar #Sync
114
 
116
 
115
#define SET_TLB_TAG(r1, context) \
117
#define SET_TLB_TAG(r1, context) \
116
	set VMA | (context<<TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
118
	set VMA | (context<<TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
117
 
119
 
118
	! write DTLB tag
120
	! write DTLB tag
119
	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
121
	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
120
	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi			
122
	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi			
121
	membar #Sync
123
	membar #Sync
122
 
124
 
123
#define SET_TLB_DATA(r1, r2, imm) \
125
#define SET_TLB_DATA(r1, r2, imm) \
124
	set TTE_CV | TTE_CP | TTE_P | LMA | imm, %r1; \
126
	set TTE_CV | TTE_CP | TTE_P | LMA | imm, %r1; \
125
	set PAGESIZE_4M, %r2; \
127
	set PAGESIZE_4M, %r2; \
126
	sllx %r2, TTE_SIZE_SHIFT, %r2; \
128
	sllx %r2, TTE_SIZE_SHIFT, %r2; \
127
	or %r1, %r2, %r1; \
129
	or %r1, %r2, %r1; \
128
	mov 1, %r2; \
130
	mov 1, %r2; \
129
	sllx %r2, TTE_V_SHIFT, %r2; \
131
	sllx %r2, TTE_V_SHIFT, %r2; \
130
	or %r1, %r2, %r1;
132
	or %r1, %r2, %r1;
131
	
133
	
132
	! write DTLB data and install the kernel mapping
134
	! write DTLB data and install the kernel mapping
133
	SET_TLB_DATA(g1, g2, TTE_L | TTE_W)	! use non-global mapping
135
	SET_TLB_DATA(g1, g2, TTE_L | TTE_W)	! use non-global mapping
134
	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG		
136
	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG		
135
	membar #Sync
137
	membar #Sync
136
 
138
 
137
	/*
139
	/*
138
	 * Because we cannot use global mappings (because we want to
140
	 * Because we cannot use global mappings (because we want to
139
	 * have separate 64-bit address spaces for both the kernel
141
	 * have separate 64-bit address spaces for both the kernel
140
	 * and the userspace), we prepare the identity mapping also in
142
	 * and the userspace), we prepare the identity mapping also in
141
	 * context 1. This step is required by the
143
	 * context 1. This step is required by the
142
	 * code installing the ITLB mapping.
144
	 * code installing the ITLB mapping.
143
	 */
145
	 */
144
	! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
146
	! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
145
	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
147
	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
146
	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi			
148
	stxa %g1, [VA_DMMU_TAG_ACCESS] %asi			
147
	membar #Sync
149
	membar #Sync
148
 
150
 
149
	! write DTLB data and install the kernel mapping in context 1
151
	! write DTLB data and install the kernel mapping in context 1
150
	SET_TLB_DATA(g1, g2, TTE_W)			! use non-global mapping
152
	SET_TLB_DATA(g1, g2, TTE_W)			! use non-global mapping
151
	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG		
153
	stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG		
152
	membar #Sync
154
	membar #Sync
153
	
155
	
154
	/*
156
	/*
155
	 * Now is time to take over the IMMU.
157
	 * Now is time to take over the IMMU.
156
	 * Unfortunatelly, it cannot be done as easily as the DMMU,
158
	 * Unfortunatelly, it cannot be done as easily as the DMMU,
157
	 * because the IMMU is mapping the code it executes.
159
	 * because the IMMU is mapping the code it executes.
158
	 *
160
	 *
159
	 * [ Note that brave experiments with disabling the IMMU
161
	 * [ Note that brave experiments with disabling the IMMU
160
	 * and using the DMMU approach failed after a dozen
162
	 * and using the DMMU approach failed after a dozen
161
	 * of desparate days with only little success. ]
163
	 * of desparate days with only little success. ]
162
	 *
164
	 *
163
	 * The approach used here is inspired from OpenBSD.
165
	 * The approach used here is inspired from OpenBSD.
164
	 * First, the kernel creates IMMU mapping for itself
166
	 * First, the kernel creates IMMU mapping for itself
165
	 * in context 1 (MEM_CONTEXT_TEMP) and switches to
167
	 * in context 1 (MEM_CONTEXT_TEMP) and switches to
166
	 * it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
168
	 * it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
167
	 * afterwards and replaced with the kernel permanent
169
	 * afterwards and replaced with the kernel permanent
168
	 * mapping. Finally, the kernel switches back to
170
	 * mapping. Finally, the kernel switches back to
169
	 * context 0 and demaps context 1.
171
	 * context 0 and demaps context 1.
170
	 *
172
	 *
171
	 * Moreover, the IMMU requires use of the FLUSH instructions.
173
	 * Moreover, the IMMU requires use of the FLUSH instructions.
172
	 * But that is OK because we always use operands with
174
	 * But that is OK because we always use operands with
173
	 * addresses already mapped by the taken over DTLB.
175
	 * addresses already mapped by the taken over DTLB.
174
	 */
176
	 */
175
	
177
	
176
	set kernel_image_start, %g5
178
	set kernel_image_start, %g5
177
	
179
	
178
	! write ITLB tag of context 1
180
	! write ITLB tag of context 1
179
	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
181
	SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
180
	mov VA_DMMU_TAG_ACCESS, %g2
182
	mov VA_DMMU_TAG_ACCESS, %g2
181
	stxa %g1, [%g2] ASI_IMMU
183
	stxa %g1, [%g2] ASI_IMMU
182
	flush %g5
184
	flush %g5
183
 
185
 
184
	! write ITLB data and install the temporary mapping in context 1
186
	! write ITLB data and install the temporary mapping in context 1
185
	SET_TLB_DATA(g1, g2, 0)			! use non-global mapping
187
	SET_TLB_DATA(g1, g2, 0)			! use non-global mapping
186
	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG		
188
	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG		
187
	flush %g5
189
	flush %g5
188
	
190
	
189
	! switch to context 1
191
	! switch to context 1
190
	mov MEM_CONTEXT_TEMP, %g1
192
	mov MEM_CONTEXT_TEMP, %g1
191
	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
193
	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
192
	flush %g5
194
	flush %g5
193
	
195
	
194
	! demap context 0
196
	! demap context 0
195
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
197
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
196
	stxa %g0, [%g1] ASI_IMMU_DEMAP			
198
	stxa %g0, [%g1] ASI_IMMU_DEMAP			
197
	flush %g5
199
	flush %g5
198
	
200
	
199
	! write ITLB tag of context 0
201
	! write ITLB tag of context 0
200
	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
202
	SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
201
	mov VA_DMMU_TAG_ACCESS, %g2
203
	mov VA_DMMU_TAG_ACCESS, %g2
202
	stxa %g1, [%g2] ASI_IMMU
204
	stxa %g1, [%g2] ASI_IMMU
203
	flush %g5
205
	flush %g5
204
 
206
 
205
	! write ITLB data and install the permanent kernel mapping in context 0
207
	! write ITLB data and install the permanent kernel mapping in context 0
206
	SET_TLB_DATA(g1, g2, TTE_L)		! use non-global mapping
208
	SET_TLB_DATA(g1, g2, TTE_L)		! use non-global mapping
207
	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG		
209
	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG		
208
	flush %g5
210
	flush %g5
209
 
211
 
210
	! switch to context 0
212
	! switch to context 0
211
	stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
213
	stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
212
	flush %g5
214
	flush %g5
213
 
215
 
214
	! ensure nucleus mapping
216
	! ensure nucleus mapping
215
	wrpr %g0, 1, %tl
217
	wrpr %g0, 1, %tl
216
 
218
 
217
	! set context 1 in the primary context register
219
	! set context 1 in the primary context register
218
	mov MEM_CONTEXT_TEMP, %g1
220
	mov MEM_CONTEXT_TEMP, %g1
219
	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
221
	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
220
	flush %g5
222
	flush %g5
221
 
223
 
222
	! demap context 1
224
	! demap context 1
223
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_PRIMARY)
225
	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_PRIMARY)
224
	stxa %g0, [%g1] ASI_IMMU_DEMAP			
226
	stxa %g0, [%g1] ASI_IMMU_DEMAP			
225
	flush %g5
227
	flush %g5
226
	
228
	
227
	! set context 0 in the primary context register
229
	! set context 0 in the primary context register
228
	stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
230
	stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
229
	flush %g5
231
	flush %g5
230
	
232
	
231
	! set TL back to 0
233
	! set TL back to 0
232
	wrpr %g0, 0, %tl
234
	wrpr %g0, 0, %tl
233
 
235
 
234
	brz %l7, 1f				! skip if you are not the bootstrap CPU
236
	brz %l7, 1f				! skip if you are not the bootstrap CPU
235
	nop
237
	nop
236
 
238
 
237
	call arch_pre_main
239
	call arch_pre_main
238
	nop
240
	nop
239
	
241
	
240
	call main_bsp
242
	call main_bsp
241
	nop
243
	nop
242
 
244
 
243
	/* Not reached. */
245
	/* Not reached. */
244
 
246
 
245
0:
247
0:
246
	ba 0b
248
	ba 0b
247
	nop
249
	nop
248
 
250
 
249
 
251
 
250
	/*
252
	/*
251
	 * Read MID from the processor.
253
	 * Read MID from the processor.
252
	 */
254
	 */
253
1:
255
1:
254
	ldxa [%g0] ASI_UPA_CONFIG, %g1
256
	ldxa [%g0] ASI_UPA_CONFIG, %g1
255
	srlx %g1, UPA_CONFIG_MID_SHIFT, %g1
257
	srlx %g1, UPA_CONFIG_MID_SHIFT, %g1
256
	and %g1, UPA_CONFIG_MID_MASK, %g1
258
	and %g1, UPA_CONFIG_MID_MASK, %g1
257
 
259
 
-
 
260
#ifdef CONFIG_SMP
258
	/*
261
	/*
259
	 * Active loop for APs until the BSP picks them up.
262
	 * Active loop for APs until the BSP picks them up.
260
	 * A processor cannot leave the loop until the
263
	 * A processor cannot leave the loop until the
261
	 * global variable 'waking_up_mid' equals its
264
	 * global variable 'waking_up_mid' equals its
262
	 * MID.
265
	 * MID.
263
	 */
266
	 */
264
	set waking_up_mid, %g2
267
	set waking_up_mid, %g2
265
2:
268
2:
266
	ldx [%g2], %g3
269
	ldx [%g2], %g3
267
	cmp %g3, %g1
270
	cmp %g3, %g1
268
	bne 2b
271
	bne 2b
269
	nop
272
	nop
270
 
273
 
271
#ifdef CONFIG_SMP
-
 
-
 
274
 
272
	/*
275
	/*
273
	 * Configure stack for the AP.
276
	 * Configure stack for the AP.
274
	 * The AP is expected to use the stack saved
277
	 * The AP is expected to use the stack saved
275
	 * in the ctx global variable.
278
	 * in the ctx global variable.
276
	 */
279
	 */
277
	set ctx, %g1
280
	set ctx, %g1
278
	add %g1, OFFSET_SP, %g1
281
	add %g1, OFFSET_SP, %g1
279
	ldx [%g1], %o6
282
	ldx [%g1], %o6
280
 
283
 
281
	call main_ap
284
	call main_ap
282
	nop
285
	nop
283
#endif
-
 
284
 
286
 
285
	/* Not reached. */
287
	/* Not reached. */
-
 
288
#endif
286
	
289
	
287
0:
290
0:
288
	ba 0b
291
	ba 0b
289
	nop
292
	nop
290
 
293
 
291
 
294
 
Generated by GNU Enscript 1.6.6.