/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup main
 * @{
 */

/**
 * @file
 * @brief Main initialization kernel function for all processors.
 *
 * During kernel boot, all processors, after architecture dependent
 * initialization, start executing code found in this file. After
 * bringing up all subsystems, control is passed to scheduler().
 *
 * The bootstrap processor starts executing main_bsp() while
 * the application processors start executing main_ap().
 *
 * @see scheduler()
 * @see main_bsp()
 * @see main_ap()
 */

#include <arch/asm.h>
#include <context.h>
#include <print.h>
#include <panic.h>
#include <debug.h>
#include <config.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/tasklet.h>
#include <main/kinit.h>
#include <main/version.h>
#include <console/kconsole.h>
#include <cpu.h>
#include <align.h>
#include <interrupt.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <synch/waitq.h>
#include <synch/futex.h>
#include <arch/arch.h>
#include <arch.h>
#include <arch/faddr.h>
#include <ipc/ipc.h>
#include <macros.h>
#include <adt/btree.h>
#include <smp/smp.h>
#include <ddi/ddi.h>
#include <console/console.h>

/** Global configuration structure. */
config_t config;

/** Initial user-space tasks */
init_t init = {
        .cnt = 0
};

/** Boot allocations. */
ballocs_t ballocs = {
        .base = NULL,
        .size = 0
};

context_t ctx;

/*
 * These 'hardcoded' variables will be initialized by
 * the linker or the low-level assembler code with
 * appropriate sizes and addresses.
 */

/**< Virtual address of where the kernel is loaded. */
uintptr_t hardcoded_load_address = 0;
/**< Size of the kernel code in bytes. */
size_t hardcoded_ktext_size = 0;
/**< Size of the kernel data in bytes. */
size_t hardcoded_kdata_size = 0;
/**< Lowest safe stack virtual address. */
uintptr_t stack_safe = 0;

void main_bsp(void);
void main_ap(void);

/*
 * These two functions prevent the stack from underflowing during the
 * kernel boot phase when SP is set to the very top of the reserved
 * space. The stack could get corrupted by a fooled compiler-generated
 * pop sequence otherwise.
 */
static void main_bsp_separated_stack(void);
#ifdef CONFIG_SMP
static void main_ap_separated_stack(void);
#endif

#define CONFIG_STACK_SIZE	((1 << STACK_FRAMES) * STACK_SIZE)

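/*
 * CONFIG_STACK_SIZE sizes the boot stack as (1 << STACK_FRAMES) frames of
 * STACK_SIZE bytes each; both constants are presumably provided by the
 * architecture headers. For illustration only, assuming STACK_FRAMES == 1
 * and STACK_SIZE == 4096 (values not taken from this file), the macro
 * evaluates to 2 * 4096 = 8192 bytes.
 */
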
/** Main kernel routine for bootstrap CPU.
 *
 * The code here still runs on the boot stack, which knows nothing about
 * preemption counts. Because of that, this function cannot directly call
 * functions that disable or enable preemption (e.g. spinlock_lock()). The
 * primary task of this function is to calculate the address of a new
 * stack and switch to it.
 *
 * Assuming interrupts_disable().
 *
 */
void main_bsp(void)
{
        config.cpu_count = 1;
        config.cpu_active = 1;
        
        config.base = hardcoded_load_address;
        config.kernel_size = ALIGN_UP(hardcoded_ktext_size +
            hardcoded_kdata_size, PAGE_SIZE);
        config.stack_size = CONFIG_STACK_SIZE;
        
        /* Initially the stack is placed just after the kernel. */
        config.stack_base = config.base + config.kernel_size;
        
        /* Avoid placing stack on top of init */
        count_t i;
        for (i = 0; i < init.cnt; i++) {
                if (PA_overlaps(config.stack_base, config.stack_size,
                    init.tasks[i].addr, init.tasks[i].size))
                        config.stack_base = ALIGN_UP(init.tasks[i].addr +
                            init.tasks[i].size, config.stack_size);
        }
        
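        /*
         * PA_overlaps() is presumably a range-intersection test over
         * [base, base + size) pairs: whenever the candidate stack region
         * intersects an init task image, config.stack_base is bumped just
         * past that image (aligned to the stack size). The checks below do
         * the same for boot allocations and for the stack_safe limit set up
         * by the low-level boot code.
         */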
        /* Avoid placing stack on top of boot allocations. */
        if (ballocs.size) {
                if (PA_overlaps(config.stack_base, config.stack_size,
                    ballocs.base, ballocs.size))
                        config.stack_base = ALIGN_UP(ballocs.base +
                            ballocs.size, PAGE_SIZE);
        }
        
        if (config.stack_base < stack_safe)
                config.stack_base = ALIGN_UP(stack_safe, PAGE_SIZE);
        
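        /*
         * Stack switch: context_set() points the saved context at
         * main_bsp_separated_stack() with the stack pointer placed in the
         * newly chosen stack area, so the context_restore() below resumes
         * execution there and never returns to this function.
         */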
        context_save(&ctx);
        context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,
            THREAD_STACK_SIZE);
        context_restore(&ctx);
        /* not reached */
}


/** Main kernel routine for bootstrap CPU using new stack.
 *
 * Second part of main_bsp().
 *
 */
void main_bsp_separated_stack(void)
{
        /* Keep this the first thing. */
        the_initialize(THE);
        
        LOG();
        
        version_print();
        
        LOG("\nconfig.base=%#" PRIp " config.kernel_size=%" PRIs
            "\nconfig.stack_base=%#" PRIp " config.stack_size=%" PRIs,
            config.base, config.kernel_size, config.stack_base,
            config.stack_size);
        
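        /*
         * The THE structure initialized above is presumably the per-stack
         * bookkeeping area kept at the base of the kernel stack (current
         * CPU, thread, task and similar pointers); it must be valid before
         * anything that dereferences it runs, hence it is kept first.
         */
        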
        /*
         * kconsole data structures must be initialized very early
         * because other subsystems will register their respective
         * commands.
         */
        LOG_EXEC(kconsole_init());
        
        /*
         * Exception handler initialization, before architecture
         * starts adding its own handlers
         */
        LOG_EXEC(exc_init());
        
        /*
         * Memory management subsystems initialization.
         */
        LOG_EXEC(arch_pre_mm_init());
        LOG_EXEC(frame_init());
        
        /* Initialize at least 1 memory segment big enough for slab to work. */
        LOG_EXEC(slab_cache_init());
        LOG_EXEC(btree_init());
        LOG_EXEC(as_init());
        LOG_EXEC(page_init());
        LOG_EXEC(tlb_init());
        LOG_EXEC(ddi_init());
        LOG_EXEC(tasklet_init());
        LOG_EXEC(arch_post_mm_init());
        LOG_EXEC(arch_pre_smp_init());
        LOG_EXEC(smp_init());
        
        /* Slab must be initialized after we know the number of processors. */
        LOG_EXEC(slab_enable_cpucache());
        
        printf("Detected %" PRIc " CPU(s), %" PRIu64" MB free memory\n",
            config.cpu_count, SIZE2MB(zone_total_size()));
        
        LOG_EXEC(cpu_init());
        
        LOG_EXEC(calibrate_delay_loop());
        LOG_EXEC(clock_counter_init());
        LOG_EXEC(timeout_init());
        LOG_EXEC(scheduler_init());
        LOG_EXEC(task_init());
        LOG_EXEC(thread_init());
        LOG_EXEC(futex_init());
        
        if (init.cnt > 0) {
                count_t i;
                for (i = 0; i < init.cnt; i++)
                        printf("init[%" PRIc "].addr=%#" PRIp ", init[%" PRIc
                            "].size=%#" PRIs "\n", i, init.tasks[i].addr,
                            i, init.tasks[i].size);
        } else
                printf("No init binaries found\n");
        
        LOG_EXEC(ipc_init());
        LOG_EXEC(klog_init());
        
        /*
         * Create kernel task.
         */
        task_t *kernel = task_create(AS_KERNEL, "kernel");
        if (!kernel)
                panic("Can't create kernel task\n");
        
        /*
         * Create the first thread.
         */
        thread_t *kinit_thread = thread_create(kinit, NULL, kernel, 0, "kinit",
            true);
        if (!kinit_thread)
                panic("Can't create kinit thread\n");
        LOG_EXEC(thread_ready(kinit_thread));
        
        /*
         * This call to scheduler() will return to kinit,
         * starting the thread of kernel threads.
         */
        scheduler();
        /* not reached */
}


#ifdef CONFIG_SMP
/** Main kernel routine for application CPUs.
 *
 * Executed by application processors; the temporary stack
 * is at ctx.sp, which was set up during BSP boot.
 * This function passes control directly to
 * main_ap_separated_stack().
 *
 * Assuming interrupts_disable()'d.
 *
 */
void main_ap(void)
{
        /*
         * Incrementing the active CPU counter will guarantee that the
         * *_init() functions can find out that they need to
         * do initialization for AP only.
         */
        config.cpu_active++;
        
        /*
         * The THE structure is well defined because ctx.sp is used as stack.
         */
        the_initialize(THE);
        
        arch_pre_mm_init();
        frame_init();
        page_init();
        tlb_init();
        arch_post_mm_init();
        
        cpu_init();
        calibrate_delay_loop();
        arch_post_cpu_init();
        
        the_copy(THE, (the_t *) CPU->stack);
        
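        /*
         * the_copy() above presumably replicates the THE bookkeeping data to
         * the base of this CPU's private stack, so that it remains valid
         * after the stack switch performed below.
         */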
        /*
         * If we woke kmp up before we left the kernel stack, we could
         * collide with another CPU coming up. To prevent this, we
         * switch to this CPU's private stack prior to waking kmp up.
         */
        context_set(&CPU->saved_context, FADDR(main_ap_separated_stack),
            (uintptr_t) CPU->stack, CPU_STACK_SIZE);
        context_restore(&CPU->saved_context);
        /* not reached */
}


/** Main kernel routine for application CPUs using new stack.
 *
 * Second part of main_ap().
 *
 */
void main_ap_separated_stack(void)
{
        /*
         * Configure timeouts for this CPU.
         */
        timeout_init();
        
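        /*
         * ap_completion_wq is presumably the wait queue on which the BSP-side
         * kmp thread sleeps while this application processor comes up; waking
         * it here signals that this CPU has finished its bring-up.
         */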
        waitq_wakeup(&ap_completion_wq, WAKEUP_FIRST);
        scheduler();
        /* not reached */
}
#endif /* CONFIG_SMP */

/** @}
 */