/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup main
 * @{
 */

/**
 * @file
 * @brief Main initialization kernel function for all processors.
 *
 * During kernel boot, all processors, after architecture dependent
 * initialization, start executing code found in this file. After
 * bringing up all subsystems, control is passed to scheduler().
 *
 * The bootstrap processor starts executing main_bsp() while
 * the application processors start executing main_ap().
 *
 * @see scheduler()
 * @see main_bsp()
 * @see main_ap()
 */

#include <arch/asm.h>
#include <context.h>
#include <print.h>
#include <panic.h>
#include <debug.h>
#include <config.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/tasklet.h>
#include <main/kinit.h>
#include <main/version.h>
#include <console/kconsole.h>
#include <console/console.h>
#include <cpu.h>
#include <align.h>
#include <interrupt.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <synch/waitq.h>
#include <synch/futex.h>
#include <arch/arch.h>
#include <arch.h>
#include <arch/faddr.h>
#include <ipc/ipc.h>
#include <macros.h>
#include <adt/btree.h>
#include <smp/smp.h>
#include <ddi/ddi.h>
#include <main/main.h>
#include <ipc/event.h>

/** Global configuration structure. */
config_t config;

/** Initial user-space tasks */
init_t init = {
    .cnt = 0
};

/** Boot allocations. */
ballocs_t ballocs = {
    .base = NULL,
    .size = 0
};

context_t ctx;

/*
 * These 'hardcoded' variables will be initialized by
 * the linker or the low-level assembler code with
 * appropriate sizes and addresses.
 */

/** Virtual address of where the kernel is loaded. */
uintptr_t hardcoded_load_address = 0;
/** Size of the kernel code in bytes. */
size_t hardcoded_ktext_size = 0;
/** Size of the kernel data in bytes. */
size_t hardcoded_kdata_size = 0;
/** Lowest safe stack virtual address. */
uintptr_t stack_safe = 0;

/*
 * These two functions prevent the stack from underflowing during the
 * kernel boot phase when SP is set to the very top of the reserved
 * space. The stack could otherwise get corrupted by a compiler-generated
 * pop sequence that assumes a regular call frame.
 */
static void main_bsp_separated_stack(void);
#ifdef CONFIG_SMP
static void main_ap_separated_stack(void);
#endif

#define CONFIG_STACK_SIZE   ((1 << STACK_FRAMES) * STACK_SIZE)
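/*
 * For illustration only; STACK_FRAMES and STACK_SIZE are
 * architecture-specific. With, say, STACK_FRAMES == 2 and
 * STACK_SIZE == 4 KiB, the kernel stack would span
 * (1 << 2) * 4 KiB = 16 KiB.
 */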

/** Main kernel routine for the bootstrap CPU.
 *
 * The code here still runs on the boot stack, which knows nothing about
 * preemption counts.  Because of that, this function cannot directly call
 * functions that disable or enable preemption (e.g. spinlock_lock()). The
 * primary task of this function is to calculate the address of a new stack
 * and switch to it.
 *
 * Assumes interrupts are disabled (see interrupts_disable()).
 *
 */
void main_bsp(void)
{
    config.cpu_count = 1;
    config.cpu_active = 1;

    config.base = hardcoded_load_address;
    config.kernel_size = ALIGN_UP(hardcoded_ktext_size +
        hardcoded_kdata_size, PAGE_SIZE);
    config.stack_size = CONFIG_STACK_SIZE;

    /* Initially the stack is placed just after the kernel. */
    config.stack_base = config.base + config.kernel_size;

    /* Avoid placing the stack on top of the init tasks. */
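    /*
     * Note: PA_overlaps() is assumed here to report whether the two
     * address ranges intersect; when the candidate stack range collides
     * with an init task image, the stack base is bumped past that image,
     * aligned to the stack size.
     */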
    size_t i;
    for (i = 0; i < init.cnt; i++) {
        if (PA_overlaps(config.stack_base, config.stack_size,
            init.tasks[i].addr, init.tasks[i].size))
            config.stack_base = ALIGN_UP(init.tasks[i].addr +
                init.tasks[i].size, config.stack_size);
    }

    /* Avoid placing the stack on top of boot allocations. */
    if (ballocs.size) {
        if (PA_overlaps(config.stack_base, config.stack_size,
            ballocs.base, ballocs.size))
            config.stack_base = ALIGN_UP(ballocs.base +
                ballocs.size, PAGE_SIZE);
    }

    if (config.stack_base < stack_safe)
        config.stack_base = ALIGN_UP(stack_safe, PAGE_SIZE);
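
    /*
     * Switch to the new stack: context_set() primes ctx with the
     * relocated stack and the address of main_bsp_separated_stack(),
     * so the context_restore() call below effectively "returns" into
     * that function already running on the new stack.
     */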
    context_save(&ctx);
    context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,
        THREAD_STACK_SIZE);
    context_restore(&ctx);
    /* not reached */
}


/** Main kernel routine for the bootstrap CPU using the new stack.
 *
 * Second part of main_bsp().
 *
 */
void main_bsp_separated_stack(void)
{
    /* Keep this the first thing. */
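    /*
     * THE is the per-stack bookkeeping structure kept at the base of
     * each kernel stack (current thread, task, CPU, address space);
     * it has to be valid before any code on this stack relies on
     * preemption counters or the scheduler.
     */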
    the_initialize(THE);

    version_print();

    LOG("\nconfig.base=%#" PRIp " config.kernel_size=%" PRIs
        "\nconfig.stack_base=%#" PRIp " config.stack_size=%" PRIs,
        config.base, config.kernel_size, config.stack_base,
        config.stack_size);

#ifdef CONFIG_KCONSOLE
    /*
     * kconsole data structures must be initialized very early
     * because other subsystems will register their respective
     * commands.
     */
    LOG_EXEC(kconsole_init());
#endif

    /*
     * Exception handler initialization, before the architecture
     * starts adding its own handlers.
     */
    LOG_EXEC(exc_init());

    /*
     * Memory management subsystems initialization.
     */
    LOG_EXEC(arch_pre_mm_init());
    LOG_EXEC(frame_init());

    /* Initialize at least 1 memory segment big enough for slab to work. */
    LOG_EXEC(slab_cache_init());
    LOG_EXEC(btree_init());
    LOG_EXEC(as_init());
    LOG_EXEC(page_init());
    LOG_EXEC(tlb_init());
    LOG_EXEC(ddi_init());
    LOG_EXEC(tasklet_init());
    LOG_EXEC(arch_post_mm_init());
    LOG_EXEC(arch_pre_smp_init());
    LOG_EXEC(smp_init());

    /* Slab must be initialized after we know the number of processors. */
    LOG_EXEC(slab_enable_cpucache());

    printf("Detected %" PRIs " CPU(s), %" PRIu64" MiB free memory\n",
        config.cpu_count, SIZE2MB(zone_total_size()));

    LOG_EXEC(cpu_init());

    LOG_EXEC(calibrate_delay_loop());
    LOG_EXEC(clock_counter_init());
    LOG_EXEC(timeout_init());
    LOG_EXEC(scheduler_init());
    LOG_EXEC(task_init());
    LOG_EXEC(thread_init());
    LOG_EXEC(futex_init());

    if (init.cnt > 0) {
        size_t i;
        for (i = 0; i < init.cnt; i++)
            LOG("init[%" PRIs "].addr=%#" PRIp ", init[%" PRIs
                "].size=%#" PRIs, i, init.tasks[i].addr, i,
                init.tasks[i].size);
    } else
        printf("No init binaries found.\n");

    LOG_EXEC(ipc_init());
    LOG_EXEC(event_init());
    LOG_EXEC(klog_init());

    /*
     * Create the kernel task.
     */
    task_t *kernel = task_create(AS_KERNEL, "kernel");
    if (!kernel)
        panic("Cannot create kernel task.");

    /*
     * Create the first thread.
     */
    thread_t *kinit_thread
        = thread_create(kinit, NULL, kernel, 0, "kinit", true);
    if (!kinit_thread)
        panic("Cannot create kinit thread.");
    LOG_EXEC(thread_ready(kinit_thread));

    /*
     * This call to scheduler() will return to kinit,
     * starting the thread of kernel threads.
     */
    scheduler();
    /* not reached */
}


#ifdef CONFIG_SMP
/** Main kernel routine for application CPUs.
 *
 * Executed by the application processors; the temporary stack
 * is at ctx.sp, which was set during BSP boot.
 * This function passes control directly to
 * main_ap_separated_stack().
 *
 * Assumes interrupts are disabled (see interrupts_disable()).
 *
 */
void main_ap(void)
{
    /*
     * Incrementing the active CPU counter will guarantee that the
     * *_init() functions can find out that they need to
     * do initialization for AP only.
     */
    config.cpu_active++;

    /*
     * The THE structure is well defined because ctx.sp is used as the stack.
     */
    the_initialize(THE);

    arch_pre_mm_init();
    frame_init();
    page_init();
    tlb_init();
    arch_post_mm_init();

    cpu_init();
    calibrate_delay_loop();
    arch_post_cpu_init();
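
    /*
     * the_copy() below replicates the THE bookkeeping structure onto
     * this CPU's private stack so that it remains valid after the
     * stack switch that follows.
     */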
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * If we woke kmp up before we left the kernel stack, we could
     * collide with another CPU coming up. To prevent this, we
     * switch to this cpu's private stack prior to waking kmp up.
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(main_ap_separated_stack),
        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


/** Main kernel routine for application CPUs using the new stack.
 *
 * Second part of main_ap().
 *
 */
void main_ap_separated_stack(void)
{
    /*
     * Configure timeouts for this CPU.
     */
    timeout_init();
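
    /*
     * Wake up kmp (waiting on ap_completion_wq) so the bootstrap side
     * can continue bringing up the remaining processors, then enter
     * the scheduler for good.
     */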
    waitq_wakeup(&ap_completion_wq, WAKEUP_FIRST);
    scheduler();
    /* not reached */
}
#endif /* CONFIG_SMP */

/** @}
 */