Subversion Repositories HelenOS-historic

main.c, diff between Rev 1434 and Rev 1595: Rev 1595 adds #include <console/klog.h> and a call to klog_init(); the rest of the file is unchanged.
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file    main.c
 * @brief   Main initialization kernel function for all processors.
 *
 * During kernel boot, all processors, after architecture dependent
 * initialization, start executing code found in this file. After
 * bringing up all subsystems, control is passed to scheduler().
 *
 * The bootstrap processor starts executing main_bsp() while
 * the application processors start executing main_ap().
 *
 * @see scheduler()
 * @see main_bsp()
 * @see main_ap()
 */

#include <arch/asm.h>
#include <context.h>
#include <print.h>
#include <panic.h>
#include <debug.h>
#include <config.h>
#include <time/clock.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <main/kinit.h>
#include <main/version.h>
#include <console/kconsole.h>
#include <cpu.h>
#include <align.h>
#include <interrupt.h>
#include <arch/mm/memory_init.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <synch/waitq.h>
#include <synch/futex.h>
#include <arch/arch.h>
#include <arch.h>
#include <arch/faddr.h>
#include <typedefs.h>
#include <ipc/ipc.h>
#include <macros.h>
#include <adt/btree.h>
#include <console/klog.h>

#ifdef CONFIG_SMP
#include <arch/smp/apic.h>
#include <arch/smp/mps.h>
#endif /* CONFIG_SMP */
#include <smp/smp.h>

/** Global configuration structure. */
config_t config = {
    .mm_initialized = false
};

/** Initial user-space tasks */
init_t init = {
    0
};
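
/** Boot-time context; used to switch the bootstrap CPU to its separate stack (its sp also serves as the temporary AP stack). */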
context_t ctx;

/**
 * These 'hardcoded' variables will be initialized by
 * the linker or the low-level assembler code with
 * appropriate sizes and addresses.
 */
__address hardcoded_load_address = 0;
size_t hardcoded_ktext_size = 0;
size_t hardcoded_kdata_size = 0;

void main_bsp(void);
void main_ap(void);

/*
 * These two functions prevent the stack from underflowing during the
 * kernel boot phase when SP is set to the very top of the reserved
 * space. The stack could otherwise get corrupted by a fooled
 * compiler-generated pop sequence.
 */
static void main_bsp_separated_stack(void);
#ifdef CONFIG_SMP
static void main_ap_separated_stack(void);
#endif
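
/* Size of the bootstrap stack placed right above the kernel image. */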
#define CONFIG_STACK_SIZE   ((1<<STACK_FRAMES)*STACK_SIZE)

/** Main kernel routine for bootstrap CPU.
 *
 * Initializes the kernel on the bootstrap CPU and then
 * passes control directly to main_bsp_separated_stack().
 *
 * Assumes interrupts are disabled (interrupts_disable()).
 *
 */
void main_bsp(void)
{
    __address stackaddr;

    config.cpu_count = 1;
    config.cpu_active = 1;

    config.base = hardcoded_load_address;
    config.memory_size = get_memory_size();

    config.kernel_size = ALIGN_UP(hardcoded_ktext_size + hardcoded_kdata_size, PAGE_SIZE);
    stackaddr = config.base + config.kernel_size;

    /* Avoid placing the kernel stack on top of the init tasks */
    count_t i;
    bool overlap = false;
    for (i = 0; i < init.cnt; i++)
        if (PA_overlaps(stackaddr, CONFIG_STACK_SIZE, init.tasks[i].addr, init.tasks[i].size)) {
            stackaddr = ALIGN_UP(init.tasks[i].addr + init.tasks[i].size, CONFIG_STACK_SIZE);
            init.tasks[i].size = ALIGN_UP(init.tasks[i].size, CONFIG_STACK_SIZE) + CONFIG_STACK_SIZE;
            overlap = true;
        }

    if (!overlap)
        config.kernel_size += CONFIG_STACK_SIZE;
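
    /* Switch to the stack reserved above and continue in main_bsp_separated_stack(). */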
    context_save(&ctx);
    context_set(&ctx, FADDR(main_bsp_separated_stack), stackaddr, THREAD_STACK_SIZE);
    context_restore(&ctx);
    /* not reached */
}


/** Main kernel routine for bootstrap CPU using new stack.
 *
 * Second part of main_bsp().
 *
 */
void main_bsp_separated_stack(void)
{
    task_t *k;
    thread_t *t;
    count_t i;
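
    /* Initialize the THE structure for the bootstrap CPU. */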
    the_initialize(THE);

    /*
     * kconsole data structures must be initialized very early
     * because other subsystems will register their respective
     * commands.
     */
    kconsole_init();

    /*
     * Exception handler initialization, before architecture
     * starts adding its own handlers
     */
    exc_init();

    /*
     * Memory management subsystems initialization.
     */
    arch_pre_mm_init();
    frame_init();       /* Initialize at least 1 memory segment big enough for slab to work */
    slab_cache_init();
    btree_init();
    as_init();
    page_init();
    tlb_init();
    config.mm_initialized = true;
    arch_post_mm_init();

    version_print();
    printf("%.*p: hardcoded_ktext_size=%zdK, hardcoded_kdata_size=%zdK\n", sizeof(__address) * 2, config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >> 10);

    arch_pre_smp_init();
    smp_init();

    slab_enable_cpucache(); /* Slab must be initialized AFTER we know the number of processors */

    printf("config.memory_size=%zdM\n", config.memory_size >> 20);
    printf("config.cpu_count=%zd\n", config.cpu_count);
    cpu_init();
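
    /* Timekeeping, scheduling, task and thread management, futexes and the kernel log. */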
    calibrate_delay_loop();
    clock_counter_init();
    timeout_init();
    scheduler_init();
    task_init();
    thread_init();
    futex_init();
    klog_init();
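
    /* Report the base address and size of each preloaded init task. */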
    for (i = 0; i < init.cnt; i++)
        printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, sizeof(__address) * 2, init.tasks[i].addr, i, init.tasks[i].size);

    ipc_init();

    /*
     * Create kernel task.
     */
    k = task_create(AS_KERNEL, "KERNEL");
    if (!k)
        panic("can't create kernel task\n");

    /*
     * Create the first thread.
     */
    t = thread_create(kinit, NULL, k, 0, "kinit");
    if (!t)
        panic("can't create kinit thread\n");
    thread_ready(t);

    /*
     * This call to scheduler() will return to kinit,
     * starting the thread of kernel threads.
     */
    scheduler();
    /* not reached */
}


#ifdef CONFIG_SMP
/** Main kernel routine for application CPUs.
 *
 * Executed by application processors; the temporary stack
 * is at ctx.sp, which was set up during BSP boot.
 * This function passes control directly to
 * main_ap_separated_stack().
 *
 * Assumes interrupts are disabled (interrupts_disable()).
 *
 */
void main_ap(void)
{
    /*
     * Incrementing the active CPU counter guarantees that
     * pm_init() will not attempt to build the GDT and IDT tables again.
     * Likewise, frame_init() and cpu_init() will not do the complete
     * initialization again.
     */
    config.cpu_active++;

    /*
     * The THE structure is well defined because ctx.sp is used as stack.
     */
    the_initialize(THE);

    arch_pre_mm_init();
    frame_init();
    page_init();
    tlb_init();
    arch_post_mm_init();

    cpu_init();

    calibrate_delay_loop();
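
    /* Bring up this processor's local APIC. */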
    l_apic_init();
    l_apic_debug();

    the_copy(THE, (the_t *) CPU->stack);

    /*
     * If we woke kmp up before we left the kernel stack, we could
     * collide with another CPU coming up. To prevent this, we
     * switch to this cpu's private stack prior to waking kmp up.
     */
    context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


/** Main kernel routine for application CPUs using new stack.
 *
 * Second part of main_ap().
 *
 */
void main_ap_separated_stack(void)
{
    /*
     * Configure timeouts for this cpu.
     */
    timeout_init();
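
    /* Let kmp know that this CPU has come up, then enter the scheduler. */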
    waitq_wakeup(&ap_completion_wq, WAKEUP_FIRST);
    scheduler();
    /* not reached */
}
#endif /* CONFIG_SMP */