Diff between Rev 1999 and Rev 2015
@@ -78 +78 @@
 #include <ipc/ipc.h>
 #include <macros.h>
 #include <adt/btree.h>
 #include <console/klog.h>
 #include <smp/smp.h>
+#include <ddi/ddi.h>

 /** Global configuration structure. */
 config_t config;

 /** Initial user-space tasks */
@@ -100 +101 @@
 /*
  * These 'hardcoded' variables will be initialized by
  * the linker or the low level assembler code with
  * appropriate sizes and addresses.
  */
-uintptr_t hardcoded_load_address = 0;   /**< Virtual address of where the kernel is loaded. */
-size_t hardcoded_ktext_size = 0;        /**< Size of the kernel code in bytes. */
-size_t hardcoded_kdata_size = 0;        /**< Size of the kernel data in bytes. */
-uintptr_t stack_safe = 0;               /**< Lowest safe stack virtual address */
+uintptr_t hardcoded_load_address = 0;   /**< Virtual address of where the kernel
+                                          * is loaded. */
+size_t hardcoded_ktext_size = 0;        /**< Size of the kernel code in bytes.
+                                          */
+size_t hardcoded_kdata_size = 0;        /**< Size of the kernel data in bytes.
+                                          */
+uintptr_t stack_safe = 0;               /**< Lowest safe stack virtual address.
+                                          */

 void main_bsp(void);
 void main_ap(void);

 /*
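As the comment above says, these variables are filled in by the linker or the low-level assembler boot code rather than by C initializers. One common way such link-time values reach C is through symbols exported by the linker script; the sketch below illustrates that pattern only as an analogy, with hypothetical symbol and function names that are not the ones HelenOS actually uses.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical linker-script symbols, e.g. PROVIDE(_kernel_start = .); */
extern char _kernel_start[];
extern char _ktext_end[];
extern char _kdata_end[];

uintptr_t hardcoded_load_address;
size_t hardcoded_ktext_size;
size_t hardcoded_kdata_size;

/* Called very early in boot to record where the image was linked/loaded. */
void early_record_image_layout(void)
{
        hardcoded_load_address = (uintptr_t) _kernel_start;
        hardcoded_ktext_size = (size_t) (_ktext_end - _kernel_start);
        hardcoded_kdata_size = (size_t) (_kdata_end - _ktext_end);
}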
@@ -139 +143 @@
         config.cpu_active = 1;

         config.base = hardcoded_load_address;
         config.memory_size = get_memory_size();

-        config.kernel_size = ALIGN_UP(hardcoded_ktext_size + hardcoded_kdata_size, PAGE_SIZE);
+        config.kernel_size = ALIGN_UP(hardcoded_ktext_size +
+            hardcoded_kdata_size, PAGE_SIZE);
         config.stack_size = CONFIG_STACK_SIZE;

         /* Initially the stack is placed just after the kernel */
         config.stack_base = config.base + config.kernel_size;

         /* Avoid placing stack on top of init */
         count_t i;
         for (i = 0; i < init.cnt; i++) {
-                if (PA_overlaps(config.stack_base, config.stack_size, init.tasks[i].addr, init.tasks[i].size))
-                        config.stack_base = ALIGN_UP(init.tasks[i].addr + init.tasks[i].size, config.stack_size);
+                if (PA_overlaps(config.stack_base, config.stack_size,
+                    init.tasks[i].addr, init.tasks[i].size))
+                        config.stack_base = ALIGN_UP(init.tasks[i].addr +
+                            init.tasks[i].size, config.stack_size);
         }

         /* Avoid placing stack on top of boot allocations. */
         if (ballocs.size) {
-                if (PA_overlaps(config.stack_base, config.stack_size, ballocs.base, ballocs.size))
-                        config.stack_base = ALIGN_UP(ballocs.base + ballocs.size, PAGE_SIZE);
+                if (PA_overlaps(config.stack_base, config.stack_size,
+                    ballocs.base, ballocs.size))
+                        config.stack_base = ALIGN_UP(ballocs.base +
+                            ballocs.size, PAGE_SIZE);
         }

         if (config.stack_base < stack_safe)
                 config.stack_base = ALIGN_UP(stack_safe, PAGE_SIZE);

         context_save(&ctx);
-        context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base, THREAD_STACK_SIZE);
+        context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,
+            THREAD_STACK_SIZE);
         context_restore(&ctx);
         /* not reached */
 }

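The stack placement above leans on two helpers, ALIGN_UP() and PA_overlaps() (macros.h is among the includes, so they come from the kernel headers): the first rounds an address up to a boundary, the second tests whether two address ranges intersect. Below is a minimal user-space sketch of the same arithmetic, assuming power-of-two alignment and half-open [base, base + size) ranges; the exact kernel macro definitions may differ in detail.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Round s up to the next multiple of a; a must be a power of two. */
#define ALIGN_UP(s, a)  (((s) + ((a) - 1)) & ~((uintptr_t) (a) - 1))

/* Do [x, x + szx) and [y, y + szy) overlap? */
static bool pa_overlaps(uintptr_t x, size_t szx, uintptr_t y, size_t szy)
{
        return (x < y + szy) && (y < x + szx);
}

int main(void)
{
        uintptr_t stack_base = 0x80104000;  /* made-up addresses for the demo */
        size_t stack_size = 0x4000;
        uintptr_t init_addr = 0x80106000;
        size_t init_size = 0x2000;

        /* Same policy as main_bsp(): if the stack would land on top of an
         * init image, move it just past the image, aligned to stack_size. */
        if (pa_overlaps(stack_base, stack_size, init_addr, init_size))
                stack_base = ALIGN_UP(init_addr + init_size, stack_size);

        printf("stack_base=0x%lx\n", (unsigned long) stack_base);
        return 0;
}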
@@ -198 +208 @@

         /*
          * Memory management subsystems initialization.
          */
         arch_pre_mm_init();
-        frame_init();   /* Initialize at least 1 memory segment big enough for slab to work */
+        frame_init();
+        /* Initialize at least 1 memory segment big enough for slab to work. */
         slab_cache_init();
         btree_init();
         as_init();
         page_init();
         tlb_init();
+        ddi_init();
         arch_post_mm_init();

         version_print();
-        printf("kernel: %.*p hardcoded_ktext_size=%zdK, hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2, config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >> 10);
-        printf("stack: %.*p size=%zdK\n", sizeof(uintptr_t) * 2, config.stack_base, config.stack_size >> 10);
+        printf("kernel: %.*p hardcoded_ktext_size=%zdK, "
+            "hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2,
+            config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >>
+            10);
+        printf("stack: %.*p size=%zdK\n", sizeof(uintptr_t) * 2,
+            config.stack_base, config.stack_size >> 10);

         arch_pre_smp_init();
         smp_init();
-
-        slab_enable_cpucache(); /* Slab must be initialized AFTER we know the number of processors */
+        /* Slab must be initialized after we know the number of processors. */
+        slab_enable_cpucache();

         printf("config.memory_size=%zdM\n", config.memory_size >> 20);
         printf("config.cpu_count=%zd\n", config.cpu_count);
         cpu_init();

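The banner printf() calls above pass a precision of sizeof(uintptr_t) * 2 to "%.*p" so addresses come out with two hex digits per pointer byte, and shift sizes right by 10 or 20 bits to report KiB and MiB. "%.*p" with a precision is the kernel printf's own behaviour; as an illustrative, hosted-C approximation only, the same effect can be sketched with PRIxPTR and a dynamic field width (the sizes below are made up for the demo):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uintptr_t base = 0x80100000;
        size_t ktext_size = 354 * 1024;
        size_t kdata_size = 62 * 1024;

        /* Zero-pad the address to 2 hex digits per pointer byte, similar in
         * spirit to the kernel's "%.*p" with sizeof(uintptr_t) * 2. */
        printf("kernel: %0*" PRIxPTR " ktext=%zuK kdata=%zuK\n",
            (int) (sizeof(uintptr_t) * 2), base,
            ktext_size >> 10, kdata_size >> 10);
        return 0;
}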
@@ -230 +246 @@
         futex_init();
         klog_init();

         if (init.cnt > 0) {
                 for (i = 0; i < init.cnt; i++)
-                        printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, sizeof(uintptr_t) * 2, init.tasks[i].addr, i, init.tasks[i].size);
+                        printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i,
+                            sizeof(uintptr_t) * 2, init.tasks[i].addr, i,
+                            init.tasks[i].size);
         } else
                 printf("No init binaries found\n");

         ipc_init();

@@ -302 +320 @@
         /*
          * If we woke kmp up before we left the kernel stack, we could
          * collide with another CPU coming up. To prevent this, we
          * switch to this cpu's private stack prior to waking kmp up.
          */
-        context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE);
+        context_set(&CPU->saved_context, FADDR(main_ap_separated_stack),
+            (uintptr_t) CPU->stack, CPU_STACK_SIZE);
         context_restore(&CPU->saved_context);
         /* not reached */
 }

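The comment in this hunk explains the ordering: the application processor must move off the shared kernel stack onto its private stack before kmp is woken, or the next CPU coming up could collide with it. context_set() aims the saved context at main_ap_separated_stack() on CPU->stack, and context_restore() jumps there and never returns. As a rough user-space analogy only, using POSIX <ucontext.h> rather than the kernel's context API:

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define STACK_SIZE (64 * 1024)

static ucontext_t old_ctx, new_ctx;

/* Runs on the private stack, analogous to main_ap_separated_stack(). */
static void separated_stack(void)
{
        printf("now running on the private stack\n");
        /* In the kernel this function never returns; here we just exit. */
        exit(0);
}

int main(void)
{
        void *stack = malloc(STACK_SIZE);
        if (stack == NULL)
                return 1;

        /* Prepare a context that starts separated_stack() on the new stack,
         * roughly what context_set() followed by context_restore() does. */
        getcontext(&new_ctx);
        new_ctx.uc_stack.ss_sp = stack;
        new_ctx.uc_stack.ss_size = STACK_SIZE;
        new_ctx.uc_link = NULL;
        makecontext(&new_ctx, separated_stack, 0);

        swapcontext(&old_ctx, &new_ctx);
        /* not reached */
        return 0;
}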