Subversion Repositories HelenOS

Rev 2292 → Rev 2307 (Rev 2307 adds #include <synch/rcu.h> and a call to rcu_init() in main_bsp_separated_stack())
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup main
 * @{
 */

/**
 * @file
 * @brief   Main initialization kernel function for all processors.
 *
 * During kernel boot, all processors, after architecture dependent
 * initialization, start executing code found in this file. After
 * bringing up all subsystems, control is passed to scheduler().
 *
 * The bootstrap processor starts executing main_bsp() while
 * the application processors start executing main_ap().
 *
 * @see scheduler()
 * @see main_bsp()
 * @see main_ap()
 */

#include <arch/asm.h>
#include <context.h>
#include <print.h>
#include <panic.h>
#include <debug.h>
#include <config.h>
#include <time/clock.h>
#include <time/timeout.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <main/kinit.h>
#include <main/version.h>
#include <console/kconsole.h>
#include <cpu.h>
#include <align.h>
#include <interrupt.h>
#include <arch/mm/memory_init.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <genarch/mm/page_pt.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <synch/waitq.h>
#include <synch/futex.h>
#include <arch/arch.h>
#include <arch.h>
#include <arch/faddr.h>
#include <ipc/ipc.h>
#include <macros.h>
#include <adt/btree.h>
#include <console/klog.h>
#include <smp/smp.h>
#include <ddi/ddi.h>
#include <proc/tasklet.h>
#include <synch/rcu.h>

/** Global configuration structure. */
config_t config;

/** Initial user-space tasks */
init_t init = {
    0
};

/** Boot allocations. */
ballocs_t ballocs = {
    .base = NULL,
    .size = 0
};
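
/**
 * Bootstrap context. The BSP uses it below to switch onto its boot stack;
 * during SMP bring-up, ctx.sp also serves as the temporary stack for the
 * application processors (see main_ap()).
 */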
context_t ctx;

/*
 * These 'hardcoded' variables will be initialized by
 * the linker or the low level assembler code with
 * appropriate sizes and addresses.
 */
/** Virtual address of where the kernel is loaded. */
uintptr_t hardcoded_load_address = 0;
/** Size of the kernel code in bytes. */
size_t hardcoded_ktext_size = 0;
/** Size of the kernel data in bytes. */
size_t hardcoded_kdata_size = 0;
/** Lowest safe stack virtual address. */
uintptr_t stack_safe = 0;

void main_bsp(void);
void main_ap(void);

/*
 * These two functions prevent stack from underflowing during the
 * kernel boot phase when SP is set to the very top of the reserved
 * space. The stack could get corrupted by a fooled compiler-generated
 * pop sequence otherwise.
 */
static void main_bsp_separated_stack(void);
#ifdef CONFIG_SMP
static void main_ap_separated_stack(void);
#endif
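
/** Size of the bootstrap kernel stack: (1 << STACK_FRAMES) times STACK_SIZE bytes. */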
#define CONFIG_STACK_SIZE   ((1 << STACK_FRAMES) * STACK_SIZE)

/** Main kernel routine for bootstrap CPU.
 *
 * Initializes the kernel on the bootstrap CPU and then passes control
 * directly to main_bsp_separated_stack().
 *
 * Assumes interrupts are disabled.
 *
 */
void main_bsp(void)
{
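    /*
     * Only the bootstrap processor is running at this point;
     * each AP increments cpu_active in main_ap().
     */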
    config.cpu_count = 1;
    config.cpu_active = 1;

    config.base = hardcoded_load_address;
    config.memory_size = get_memory_size();

    config.kernel_size = ALIGN_UP(hardcoded_ktext_size +
        hardcoded_kdata_size, PAGE_SIZE);
    config.stack_size = CONFIG_STACK_SIZE;

    /* Initially the stack is placed just after the kernel */
    config.stack_base = config.base + config.kernel_size;

    /* Avoid placing stack on top of init */
    count_t i;
    for (i = 0; i < init.cnt; i++) {
        if (PA_overlaps(config.stack_base, config.stack_size,
            init.tasks[i].addr, init.tasks[i].size))
            config.stack_base = ALIGN_UP(init.tasks[i].addr +
                init.tasks[i].size, config.stack_size);
    }

    /* Avoid placing stack on top of boot allocations. */
    if (ballocs.size) {
        if (PA_overlaps(config.stack_base, config.stack_size,
            ballocs.base, ballocs.size))
            config.stack_base = ALIGN_UP(ballocs.base +
                ballocs.size, PAGE_SIZE);
    }

    if (config.stack_base < stack_safe)
        config.stack_base = ALIGN_UP(stack_safe, PAGE_SIZE);

    context_save(&ctx);
    context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,
        THREAD_STACK_SIZE);
    context_restore(&ctx);
    /* not reached */
}


/** Main kernel routine for bootstrap CPU using new stack.
 *
 * Second part of main_bsp().
 *
 */
void main_bsp_separated_stack(void)
{
    task_t *k;
    thread_t *t;
    count_t i;

    the_initialize(THE);

    /*
     * kconsole data structures must be initialized very early
     * because other subsystems will register their respective
     * commands.
     */
    kconsole_init();

    /*
     * Exception handler initialization, before architecture
     * starts adding its own handlers
     */
    exc_init();

    /*
     * Memory management subsystems initialization.
     */
    arch_pre_mm_init();
    frame_init();
    /* Initialize at least 1 memory segment big enough for slab to work. */
    slab_cache_init();
    btree_init();
    as_init();
    page_init();
    tlb_init();
    ddi_init();
    tasklet_init();
    arch_post_mm_init();

    version_print();
    printf("kernel: %.*p hardcoded_ktext_size=%zdK, "
        "hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2,
        config.base, hardcoded_ktext_size >> 10,
        hardcoded_kdata_size >> 10);
    printf("stack:  %.*p size=%zdK\n", sizeof(uintptr_t) * 2,
        config.stack_base, config.stack_size >> 10);

    arch_pre_smp_init();
    smp_init();
    /* Slab must be initialized after we know the number of processors. */
    slab_enable_cpucache();

    printf("config.memory_size=%zdM\n", config.memory_size >> 20);
    printf("config.cpu_count=%zd\n", config.cpu_count);
    cpu_init();
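
    /*
     * Bring up timekeeping, the scheduler, task and thread management,
     * futexes and the kernel log.
     */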
    calibrate_delay_loop();
    clock_counter_init();
    timeout_init();
    scheduler_init();
    task_init();
    thread_init();
    futex_init();
    klog_init();

    if (init.cnt > 0) {
        for (i = 0; i < init.cnt; i++)
            printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i,
                sizeof(uintptr_t) * 2, init.tasks[i].addr, i,
                init.tasks[i].size);
    } else
        printf("No init binaries found\n");

    ipc_init();

    /*
     * Create kernel task.
     */
    k = task_create(AS_KERNEL, "kernel");
    if (!k)
        panic("can't create kernel task\n");

    /*
     * Create the first thread.
     */
    t = thread_create(kinit, NULL, k, 0, "kinit", true);
    if (!t)
        panic("can't create kinit thread\n");
    thread_ready(t);

    tasklet_run_tasklet_thread(k);

    rcu_init();

    /*
     * This call to scheduler() will return to kinit,
     * starting the thread of kernel threads.
     */
    scheduler();
    /* not reached */
}


#ifdef CONFIG_SMP
/** Main kernel routine for application CPUs.
 *
 * Executed by application processors; the temporary stack
 * is at ctx.sp, which was set up during BSP boot.
 * This function passes control directly to
 * main_ap_separated_stack().
 *
 * Assumes interrupts are disabled.
 *
 */
void main_ap(void)
{
    /*
     * Incrementing the active CPU counter will guarantee that the
     * *_init() functions can find out that they need to
     * do initialization for AP only.
     */
    config.cpu_active++;

    /*
     * The THE structure is well defined because ctx.sp is used as stack.
     */
    the_initialize(THE);
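
    /* Per-CPU part of the memory management initialization. */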
    arch_pre_mm_init();
    frame_init();
    page_init();
    tlb_init();
    arch_post_mm_init();

    cpu_init();
    calibrate_delay_loop();
    arch_post_cpu_init();
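
    /*
     * Copy the THE structure onto this CPU's private stack before
     * switching to it below.
     */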
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * If we woke kmp up before we left the kernel stack, we could
     * collide with another CPU coming up. To prevent this, we
     * switch to this cpu's private stack prior to waking kmp up.
     */
    context_set(&CPU->saved_context, FADDR(main_ap_separated_stack),
        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


/** Main kernel routine for application CPUs using new stack.
 *
 * Second part of main_ap().
 *
 */
void main_ap_separated_stack(void)
{
    /*
     * Configure timeouts for this cpu.
     */
    timeout_init();
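
    /*
     * Wake kmp up now that this AP runs on its private stack
     * (see the comment in main_ap()).
     */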
    waitq_wakeup(&ap_completion_wq, WAKEUP_FIRST);
    scheduler();
    /* not reached */
}
#endif /* CONFIG_SMP */

/** @}
 */