Rev 534 | Rev 687 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 534 | Rev 625 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2005 Jakub Jermar |
2 | * Copyright (C) 2005 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | #include <smp/smp.h> |
29 | #include <smp/smp.h> |
30 | #include <arch/smp/smp.h> |
30 | #include <arch/smp/smp.h> |
31 | #include <arch/smp/mps.h> |
31 | #include <arch/smp/mps.h> |
32 | #include <arch/smp/ap.h> |
32 | #include <arch/smp/ap.h> |
33 | #include <genarch/acpi/acpi.h> |
33 | #include <genarch/acpi/acpi.h> |
34 | #include <genarch/acpi/madt.h> |
34 | #include <genarch/acpi/madt.h> |
35 | #include <config.h> |
35 | #include <config.h> |
36 | #include <synch/waitq.h> |
36 | #include <synch/waitq.h> |
37 | #include <synch/synch.h> |
37 | #include <synch/synch.h> |
38 | #include <arch/pm.h> |
38 | #include <arch/pm.h> |
39 | #include <func.h> |
39 | #include <func.h> |
40 | #include <panic.h> |
40 | #include <panic.h> |
41 | #include <debug.h> |
41 | #include <debug.h> |
42 | #include <arch/asm.h> |
42 | #include <arch/asm.h> |
43 | #include <mm/frame.h> |
43 | #include <mm/frame.h> |
44 | #include <mm/page.h> |
44 | #include <mm/page.h> |
45 | #include <mm/heap.h> |
45 | #include <mm/heap.h> |
46 | #include <print.h> |
46 | #include <print.h> |
47 | #include <memstr.h> |
47 | #include <memstr.h> |
48 | #include <arch/i8259.h> |
48 | #include <arch/i8259.h> |
49 | 49 | ||
50 | #ifdef CONFIG_SMP |
50 | #ifdef CONFIG_SMP |
51 | 51 | ||
/*
 * SMP configuration backend selected by smp_init(): ACPI MADT if present,
 * otherwise Intel MPS tables. Stays NULL if neither probe ran.
 */
static struct smp_config_operations *ops = NULL;
53 | 53 | ||
/** Detect and prepare the SMP configuration.
 *
 * Probes ACPI MADT first; if after parsing only the bootstrap CPU was found
 * (config.cpu_count == 1), falls back to the Intel MPS tables. Whichever
 * probe ran last sets the backend operations used by kmp() and
 * smp_irq_to_pin(). On a true multiprocessor, identity-maps the local APIC
 * and IO APIC registers as non-cacheable so they can be accessed directly.
 */
void smp_init(void)
{
	if (acpi_madt) {
		acpi_madt_parse();
		ops = &madt_config_operations;
	}
	/*
	 * Presumably MADT was absent or reported just the BSP — try the
	 * legacy MPS tables instead. TODO(review): confirm this fallback
	 * intent; cpu_count is updated by the parsers above.
	 */
	if (config.cpu_count == 1) {
		mps_init();
		ops = &mps_config_operations;
	}

	if (config.cpu_count > 1) {
		/* APIC registers are memory-mapped I/O; caching must be off. */
		page_mapping_insert((__address)l_apic, (__address)l_apic,
				    PAGE_NOT_CACHEABLE, 0);
		page_mapping_insert((__address) io_apic,
				    (__address) io_apic,
				    PAGE_NOT_CACHEABLE, 0);
	}

	/*
	 * Must be initialized outside the kmp thread, since it is waited
	 * on before the kmp thread is created.
	 */
	waitq_initialize(&kmp_completion_wq);

}
80 | 80 | ||
81 | /* |
81 | /* |
82 | * Kernel thread for bringing up application processors. It becomes clear |
82 | * Kernel thread for bringing up application processors. It becomes clear |
83 | * that we need an arrangement like this (AP's being initialized by a kernel |
83 | * that we need an arrangement like this (AP's being initialized by a kernel |
84 | * thread), for a thread has its dedicated stack. (The stack used during the |
84 | * thread), for a thread has its dedicated stack. (The stack used during the |
85 | * BSP initialization (prior the very first call to scheduler()) will be used |
85 | * BSP initialization (prior the very first call to scheduler()) will be used |
86 | * as an initialization stack for each AP.) |
86 | * as an initialization stack for each AP.) |
87 | */ |
87 | */ |
88 | void kmp(void *arg) |
88 | void kmp(void *arg) |
89 | { |
89 | { |
90 | __address src, dst; |
- | |
91 | int i; |
90 | int i; |
92 | 91 | ||
93 | ASSERT(ops != NULL); |
92 | ASSERT(ops != NULL); |
94 | 93 | ||
95 | waitq_initialize(&ap_completion_wq); |
94 | waitq_initialize(&ap_completion_wq); |
96 | 95 | ||
97 | /* |
96 | /* |
98 | * We need to access data in frame 0. |
97 | * We need to access data in frame 0. |
99 | * We boldly make use of kernel address space mapping. |
98 | * We boldly make use of kernel address space mapping. |
100 | */ |
99 | */ |
101 | 100 | ||
102 | /* |
101 | /* |
103 | * Set the warm-reset vector to the real-mode address of 4K-aligned ap_boot() |
102 | * Set the warm-reset vector to the real-mode address of 4K-aligned ap_boot() |
104 | */ |
103 | */ |
105 | *((__u16 *) (PA2KA(0x467+0))) = ((__address) ap_boot) >> 4; /* segment */ |
104 | *((__u16 *) (PA2KA(0x467+0))) = ((__address) ap_boot) >> 4; /* segment */ |
106 | *((__u16 *) (PA2KA(0x467+2))) = 0; /* offset */ |
105 | *((__u16 *) (PA2KA(0x467+2))) = 0; /* offset */ |
107 | 106 | ||
108 | /* |
107 | /* |
109 | * Save 0xa to address 0xf of the CMOS RAM. |
108 | * Save 0xa to address 0xf of the CMOS RAM. |
110 | * BIOS will not do the POST after the INIT signal. |
109 | * BIOS will not do the POST after the INIT signal. |
111 | */ |
110 | */ |
112 | outb(0x70,0xf); |
111 | outb(0x70,0xf); |
113 | outb(0x71,0xa); |
112 | outb(0x71,0xa); |
114 | 113 | ||
115 | pic_disable_irqs(0xffff); |
114 | pic_disable_irqs(0xffff); |
116 | apic_init(); |
115 | apic_init(); |
117 | 116 | ||
118 | for (i = 0; i < ops->cpu_count(); i++) { |
117 | for (i = 0; i < ops->cpu_count(); i++) { |
119 | struct descriptor *gdt_new; |
118 | struct descriptor *gdt_new; |
120 | 119 | ||
121 | /* |
120 | /* |
122 | * Skip processors marked unusable. |
121 | * Skip processors marked unusable. |
123 | */ |
122 | */ |
124 | if (!ops->cpu_enabled(i)) |
123 | if (!ops->cpu_enabled(i)) |
125 | continue; |
124 | continue; |
126 | 125 | ||
127 | /* |
126 | /* |
128 | * The bootstrap processor is already up. |
127 | * The bootstrap processor is already up. |
129 | */ |
128 | */ |
130 | if (ops->cpu_bootstrap(i)) |
129 | if (ops->cpu_bootstrap(i)) |
131 | continue; |
130 | continue; |
132 | 131 | ||
133 | if (ops->cpu_apic_id(i) == l_apic_id()) { |
132 | if (ops->cpu_apic_id(i) == l_apic_id()) { |
134 | printf("%s: bad processor entry #%d, will not send IPI to myself\n", __FUNCTION__, i); |
133 | printf("%s: bad processor entry #%d, will not send IPI to myself\n", __FUNCTION__, i); |
135 | continue; |
134 | continue; |
136 | } |
135 | } |
137 | 136 | ||
138 | /* |
137 | /* |
139 | * Prepare new GDT for CPU in question. |
138 | * Prepare new GDT for CPU in question. |
140 | */ |
139 | */ |
141 | if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS*sizeof(struct descriptor)))) |
140 | if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS*sizeof(struct descriptor)))) |
142 | panic("couldn't allocate memory for GDT\n"); |
141 | panic("couldn't allocate memory for GDT\n"); |
143 | 142 | ||
144 | memcpy(gdt_new, gdt, GDT_ITEMS*sizeof(struct descriptor)); |
143 | memcpy(gdt_new, gdt, GDT_ITEMS*sizeof(struct descriptor)); |
145 | memsetb((__address)(&gdt_new[TSS_DES]), sizeof(struct descriptor), 0); |
144 | memsetb((__address)(&gdt_new[TSS_DES]), sizeof(struct descriptor), 0); |
146 | real_bootstrap_gdtr.base = KA2PA((__address) gdt_new); |
145 | real_bootstrap_gdtr.base = KA2PA((__address) gdt_new); |
147 | gdtr.base = (__address) gdt_new; |
146 | gdtr.base = (__address) gdt_new; |
148 | 147 | ||
149 | if (l_apic_send_init_ipi(ops->cpu_apic_id(i))) { |
148 | if (l_apic_send_init_ipi(ops->cpu_apic_id(i))) { |
150 | /* |
149 | /* |
151 | * There may be just one AP being initialized at |
150 | * There may be just one AP being initialized at |
152 | * the time. After it comes completely up, it is |
151 | * the time. After it comes completely up, it is |
153 | * supposed to wake us up. |
152 | * supposed to wake us up. |
154 | */ |
153 | */ |
155 | if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_BLOCKING) == ESYNCH_TIMEOUT) |
154 | if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_BLOCKING) == ESYNCH_TIMEOUT) |
156 | printf("%s: waiting for cpu%d (APIC ID = %d) timed out\n", __FUNCTION__, config.cpu_active > i ? config.cpu_active : i, ops->cpu_apic_id(i)); |
155 | printf("%s: waiting for cpu%d (APIC ID = %d) timed out\n", __FUNCTION__, config.cpu_active > i ? config.cpu_active : i, ops->cpu_apic_id(i)); |
157 | } else |
156 | } else |
158 | printf("INIT IPI for l_apic%d failed\n", ops->cpu_apic_id(i)); |
157 | printf("INIT IPI for l_apic%d failed\n", ops->cpu_apic_id(i)); |
159 | } |
158 | } |
160 | 159 | ||
161 | /* |
160 | /* |
162 | * Wakeup the kinit thread so that |
161 | * Wakeup the kinit thread so that |
163 | * system initialization can go on. |
162 | * system initialization can go on. |
164 | */ |
163 | */ |
165 | waitq_wakeup(&kmp_completion_wq, WAKEUP_FIRST); |
164 | waitq_wakeup(&kmp_completion_wq, WAKEUP_FIRST); |
166 | } |
165 | } |
167 | 166 | ||
168 | int smp_irq_to_pin(int irq) |
167 | int smp_irq_to_pin(int irq) |
169 | { |
168 | { |
170 | ASSERT(ops != NULL); |
169 | ASSERT(ops != NULL); |
171 | return ops->irq_to_pin(irq); |
170 | return ops->irq_to_pin(irq); |
172 | } |
171 | } |
173 | 172 | ||
174 | #endif /* CONFIG_SMP */ |
173 | #endif /* CONFIG_SMP */ |
175 | 174 |