Subversion Repositories HelenOS


--- Rev 3386
+++ Rev 4153
 /*
  * Copyright (c) 2008 Jakub Jermar
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * - Redistributions of source code must retain the above copyright
  *   notice, this list of conditions and the following disclaimer.
  * - Redistributions in binary form must reproduce the above copyright
  *   notice, this list of conditions and the following disclaimer in the
  *   documentation and/or other materials provided with the distribution.
  * - The name of the author may not be used to endorse or promote products
  *   derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 /** @addtogroup ia32
  * @{
  */
 /** @file
  */

 #include <smp/smp.h>
 #include <arch/smp/smp.h>
 #include <arch/smp/mps.h>
 #include <arch/smp/ap.h>
 #include <arch/boot/boot.h>
 #include <genarch/acpi/acpi.h>
 #include <genarch/acpi/madt.h>
 #include <config.h>
 #include <synch/waitq.h>
 #include <synch/synch.h>
 #include <arch/pm.h>
 #include <func.h>
 #include <panic.h>
 #include <debug.h>
 #include <arch/asm.h>
 #include <mm/frame.h>
 #include <mm/page.h>
 #include <mm/slab.h>
 #include <mm/as.h>
 #include <print.h>
 #include <memstr.h>
 #include <arch/drivers/i8259.h>

 #ifdef CONFIG_SMP

 static struct smp_config_operations *ops = NULL;

 void smp_init(void)
 {
     uintptr_t l_apic_address, io_apic_address;

     if (acpi_madt) {
         acpi_madt_parse();
         ops = &madt_config_operations;
     }
     if (config.cpu_count == 1) {
         mps_init();
         ops = &mps_config_operations;
     }

     l_apic_address = (uintptr_t) frame_alloc(ONE_FRAME,
         FRAME_ATOMIC | FRAME_KA);
     if (!l_apic_address)
-        panic("cannot allocate address for l_apic\n");
+        panic("Cannot allocate address for l_apic.");

     io_apic_address = (uintptr_t) frame_alloc(ONE_FRAME,
         FRAME_ATOMIC | FRAME_KA);
     if (!io_apic_address)
-        panic("cannot allocate address for io_apic\n");
+        panic("Cannot allocate address for io_apic.");

     if (config.cpu_count > 1) {
         page_mapping_insert(AS_KERNEL, l_apic_address,
             (uintptr_t) l_apic, PAGE_NOT_CACHEABLE | PAGE_WRITE);
         page_mapping_insert(AS_KERNEL, io_apic_address,
             (uintptr_t) io_apic, PAGE_NOT_CACHEABLE | PAGE_WRITE);

         l_apic = (uint32_t *) l_apic_address;
         io_apic = (uint32_t *) io_apic_address;
     }
 }

 /*
  * Kernel thread for bringing up application processors. It becomes clear
  * that we need an arrangement like this (APs being initialized by a kernel
  * thread), for a thread has its dedicated stack. (The stack used during the
  * BSP initialization (prior to the very first call to scheduler()) will be
  * used as an initialization stack for each AP.)
  */
 void kmp(void *arg __attribute__((unused)))
 {
     unsigned int i;

     ASSERT(ops != NULL);

     /*
      * We need to access data in frame 0.
      * We boldly make use of kernel address space mapping.
      */

     /*
      * Set the warm-reset vector to the real-mode address of 4K-aligned ap_boot().
      */
     *((uint16_t *) (PA2KA(0x467 + 0))) =
         (uint16_t) (((uintptr_t) ap_boot) >> 4);    /* segment */
     *((uint16_t *) (PA2KA(0x467 + 2))) = 0;     /* offset */

     /*
      * Save 0xa to address 0xf of the CMOS RAM.
      * BIOS will not do the POST after the INIT signal.
      */
-    outb(0x70, 0xf);
-    outb(0x71, 0xa);
+    pio_write_8((ioport8_t *)0x70, 0xf);
+    pio_write_8((ioport8_t *)0x71, 0xa);

     pic_disable_irqs(0xffff);
     apic_init();

     uint8_t apic = l_apic_id();

     for (i = 0; i < ops->cpu_count(); i++) {
-        struct descriptor *gdt_new;
+        descriptor_t *gdt_new;

         /*
          * Skip processors marked unusable.
          */
         if (!ops->cpu_enabled(i))
             continue;

         /*
          * The bootstrap processor is already up.
          */
         if (ops->cpu_bootstrap(i))
             continue;

         if (ops->cpu_apic_id(i) == apic) {
             printf("%s: bad processor entry #%u, will not send IPI "
                 "to myself\n", __FUNCTION__, i);
             continue;
         }

         /*
          * Prepare new GDT for CPU in question.
          */
-        gdt_new = (struct descriptor *) malloc(GDT_ITEMS *
-            sizeof(struct descriptor), FRAME_ATOMIC | FRAME_LOW_4_GiB);
+
+        /* XXX Flag FRAME_LOW_4_GiB was removed temporarily,
+         * it needs to be replaced by a generic functionality of
+         * the memory subsystem.
+         */
+        gdt_new = (descriptor_t *) malloc(GDT_ITEMS *
+            sizeof(descriptor_t), FRAME_ATOMIC);
         if (!gdt_new)
-            panic("couldn't allocate memory for GDT\n");
+            panic("Cannot allocate memory for GDT.");

-        memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor));
-        memsetb(&gdt_new[TSS_DES], sizeof(struct descriptor), 0);
-        protected_ap_gdtr.limit = GDT_ITEMS * sizeof(struct descriptor);
+        memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(descriptor_t));
+        memsetb(&gdt_new[TSS_DES], sizeof(descriptor_t), 0);
+        protected_ap_gdtr.limit = GDT_ITEMS * sizeof(descriptor_t);
         protected_ap_gdtr.base = KA2PA((uintptr_t) gdt_new);
         gdtr.base = (uintptr_t) gdt_new;

         if (l_apic_send_init_ipi(ops->cpu_apic_id(i))) {
             /*
              * There may be just one AP being initialized at
              * a time. After it comes completely up, it is
              * supposed to wake us up.
              */
             if (waitq_sleep_timeout(&ap_completion_wq, 1000000,
                 SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) {
                 unsigned int cpu = (config.cpu_active > i) ?
                     config.cpu_active : i;
                 printf("%s: waiting for cpu%u (APIC ID = %d) "
                     "timed out\n", __FUNCTION__, cpu,
                     ops->cpu_apic_id(i));
             }
         } else
             printf("INIT IPI for l_apic%d failed\n",
                 ops->cpu_apic_id(i));
     }
 }

 int smp_irq_to_pin(unsigned int irq)
 {
     ASSERT(ops != NULL);
     return ops->irq_to_pin(irq);
 }

 #endif /* CONFIG_SMP */

 /** @}
  */