Subversion Repositories HelenOS-historic

Rev 822 → Rev 1059
/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <smp/smp.h>
#include <arch/smp/smp.h>
#include <arch/smp/mps.h>
#include <arch/smp/ap.h>
#include <arch/boot/boot.h>
#include <genarch/acpi/acpi.h>
#include <genarch/acpi/madt.h>
#include <config.h>
#include <synch/waitq.h>
#include <synch/synch.h>
#include <arch/pm.h>
#include <func.h>
#include <panic.h>
#include <debug.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/slab.h>
#include <mm/as.h>
#include <print.h>
#include <memstr.h>
#include <arch/i8259.h>

#ifdef CONFIG_SMP

static struct smp_config_operations *ops = NULL;

void smp_init(void)
{
+    int status;
+    __address l_apic_address, io_apic_address;
+
    if (acpi_madt) {
        acpi_madt_parse();
        ops = &madt_config_operations;
    }
    if (config.cpu_count == 1) {
        mps_init();
        ops = &mps_config_operations;
    }

+    l_apic_address = PA2KA(PFN2ADDR(frame_alloc_rc(ONE_FRAME, FRAME_ATOMIC | FRAME_KA, &status)));
+    if (status != FRAME_OK)
+        panic("cannot allocate address for l_apic\n");
+
+    io_apic_address = PA2KA(PFN2ADDR(frame_alloc_rc(ONE_FRAME, FRAME_ATOMIC | FRAME_KA, &status)));
+    if (status != FRAME_OK)
+        panic("cannot allocate address for io_apic\n");
+
    if (config.cpu_count > 1) {
-        page_mapping_insert(AS_KERNEL, (__address) l_apic, (__address) l_apic,
+        page_mapping_insert(AS_KERNEL, l_apic_address, (__address) l_apic,
                  PAGE_NOT_CACHEABLE);
-        page_mapping_insert(AS_KERNEL, (__address) io_apic, (__address) io_apic,
+        page_mapping_insert(AS_KERNEL, io_apic_address, (__address) io_apic,
                  PAGE_NOT_CACHEABLE);
+
+        l_apic = (__u32 *) l_apic_address;
+        io_apic = (__u32 *) io_apic_address;
    }

    /*
     * Must be initialized outside the kmp thread, since it is waited
     * on before the kmp thread is created.
     */
    waitq_initialize(&kmp_completion_wq);

}

/*
 * Kernel thread for bringing up application processors. It becomes clear
 * that we need an arrangement like this (AP's being initialized by a kernel
 * thread), for a thread has its dedicated stack. (The stack used during the
 * BSP initialization (prior the very first call to scheduler()) will be used
 * as an initialization stack for each AP.)
 */
void kmp(void *arg)
{
    int i;

    ASSERT(ops != NULL);

    waitq_initialize(&ap_completion_wq);

    /*
     * We need to access data in frame 0.
     * We boldly make use of kernel address space mapping.
     */

    /*
     * Set the warm-reset vector to the real-mode address of 4K-aligned ap_boot()
     */
    *((__u16 *) (PA2KA(0x467+0))) =  ((__address) ap_boot) >> 4;    /* segment */
    *((__u16 *) (PA2KA(0x467+2))) =  0;                             /* offset */

    /*
     * Save 0xa to address 0xf of the CMOS RAM.
     * BIOS will not do the POST after the INIT signal.
     */
    outb(0x70,0xf);
    outb(0x71,0xa);

    pic_disable_irqs(0xffff);
    apic_init();

    for (i = 0; i < ops->cpu_count(); i++) {
        struct descriptor *gdt_new;

        /*
         * Skip processors marked unusable.
         */
        if (!ops->cpu_enabled(i))
            continue;

        /*
         * The bootstrap processor is already up.
         */
        if (ops->cpu_bootstrap(i))
            continue;

        if (ops->cpu_apic_id(i) == l_apic_id()) {
            printf("%s: bad processor entry #%d, will not send IPI to myself\n", __FUNCTION__, i);
            continue;
        }

        /*
         * Prepare new GDT for CPU in question.
         */
        if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS*sizeof(struct descriptor), FRAME_ATOMIC)))
            panic("couldn't allocate memory for GDT\n");

        memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor));
        memsetb((__address)(&gdt_new[TSS_DES]), sizeof(struct descriptor), 0);
        protected_ap_gdtr.limit = GDT_ITEMS * sizeof(struct descriptor);
        protected_ap_gdtr.base = KA2PA((__address) gdt_new);
        gdtr.base = (__address) gdt_new;

        if (l_apic_send_init_ipi(ops->cpu_apic_id(i))) {
            /*
             * There may be just one AP being initialized at
             * the time. After it comes completely up, it is
             * supposed to wake us up.
             */
            if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_BLOCKING) == ESYNCH_TIMEOUT)
                printf("%s: waiting for cpu%d (APIC ID = %d) timed out\n", __FUNCTION__, config.cpu_active > i ? config.cpu_active : i, ops->cpu_apic_id(i));
        } else
            printf("INIT IPI for l_apic%d failed\n", ops->cpu_apic_id(i));
    }

    /*
     * Wakeup the kinit thread so that
     * system initialization can go on.
     */
    waitq_wakeup(&kmp_completion_wq, WAKEUP_FIRST);
}

int smp_irq_to_pin(int irq)
{
    ASSERT(ops != NULL);
    return ops->irq_to_pin(irq);
}

#endif /* CONFIG_SMP */