/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

 /** @addtogroup ia32
 * @{
 */
/** @file
 */

#include <smp/smp.h>
#include <arch/smp/smp.h>
#include <arch/smp/mps.h>
#include <arch/smp/ap.h>
#include <arch/boot/boot.h>
#include <genarch/acpi/acpi.h>
#include <genarch/acpi/madt.h>
#include <config.h>
#include <synch/waitq.h>
#include <synch/synch.h>
#include <arch/pm.h>
#include <func.h>
#include <panic.h>
#include <debug.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/slab.h>
#include <mm/as.h>
#include <print.h>
#include <memstr.h>
#include <arch/drivers/i8259.h>

#ifdef CONFIG_SMP
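
/*
 * Configuration operations of the SMP enumeration backend in use,
 * either ACPI MADT or the Intel MP Specification tables.
 */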
static struct smp_config_operations *ops = NULL;

void smp_init(void)
{
    int status;
    __address l_apic_address, io_apic_address;

    if (acpi_madt) {
        acpi_madt_parse();
        ops = &madt_config_operations;
    }
    if (config.cpu_count == 1) {
        mps_init();
        ops = &mps_config_operations;
    }
 
    l_apic_address = PA2KA(PFN2ADDR(frame_alloc_rc(ONE_FRAME, FRAME_ATOMIC | FRAME_KA, &status)));
    if (status != FRAME_OK)
        panic("cannot allocate address for l_apic\n");

    io_apic_address = PA2KA(PFN2ADDR(frame_alloc_rc(ONE_FRAME, FRAME_ATOMIC | FRAME_KA, &status)));
    if (status != FRAME_OK)
        panic("cannot allocate address for io_apic\n");
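
    /*
     * On a multiprocessor system, map the physical local APIC and I/O APIC
     * registers at the freshly allocated kernel addresses as non-cacheable
     * memory and repoint l_apic and io_apic to the new mappings.
     */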
    if (config.cpu_count > 1) {
        page_mapping_insert(AS_KERNEL, l_apic_address, (__address) l_apic,
                  PAGE_NOT_CACHEABLE);
        page_mapping_insert(AS_KERNEL, io_apic_address, (__address) io_apic,
                  PAGE_NOT_CACHEABLE);

        l_apic = (__u32 *) l_apic_address;
        io_apic = (__u32 *) io_apic_address;
    }
}

/*
 * Kernel thread for bringing up application processors. An arrangement
 * like this (APs being initialized by a kernel thread) is necessary,
 * because each thread has its own dedicated stack. (The stack used during
 * BSP initialization, prior to the very first call to scheduler(), is
 * reused as the initialization stack of each AP.)
 */
void kmp(void *arg)
{
    int i;

    ASSERT(ops != NULL);
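
    /*
     * Each AP wakes us up via ap_completion_wq once it has come
     * completely up.
     */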
    waitq_initialize(&ap_completion_wq);

    /*
     * We need to access data in frame 0.
     * We boldly make use of kernel address space mapping.
     */

    /*
     * Set the warm-reset vector to the real-mode address of 4K-aligned ap_boot()
     */
    *((__u16 *) (PA2KA(0x467+0))) =  ((__address) ap_boot) >> 4;    /* segment */
    *((__u16 *) (PA2KA(0x467+2))) =  0;             /* offset */

    /*
     * Save 0xa to address 0xf of the CMOS RAM so that the BIOS will not
     * do the POST after the INIT signal.
     */
    outb(0x70,0xf);
    outb(0x71,0xa);
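
    /*
     * Mask all i8259 PIC interrupt lines and initialize the APIC.
     */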
    pic_disable_irqs(0xffff);
    apic_init();
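
    /*
     * Try to bring up each usable processor other than the bootstrap one.
     */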
    for (i = 0; i < ops->cpu_count(); i++) {
        struct descriptor *gdt_new;

        /*
         * Skip processors marked unusable.
         */
        if (!ops->cpu_enabled(i))
            continue;

        /*
         * The bootstrap processor is already up.
         */
        if (ops->cpu_bootstrap(i))
            continue;

        if (ops->cpu_apic_id(i) == l_apic_id()) {
            printf("%s: bad processor entry #%d, will not send IPI to myself\n", __FUNCTION__, i);
            continue;
        }

        /*
         * Prepare new GDT for CPU in question.
         */
        if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS*sizeof(struct descriptor), FRAME_ATOMIC)))
            panic("couldn't allocate memory for GDT\n");

        memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor));
        memsetb((__address)(&gdt_new[TSS_DES]), sizeof(struct descriptor), 0);
        protected_ap_gdtr.limit = GDT_ITEMS * sizeof(struct descriptor);
        protected_ap_gdtr.base = KA2PA((__address) gdt_new);
        gdtr.base = (__address) gdt_new;

        if (l_apic_send_init_ipi(ops->cpu_apic_id(i))) {
            /*
             * There may be just one AP being initialized at
             * a time. After it comes completely up, it is
             * supposed to wake us up.
             */
            if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT)
                printf("%s: waiting for cpu%d (APIC ID = %d) timed out\n", __FUNCTION__, config.cpu_active > i ? config.cpu_active : i, ops->cpu_apic_id(i));
        } else
            printf("INIT IPI for l_apic%d failed\n", ops->cpu_apic_id(i));
    }
}
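
/*
 * Translate an IRQ number to the corresponding I/O APIC pin, as reported by
 * the active SMP configuration backend.
 */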
int smp_irq_to_pin(int irq)
{
    ASSERT(ops != NULL);
    return ops->irq_to_pin(irq);
}

#endif /* CONFIG_SMP */

 /** @}
 */