Subversion Repositories HelenOS

Rev 131 → Rev 170 (the two revisions differ only by one removed line, marked in the listing below)
/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <smp/smp.h>
#include <arch/smp/smp.h>
#include <arch/smp/mps.h>
#include <arch/smp/ap.h>
#include <arch/acpi/acpi.h>
#include <arch/acpi/madt.h>
#include <config.h>
#include <synch/waitq.h>
#include <arch/pm.h>
#include <func.h>
#include <panic.h>
#include <debug.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/heap.h>

#ifdef __SMP__

static struct smp_config_operations *ops = NULL;
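
/*
 * Sketch of the configuration back-end interface as it is used in this file
 * (inferred from the call sites in kmp() below; the real declaration and the
 * exact member types live in the SMP/MADT/MPS headers, the types shown here
 * are guesses):
 *
 *     struct smp_config_operations {
 *         int (*cpu_count)(void);        // number of processor entries found
 *         int (*cpu_enabled)(int i);     // entry i is marked usable
 *         int (*cpu_bootstrap)(int i);   // entry i is the bootstrap processor
 *         int (*cpu_apic_id)(int i);     // local APIC ID of entry i
 *     };
 */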

void smp_init(void)
{
    if (acpi_madt) {
        acpi_madt_parse();
        ops = &madt_config_operations;
    }
    if (config.cpu_count == 1) {
        mps_init();
        ops = &mps_config_operations;
    }
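
    /*
     * The local APIC and I/O APIC register windows get identity mappings
     * marked non-cacheable, as is usual for memory-mapped I/O (assumed
     * rationale).
     */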
    if (config.cpu_count > 1) {
        map_page_to_frame((__address) l_apic, (__address) l_apic, PAGE_NOT_CACHEABLE, 0);
        map_page_to_frame((__address) io_apic, (__address) io_apic, PAGE_NOT_CACHEABLE, 0);
    }

    /*
     * Must be initialized outside the kmp thread, since it is waited
     * on before the kmp thread is created.
     */
    waitq_initialize(&kmp_completion_wq);
}

/*
 * Kernel thread for bringing up application processors. An arrangement like
 * this (APs being initialized by a kernel thread) is needed because a thread
 * has its own dedicated stack. (The stack used during BSP initialization,
 * prior to the very first call to scheduler(), will be used as an
 * initialization stack for each AP.)
 */
void kmp(void *arg)
{
    __address src, dst;
    int i;

    ASSERT(ops != NULL);

    waitq_initialize(&ap_completion_wq);

    /*
     * We need to access data in frame 0.
     * We boldly make use of kernel address space mapping.
     */

    /*
     * Set the warm-reset vector to the real-mode address of 4K-aligned ap_boot().
     */
    *((__u16 *) (PA2KA(0x467+0))) = ((__address) ap_boot) >> 4;    /* segment */
    *((__u16 *) (PA2KA(0x467+2))) = 0;                             /* offset */

    /*
     * Save 0xa to address 0xf of the CMOS RAM.
     * BIOS will not do the POST after the INIT signal.
     */
    outb(0x70,0xf);
    outb(0x71,0xa);
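
    /*
     * Worked detail for the two steps above (standard PC facts, not taken
     * from the original comments): physical 0x467 is the warm-reset vector
     * in the BIOS data area (0040:0067). ap_boot() is 4K-aligned, so as long
     * as it sits below 1 MB its real-mode address can be expressed as
     * segment = address >> 4 with offset 0; for example, ap_boot() at
     * physical 0x8000 would give segment 0x0800, and 0800:0000 resolves back
     * to 0x8000. CMOS register 0xf (index written to port 0x70, data to port
     * 0x71) is the shutdown status byte; the value 0xa tells the BIOS to
     * skip POST and jump through the warm-reset vector when the AP comes out
     * of INIT.
     */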

    cpu_priority_high();

    pic_disable_irqs(0xffff);
    apic_init();

    for (i = 0; i < ops->cpu_count(); i++) {
        struct descriptor *gdt_new;

        /*
         * Skip processors marked unusable.
         */
        if (!ops->cpu_enabled(i))
            continue;

        /*
         * The bootstrap processor is already up.
         */
        if (ops->cpu_bootstrap(i))
            continue;

        if (ops->cpu_apic_id(i) == l_apic_id()) {
            printf("kmp: bad processor entry #%d, will not send IPI to myself\n", i);
            continue;
        }

        /*
         * Prepare new GDT for CPU in question.
         */
        if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS*sizeof(struct descriptor))))
            panic("couldn't allocate memory for GDT\n");

        memcopy(gdt, gdt_new, GDT_ITEMS*sizeof(struct descriptor));
        memsetb(&gdt_new[TSS_DES], sizeof(struct descriptor), 0);
        gdtr.base = KA2PA((__address) gdt_new);
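
        /*
         * Rationale for the copy above (inferred, not stated in the
         * original): every processor needs its own GDT so that it can later
         * install its own TSS descriptor, hence the inherited TSS_DES slot
         * is cleared. gdtr.base is converted with KA2PA() because the AP
         * loads this GDT before paging is enabled and therefore has to
         * reference it by its physical address.
         */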

        if (l_apic_send_init_ipi(ops->cpu_apic_id(i))) {
            /*
             * There may be just one AP being initialized at a time. After
             * it comes completely up, it is supposed to wake us up.
             */
            waitq_sleep(&ap_completion_wq);
            cpu_priority_high();    /* present in Rev 131 only; removed in Rev 170 */
        }
        else {
            printf("INIT IPI for l_apic%d failed\n", ops->cpu_apic_id(i));
        }
    }

    /*
     * Wake up the kinit thread so that system initialization can go on.
     */
    waitq_wakeup(&kmp_completion_wq, WAKEUP_FIRST);
}
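
/*
 * Hand-off as implied by the comments in this file (the spawning code lives
 * elsewhere): smp_init() runs on the BSP and prepares kmp_completion_wq,
 * kmp() is then run as a kernel thread, and the kinit thread sleeps on
 * kmp_completion_wq until the waitq_wakeup() at the end of kmp() lets
 * system initialization continue.
 */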

#endif /* __SMP__ */
164