Subversion Repositories HelenOS-historic

Rev

Rev 1019 | Rev 1186 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 1019 Rev 1112
1
/*
1
/*
2
 * Copyright (C) 2001-2004 Jakub Jermar
2
 * Copyright (C) 2001-2004 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
#include <arch/pm.h>
29
#include <arch/pm.h>
30
#include <config.h>
30
#include <config.h>
31
#include <arch/types.h>
31
#include <arch/types.h>
32
#include <typedefs.h>
32
#include <typedefs.h>
33
#include <arch/interrupt.h>
33
#include <arch/interrupt.h>
34
#include <arch/asm.h>
34
#include <arch/asm.h>
35
#include <arch/context.h>
35
#include <arch/context.h>
36
#include <panic.h>
36
#include <panic.h>
37
#include <arch/mm/page.h>
37
#include <arch/mm/page.h>
38
#include <mm/slab.h>
38
#include <mm/slab.h>
39
#include <memstr.h>
39
#include <memstr.h>
40
#include <arch/boot/boot.h>
40
#include <arch/boot/boot.h>
41
#include <interrupt.h>
41
#include <interrupt.h>
42
 
42
 
43
/*
 * Early ia32 configuration functions and data structures.
 */
46
 
46
 
47
/*
 * We have no use for segmentation so we set up flat mode. In this
 * mode, we use, for each privilege level, two segments spanning the
 * whole memory. One is for code and one is for data.
 *
 * One additional segment is for the GS register, which holds a
 * pointer to the TLS thread structure in its base.
 */
52
struct descriptor gdt[GDT_ITEMS] = {
55
struct descriptor gdt[GDT_ITEMS] = {
53
    /* NULL descriptor */
56
    /* NULL descriptor */
54
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
57
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
55
    /* KTEXT descriptor */
58
    /* KTEXT descriptor */
56
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
59
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
57
    /* KDATA descriptor */
60
    /* KDATA descriptor */
58
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
61
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
59
    /* UTEXT descriptor */
62
    /* UTEXT descriptor */
60
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
63
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
61
    /* UDATA descriptor */
64
    /* UDATA descriptor */
62
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
65
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
63
    /* TSS descriptor - set up will be completed later */
66
    /* TSS descriptor - set up will be completed later */
64
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
67
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
-
 
68
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 }
65
};
69
};
66
 
70
 
67
static struct idescriptor idt[IDT_ITEMS];
71
static struct idescriptor idt[IDT_ITEMS];
68
 
72
 
69
static struct tss tss;
73
static struct tss tss;
70
 
74
 
71
struct tss *tss_p = NULL;
75
struct tss *tss_p = NULL;
72
 
76
 
73
/* gdtr is changed by kmp before next CPU is initialized */
77
/* gdtr is changed by kmp before next CPU is initialized */
74
struct ptr_16_32 bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
78
struct ptr_16_32 bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
75
struct ptr_16_32 gdtr = { .limit = sizeof(gdt), .base = (__address) gdt };
79
struct ptr_16_32 gdtr = { .limit = sizeof(gdt), .base = (__address) gdt };
76
 
80
 
77
/*
 * Store a 32-bit segment base into a GDT descriptor.
 * The base is scattered over three descriptor fields.
 */
void gdt_setbase(struct descriptor *d, __address base)
{
    d->base_0_15 = base & 0xffff;
    d->base_16_23 = (base >> 16) & 0xff;
    d->base_24_31 = (base >> 24) & 0xff;
}
83
 
87
 
84
/*
 * Store a 20-bit segment limit into a GDT descriptor.
 * The limit is split over two descriptor fields.
 */
void gdt_setlimit(struct descriptor *d, __u32 limit)
{
    d->limit_0_15 = limit & 0xffff;
    d->limit_16_19 = (limit >> 16) & 0xf;
}
89
 
93
 
90
/*
 * Store a handler entry point into an IDT gate descriptor.
 */
void idt_setoffset(struct idescriptor *d, __address offset)
{
    /*
     * Offset is a linear address.
     */
    d->offset_0_15 = offset & 0xffff;
    d->offset_16_31 = offset >> 16;
}
98
 
102
 
99
void tss_initialize(struct tss *t)
103
void tss_initialize(struct tss *t)
100
{
104
{
101
    memsetb((__address) t, sizeof(struct tss), 0);
105
    memsetb((__address) t, sizeof(struct tss), 0);
102
}
106
}
103
 
107
 
104
/*
108
/*
105
 * This function takes care of proper setup of IDT and IDTR.
109
 * This function takes care of proper setup of IDT and IDTR.
106
 */
110
 */
107
void idt_init(void)
111
void idt_init(void)
108
{
112
{
109
    struct idescriptor *d;
113
    struct idescriptor *d;
110
    int i;
114
    int i;
111
 
115
 
112
    for (i = 0; i < IDT_ITEMS; i++) {
116
    for (i = 0; i < IDT_ITEMS; i++) {
113
        d = &idt[i];
117
        d = &idt[i];
114
 
118
 
115
        d->unused = 0;
119
        d->unused = 0;
116
        d->selector = selector(KTEXT_DES);
120
        d->selector = selector(KTEXT_DES);
117
 
121
 
118
        d->access = AR_PRESENT | AR_INTERRUPT;  /* masking interrupt */
122
        d->access = AR_PRESENT | AR_INTERRUPT;  /* masking interrupt */
119
 
123
 
120
        if (i == VECTOR_SYSCALL) {
124
        if (i == VECTOR_SYSCALL) {
121
            /*
125
            /*
122
             * The syscall interrupt gate must be calleable from userland.
126
             * The syscall interrupt gate must be calleable from userland.
123
             */
127
             */
124
            d->access |= DPL_USER;
128
            d->access |= DPL_USER;
125
        }
129
        }
126
       
130
       
127
        idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size);
131
        idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size);
128
        exc_register(i, "undef", (iroutine) null_interrupt);
132
        exc_register(i, "undef", (iroutine) null_interrupt);
129
    }
133
    }
130
    exc_register(13, "gp_fault", (iroutine) gp_fault);
134
    exc_register(13, "gp_fault", (iroutine) gp_fault);
131
    exc_register( 7, "nm_fault", (iroutine) nm_fault);
135
    exc_register( 7, "nm_fault", (iroutine) nm_fault);
132
    exc_register(12, "ss_fault", (iroutine) ss_fault);
136
    exc_register(12, "ss_fault", (iroutine) ss_fault);
133
    exc_register(19, "simd_fp", (iroutine) simd_fp_exception);
137
    exc_register(19, "simd_fp", (iroutine) simd_fp_exception);
134
}
138
}
135
 
139
 
136
 
140
 
137
/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */
static void clean_IOPL_NT_flags(void)
{
    /* Mask 0xffff8fff clears bits 12-14 of EFLAGS. */
    __asm__ volatile (
        "pushfl;"
        "pop %%eax;"
        "and $0xffff8fff,%%eax;"
        "push %%eax;"
        "popfl;"
        :
        :
        : "%eax"
    );
}
152
 
156
 
153
/* Clean AM(18) flag in CR0 register */
static void clean_AM_flag(void)
{
    /* Mask 0xFFFBFFFF clears bit 18 (AM) of CR0. */
    __asm__ volatile (
        "mov %%cr0,%%eax;"
        "and $0xFFFBFFFF,%%eax;"
        "mov %%eax,%%cr0;"
        :
        :
        : "%eax"
    );
}
166
 
170
 
167
void pm_init(void)
171
void pm_init(void)
168
{
172
{
169
    struct descriptor *gdt_p = (struct descriptor *) gdtr.base;
173
    struct descriptor *gdt_p = (struct descriptor *) gdtr.base;
170
    struct ptr_16_32 idtr;
174
    struct ptr_16_32 idtr;
171
 
175
 
172
    /*
176
    /*
173
     * Update addresses in GDT and IDT to their virtual counterparts.
177
     * Update addresses in GDT and IDT to their virtual counterparts.
174
     */
178
     */
175
    idtr.limit = sizeof(idt);
179
    idtr.limit = sizeof(idt);
176
    idtr.base = (__address) idt;
180
    idtr.base = (__address) idt;
177
    __asm__ volatile ("lgdt %0\n" : : "m" (gdtr));
181
    __asm__ volatile ("lgdt %0\n" : : "m" (gdtr));
178
    __asm__ volatile ("lidt %0\n" : : "m" (idtr)); 
182
    __asm__ volatile ("lidt %0\n" : : "m" (idtr)); 
179
   
183
   
180
    /*
184
    /*
181
     * Each CPU has its private GDT and TSS.
185
     * Each CPU has its private GDT and TSS.
182
     * All CPUs share one IDT.
186
     * All CPUs share one IDT.
183
     */
187
     */
184
 
188
 
185
    if (config.cpu_active == 1) {
189
    if (config.cpu_active == 1) {
186
        idt_init();
190
        idt_init();
187
        /*
191
        /*
188
         * NOTE: bootstrap CPU has statically allocated TSS, because
192
         * NOTE: bootstrap CPU has statically allocated TSS, because
189
         * the heap hasn't been initialized so far.
193
         * the heap hasn't been initialized so far.
190
         */
194
         */
191
        tss_p = &tss;
195
        tss_p = &tss;
192
    }
196
    }
193
    else {
197
    else {
194
        tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC);
198
        tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC);
195
        if (!tss_p)
199
        if (!tss_p)
196
            panic("could not allocate TSS\n");
200
            panic("could not allocate TSS\n");
197
    }
201
    }
198
 
202
 
199
    tss_initialize(tss_p);
203
    tss_initialize(tss_p);
200
   
204
   
201
    gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
205
    gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
202
    gdt_p[TSS_DES].special = 1;
206
    gdt_p[TSS_DES].special = 1;
203
    gdt_p[TSS_DES].granularity = 1;
207
    gdt_p[TSS_DES].granularity = 1;
204
   
208
   
205
    gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p);
209
    gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p);
206
    gdt_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1);
210
    gdt_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1);
207
 
211
 
208
    /*
212
    /*
209
     * As of this moment, the current CPU has its own GDT pointing
213
     * As of this moment, the current CPU has its own GDT pointing
210
     * to its own TSS. We just need to load the TR register.
214
     * to its own TSS. We just need to load the TR register.
211
     */
215
     */
212
    __asm__ volatile ("ltr %0" : : "r" ((__u16) selector(TSS_DES)));
216
    __asm__ volatile ("ltr %0" : : "r" ((__u16) selector(TSS_DES)));
213
   
217
   
214
    clean_IOPL_NT_flags();    /* Disable I/O on nonprivileged levels */
218
    clean_IOPL_NT_flags();    /* Disable I/O on nonprivileged levels */
215
    clean_AM_flag();          /* Disable alignment check */
219
    clean_AM_flag();          /* Disable alignment check */
216
}
220
}
-
 
221
 
-
 
222
/*
 * Install tls as the base of the TLS segment (loaded via GS) in the
 * current CPU's GDT, then reload GDTR so the change takes effect.
 */
void set_tls_desc(__address tls)
{
    struct ptr_16_32 cpugdtr;
    struct descriptor *gdt_p;

    /*
     * sgdt WRITES the GDTR value into cpugdtr, so it must be an
     * output ("=m") operand; the original passed it as an input,
     * which lets the compiler assume cpugdtr stays unchanged.
     */
    __asm__ volatile ("sgdt %0\n" : "=m" (cpugdtr));

    /*
     * Dereference cpugdtr.base only AFTER sgdt has filled it in;
     * the original read it while still uninitialized (UB).
     */
    gdt_p = (struct descriptor *) cpugdtr.base;
    gdt_setbase(&gdt_p[TLS_DES], tls);

    /* Reload gdt register to update GS in CPU */
    __asm__ volatile ("lgdt %0\n" : : "m" (cpugdtr));
}
217
 
233