Subversion Repositories HelenOS-historic

Rev

Rev 1251 | Rev 1289 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 1251 Rev 1287
1
/*
1
/*
2
 * Copyright (C) 2001-2004 Jakub Jermar
2
 * Copyright (C) 2001-2004 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
#include <arch/pm.h>
29
#include <arch/pm.h>
30
#include <config.h>
30
#include <config.h>
31
#include <arch/types.h>
31
#include <arch/types.h>
32
#include <typedefs.h>
32
#include <typedefs.h>
33
#include <arch/interrupt.h>
33
#include <arch/interrupt.h>
34
#include <arch/asm.h>
34
#include <arch/asm.h>
35
#include <arch/context.h>
35
#include <arch/context.h>
36
#include <panic.h>
36
#include <panic.h>
37
#include <arch/mm/page.h>
37
#include <arch/mm/page.h>
38
#include <mm/slab.h>
38
#include <mm/slab.h>
39
#include <memstr.h>
39
#include <memstr.h>
40
#include <arch/boot/boot.h>
40
#include <arch/boot/boot.h>
41
#include <interrupt.h>
41
#include <interrupt.h>
42
 
42
 
43
/*
43
/*
44
 * Early ia32 configuration functions and data structures.
44
 * Early ia32 configuration functions and data structures.
45
 */
45
 */
46
 
46
 
47
/*
47
/*
48
 * We have no use for segmentation so we set up flat mode. In this
48
 * We have no use for segmentation so we set up flat mode. In this
49
 * mode, we use, for each privilege level, two segments spanning the
49
 * mode, we use, for each privilege level, two segments spanning the
50
 * whole memory. One is for code and one is for data.
50
 * whole memory. One is for code and one is for data.
51
 *
51
 *
52
 * One is for GS register which holds pointer to the TLS thread
52
 * One is for GS register which holds pointer to the TLS thread
53
 * structure in it's base.
53
 * structure in it's base.
54
 */
54
 */
55
/*
 * Global Descriptor Table for the flat memory model: per privilege level
 * one code and one data segment, each covering the whole address space
 * (limit 0xfffff with page granularity). The TSS slot is filled in later
 * by pm_init(); the TLS slot's base is patched by set_tls_desc().
 */
descriptor_t gdt[GDT_ITEMS] = {
    /* NULL descriptor */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* KTEXT descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
    /* KDATA descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
    /* UTEXT descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* UDATA descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* TSS descriptor - set up will be completed later */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* TLS descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /*
     * VESA Init descriptor.
     * Unlike the flat segments above, this one has byte granularity and
     * is not a 32-bit segment (last two flag fields are 0), with its base
     * derived from VESA_INIT_SEGMENT — presumably a 16-bit code segment
     * for VESA mode-setting; confirm against the VESA init code.
     */
    { 0xffff, 0, VESA_INIT_SEGMENT>>12, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 },
};
71
 
74
 
72
static idescriptor_t idt[IDT_ITEMS];
75
static idescriptor_t idt[IDT_ITEMS];
73
 
76
 
74
static tss_t tss;
77
static tss_t tss;
75
 
78
 
76
tss_t *tss_p = NULL;
79
tss_t *tss_p = NULL;
77
 
80
 
78
/* gdtr is changed by kmp before next CPU is initialized */
81
/* gdtr is changed by kmp before next CPU is initialized */
79
ptr_16_32_t bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
82
ptr_16_32_t bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
80
ptr_16_32_t gdtr = { .limit = sizeof(gdt), .base = (__address) gdt };
83
ptr_16_32_t gdtr = { .limit = sizeof(gdt), .base = (__address) gdt };
81
 
84
 
82
/*
 * Install a new base address into a segment descriptor.
 *
 * The 32-bit base is scattered across the descriptor's three
 * non-contiguous base fields (bits 0-15, 16-23 and 24-31).
 */
void gdt_setbase(descriptor_t *d, __address base)
{
    d->base_0_15  = base & 0xffff;
    d->base_16_23 = (base >> 16) & 0xff;
    d->base_24_31 = (base >> 24) & 0xff;
}
88
 
91
 
89
/*
 * Install a new 20-bit limit into a segment descriptor.
 *
 * The limit is split between the low 16-bit field and the
 * high 4-bit field of the descriptor.
 */
void gdt_setlimit(descriptor_t *d, __u32 limit)
{
    d->limit_16_19 = (limit >> 16) & 0xf;
    d->limit_0_15  = limit & 0xffff;
}
94
 
97
 
95
/*
 * Set the handler entry point of an interrupt gate.
 *
 * The offset is a linear address; it is stored split across the
 * gate's low and high 16-bit offset fields.
 */
void idt_setoffset(idescriptor_t *d, __address offset)
{
    d->offset_16_31 = (offset >> 16) & 0xffff;
    d->offset_0_15  = offset & 0xffff;
}
103
 
106
 
104
/* Zero out a TSS prior to its descriptor being set up in pm_init(). */
void tss_initialize(tss_t *t)
{
    /* memsetb() clears sizeof(struct tss) bytes starting at t. */
    memsetb((__address) t, sizeof(struct tss), 0);
}
108
 
111
 
109
/*
112
/*
110
 * This function takes care of proper setup of IDT and IDTR.
113
 * This function takes care of proper setup of IDT and IDTR.
111
 */
114
 */
112
void idt_init(void)
115
void idt_init(void)
113
{
116
{
114
    idescriptor_t *d;
117
    idescriptor_t *d;
115
    int i;
118
    int i;
116
 
119
 
117
    for (i = 0; i < IDT_ITEMS; i++) {
120
    for (i = 0; i < IDT_ITEMS; i++) {
118
        d = &idt[i];
121
        d = &idt[i];
119
 
122
 
120
        d->unused = 0;
123
        d->unused = 0;
121
        d->selector = selector(KTEXT_DES);
124
        d->selector = selector(KTEXT_DES);
122
 
125
 
123
        d->access = AR_PRESENT | AR_INTERRUPT;  /* masking interrupt */
126
        d->access = AR_PRESENT | AR_INTERRUPT;  /* masking interrupt */
124
 
127
 
125
        if (i == VECTOR_SYSCALL) {
128
        if (i == VECTOR_SYSCALL) {
126
            /*
129
            /*
127
             * The syscall interrupt gate must be calleable from userland.
130
             * The syscall interrupt gate must be calleable from userland.
128
             */
131
             */
129
            d->access |= DPL_USER;
132
            d->access |= DPL_USER;
130
        }
133
        }
131
       
134
       
132
        idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size);
135
        idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size);
133
        exc_register(i, "undef", (iroutine) null_interrupt);
136
        exc_register(i, "undef", (iroutine) null_interrupt);
134
    }
137
    }
135
    exc_register(13, "gp_fault", (iroutine) gp_fault);
138
    exc_register(13, "gp_fault", (iroutine) gp_fault);
136
    exc_register( 7, "nm_fault", (iroutine) nm_fault);
139
    exc_register( 7, "nm_fault", (iroutine) nm_fault);
137
    exc_register(12, "ss_fault", (iroutine) ss_fault);
140
    exc_register(12, "ss_fault", (iroutine) ss_fault);
138
    exc_register(19, "simd_fp", (iroutine) simd_fp_exception);
141
    exc_register(19, "simd_fp", (iroutine) simd_fp_exception);
139
}
142
}
140
 
143
 
141
 
144
 
142
/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */
static void clean_IOPL_NT_flags(void)
{
    /*
     * EFLAGS cannot be written directly; round-trip it through the
     * stack: push EFLAGS, clear bits 12-14 (mask 0xffff8fff =
     * ~0x7000), pop it back.
     */
    __asm__ volatile (
        "pushfl\n"
        "pop %%eax\n"
        "and $0xffff8fff, %%eax\n"
        "push %%eax\n"
        "popfl\n"
        : : : "eax"
    );
}
154
 
157
 
155
/* Clean AM(18) flag in CR0 register */
static void clean_AM_flag(void)
{
    /*
     * Clear CR0 bit 18 (mask 0xfffbffff = ~0x40000), the Alignment
     * Mask, so alignment checking stays disabled.
     */
    __asm__ volatile (
        "mov %%cr0, %%eax\n"
        "and $0xfffbffff, %%eax\n"
        "mov %%eax, %%cr0\n"
        : : : "eax"
    );
}
165
 
168
 
166
/*
 * Initialize protected-mode structures for the current CPU: load GDTR
 * and IDTR with virtual addresses, set up and load this CPU's TSS, and
 * sanitize EFLAGS/CR0. On the bootstrap CPU the shared IDT is built here
 * as well.
 */
void pm_init(void)
{
    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
    ptr_16_32_t idtr;

    /*
     * Update addresses in GDT and IDT to their virtual counterparts.
     */
    /* NOTE(review): IDTR limit is conventionally sizeof(idt) - 1; verify. */
    idtr.limit = sizeof(idt);
    idtr.base = (__address) idt;
    gdtr_load(&gdtr);
    idtr_load(&idtr);
    
    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: bootstrap CPU has statically allocated TSS, because
         * the heap hasn't been initialized so far.
         */
        tss_p = &tss;
    }
    else {
        /* Application CPUs get their TSS from the (now running) heap. */
        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("could not allocate TSS\n");
    }

    tss_initialize(tss_p);
    
    /* Complete the TSS descriptor left empty in the static gdt[]. */
    gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
    gdt_p[TSS_DES].special = 1;
    gdt_p[TSS_DES].granularity = 0;
    
    gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p);
    gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    tr_load(selector(TSS_DES));
    
    clean_IOPL_NT_flags();    /* Disable I/O on nonprivileged levels and clear NT flag. */
    clean_AM_flag();          /* Disable alignment check */
}
216
 
219
 
217
/*
 * Point the TLS descriptor of the current CPU's GDT at the given
 * thread-local storage structure.
 */
void set_tls_desc(__address tls)
{
    ptr_16_32_t cur_gdtr;
    descriptor_t *table;

    /* Locate this CPU's private GDT and patch the TLS segment base. */
    gdtr_store(&cur_gdtr);
    table = (descriptor_t *) cur_gdtr.base;
    gdt_setbase(&table[TLS_DES], tls);

    /* Reload gdt register to update GS in CPU */
    gdtr_load(&cur_gdtr);
}
228
 
231