Subversion Repositories HelenOS-historic

Rev 1292 → Rev 1702 (Rev 1702 adds the Doxygen @addtogroup/@file grouping comments; the code is otherwise unchanged)
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia32
 * @{
 */
/** @file
 */

#include <arch/pm.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <arch/context.h>
#include <panic.h>
#include <arch/mm/page.h>
#include <mm/slab.h>
#include <memstr.h>
#include <arch/boot/boot.h>
#include <interrupt.h>

/*
 * Early ia32 configuration functions and data structures.
 */

/*
 * We have no use for segmentation, so we set up flat mode. In this
 * mode we use, for each privilege level, two segments spanning the
 * whole memory: one for code and one for data.
 *
 * One more segment is used for the GS register; its base holds the
 * address of the TLS thread structure.
 */
descriptor_t gdt[GDT_ITEMS] = {
    /* NULL descriptor */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* KTEXT descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
    /* KDATA descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
    /* UTEXT descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* UDATA descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* TSS descriptor - setup will be completed later */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* TLS descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* VESA Init descriptor */
#ifdef CONFIG_FB
    { 0xffff, 0, VESA_INIT_SEGMENT>>12, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 }
#endif
};
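
The flat-mode entries above encode a 4 GiB span: limit_0_15 = 0xffff and limit_16_19 = 0xf form the 20-bit limit 0xfffff, and granularity = 1 scales it in 4 KiB units. A standalone sketch (not kernel code; the field values are copied from the initializers above) that recomputes the span:

#include <stdint.h>
#include <stdio.h>

/* Values taken from the KTEXT/KDATA/UTEXT/UDATA/TLS initializers above. */
#define FLAT_LIMIT_0_15   0xffffu
#define FLAT_LIMIT_16_19  0xfu
#define FLAT_GRANULARITY  1u

int main(void)
{
    uint32_t limit = (FLAT_LIMIT_16_19 << 16) | FLAT_LIMIT_0_15;  /* 0xfffff */
    /* With G = 1 the limit counts 4 KiB units, so the last valid offset is
     * (limit << 12) | 0xfff = 0xffffffff: the segment spans the whole
     * 4 GiB address space, which is what "flat mode" means here. */
    uint64_t span = FLAT_GRANULARITY ? ((uint64_t) limit + 1) << 12
                                     : (uint64_t) limit + 1;
    printf("segment span: 0x%llx bytes\n", (unsigned long long) span);
    return 0;
}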

static idescriptor_t idt[IDT_ITEMS];

static tss_t tss;

tss_t *tss_p = NULL;

/* gdtr is changed by kmp before the next CPU is initialized */
ptr_16_32_t bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
ptr_16_32_t gdtr = { .limit = sizeof(gdt), .base = (__address) gdt };

void gdt_setbase(descriptor_t *d, __address base)
{
    d->base_0_15 = base & 0xffff;
    d->base_16_23 = ((base) >> 16) & 0xff;
    d->base_24_31 = ((base) >> 24) & 0xff;
}

void gdt_setlimit(descriptor_t *d, __u32 limit)
{
    d->limit_0_15 = limit & 0xffff;
    d->limit_16_19 = (limit >> 16) & 0xf;
}
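
The two helpers above scatter a 32-bit base and a 20-bit limit across the descriptor's split fields. A hedged usage sketch, with a hypothetical buffer and descriptor that are not part of this file (pm_init() below applies the same pattern to the TSS descriptor):

/* Hypothetical example: point some descriptor at a byte-granular buffer. */
static char sample_area[4096];

static void describe_sample_area(descriptor_t *d)
{
    gdt_setbase(d, (__address) sample_area);
    /* The limit is the last valid offset, hence size - 1
     * (compare TSS_BASIC_SIZE - 1 in pm_init() below). */
    gdt_setlimit(d, sizeof(sample_area) - 1);
}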

void idt_setoffset(idescriptor_t *d, __address offset)
{
    /*
     * Offset is a linear address.
     */
    d->offset_0_15 = offset & 0xffff;
    d->offset_16_31 = offset >> 16;
}

void tss_initialize(tss_t *t)
{
    memsetb((__address) t, sizeof(struct tss), 0);
}

/*
 * This function takes care of proper setup of IDT and IDTR.
 */
void idt_init(void)
{
    idescriptor_t *d;
    int i;

    for (i = 0; i < IDT_ITEMS; i++) {
        d = &idt[i];

        d->unused = 0;
        d->selector = selector(KTEXT_DES);

        d->access = AR_PRESENT | AR_INTERRUPT;  /* masking interrupt */

        if (i == VECTOR_SYSCALL) {
            /*
             * The syscall interrupt gate must be callable from userland.
             */
            d->access |= DPL_USER;
        }

        idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size);
        exc_register(i, "undef", (iroutine) null_interrupt);
    }
    exc_register(13, "gp_fault", (iroutine) gp_fault);
    exc_register( 7, "nm_fault", (iroutine) nm_fault);
    exc_register(12, "ss_fault", (iroutine) ss_fault);
    exc_register(19, "simd_fp", (iroutine) simd_fp_exception);
}
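
Because only the VECTOR_SYSCALL gate carries DPL_USER, it is the sole vector a ring-3 program may raise with a software interrupt; an int to any other vector ends in a general protection fault rather than the handler. A minimal, hypothetical userland sketch, with argument passing and the actual syscall convention omitted:

/* Hypothetical ring-3 snippet; VECTOR_SYSCALL as used in idt_init() above.
 * The gate's DPL of 3 is what allows this instruction to succeed. */
static inline void enter_syscall_gate(void)
{
    __asm__ volatile ("int %0" : : "i" (VECTOR_SYSCALL));
}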


/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */
static void clean_IOPL_NT_flags(void)
{
    __asm__ volatile (
        "pushfl\n"
        "pop %%eax\n"
        "and $0xffff8fff, %%eax\n"
        "push %%eax\n"
        "popfl\n"
        : : : "eax"
    );
}

/* Clean AM(18) flag in CR0 register */
static void clean_AM_flag(void)
{
    __asm__ volatile (
        "mov %%cr0, %%eax\n"
        "and $0xfffbffff, %%eax\n"
        "mov %%eax, %%cr0\n"
        : : : "eax"
    );
}
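
The masks in the two routines above follow from the bit positions named in their comments. A standalone sketch (not kernel code) that recomputes them:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* EFLAGS: IOPL occupies bits 12-13, NT is bit 14. */
    uint32_t eflags_mask = ~(uint32_t) ((3u << 12) | (1u << 14));
    /* CR0: AM (alignment mask) is bit 18. */
    uint32_t cr0_mask = ~(uint32_t) (1u << 18);

    assert(eflags_mask == 0xffff8fffu);  /* mask used in clean_IOPL_NT_flags() */
    assert(cr0_mask == 0xfffbffffu);     /* mask used in clean_AM_flag() */
    return 0;
}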

void pm_init(void)
{
    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
    ptr_16_32_t idtr;

    /*
     * Update addresses in GDT and IDT to their virtual counterparts.
     */
    idtr.limit = sizeof(idt);
    idtr.base = (__address) idt;
    gdtr_load(&gdtr);
    idtr_load(&idtr);

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: the bootstrap CPU has a statically allocated TSS, because
         * the heap has not been initialized yet.
         */
        tss_p = &tss;
    }
    else {
        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("could not allocate TSS\n");
    }

    tss_initialize(tss_p);

    gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
    gdt_p[TSS_DES].special = 1;
    gdt_p[TSS_DES].granularity = 0;

    gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p);
    gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    tr_load(selector(TSS_DES));

    clean_IOPL_NT_flags();    /* Disable I/O on nonprivileged levels and clear NT flag. */
    clean_AM_flag();          /* Disable alignment check. */
}

void set_tls_desc(__address tls)
{
    ptr_16_32_t cpugdtr;
    descriptor_t *gdt_p;

    gdtr_store(&cpugdtr);
    gdt_p = (descriptor_t *) cpugdtr.base;
    gdt_setbase(&gdt_p[TLS_DES], tls);
    /* Reload gdt register to update GS in CPU */
    gdtr_load(&cpugdtr);
}
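
Once set_tls_desc() has pointed TLS_DES at a thread's TLS structure, code running with GS loaded with that selector reaches the structure through GS-relative addressing; %gs:0 is simply the first byte of the structure whose base was just installed. A hypothetical helper, not part of this file, illustrating the access:

/* Hypothetical: read the first 32-bit word of the current TLS structure
 * through the GS segment whose base was set by set_tls_desc(). */
static inline __u32 tls_first_word(void)
{
    __u32 w;
    __asm__ volatile ("movl %%gs:0, %0" : "=r" (w));
    return w;
}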

/** @}
 */