Subversion Repositories HelenOS-historic

Diff of Rev 195 → Rev 232 (the listing below follows the Rev 232 side; differences from Rev 195 are noted inline).
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/pm.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <arch/context.h>
#include <panic.h>
#include <arch/mm/page.h>
#include <mm/heap.h>
#include <memstr.h>

/*
 * Early ia32 configuration functions and data structures.
 */

/*
 * We have no use for segmentation, so we set up flat mode. In this
 * mode, we use, for each privilege level, two segments spanning the
 * whole address space: one for code and one for data.
 */
struct descriptor gdt[GDT_ITEMS] = {
    /* NULL descriptor */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* KTEXT descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
    /* KDATA descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
    /* UTEXT descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* UDATA descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* TSS descriptor - setup will be completed later */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
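
/*
 * Editorial sketch, not part of the original source: how the flat
 * descriptors above reach 4 GiB. The 20-bit limit 0xfffff is split
 * between limit_0_15 (0xffff) and limit_16_19 (0xf); with the
 * granularity bit set it counts 4 KiB pages, so each segment spans
 * (0xfffff + 1) * 4096 bytes = 4 GiB, i.e. the whole address space.
 * descriptor_span() is a hypothetical helper illustrating the math.
 */
static inline unsigned long long descriptor_span(struct descriptor *d)
{
    __u32 limit = ((__u32) d->limit_16_19 << 16) | d->limit_0_15;
    /* granularity selects byte (0) or 4 KiB page (1) units */
    return ((unsigned long long) limit + 1) << (d->granularity ? 12 : 0);
}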

static struct idescriptor idt[IDT_ITEMS];

static struct tss tss;

struct tss *tss_p = NULL;

/* gdtr is changed by kmp before next CPU is initialized */
struct ptr_16_32 gdtr __attribute__ ((section ("K_DATA_START"))) = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
struct ptr_16_32 idtr __attribute__ ((section ("K_DATA_START"))) = { .limit = sizeof(idt), .base = KA2PA((__address) idt) };
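
/*
 * Editorial note, not part of the original source: struct ptr_16_32
 * matches the 48-bit pseudo-descriptor operand that lgdt/lidt read
 * from memory -- a 16-bit table limit followed by a 32-bit linear
 * base address -- which is why pm_init() below can load gdtr and
 * idtr with a single instruction each.
 */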

void gdt_setbase(struct descriptor *d, __address base)
{
    d->base_0_15 = base & 0xffff;
    d->base_16_23 = (base >> 16) & 0xff;
    d->base_24_31 = (base >> 24) & 0xff;
}
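
/*
 * Worked example (editorial, not in the original source): for
 * base = 0x12345678, gdt_setbase() stores base_0_15 = 0x5678,
 * base_16_23 = 0x34 and base_24_31 = 0x12. The base is split this
 * way for historical reasons: the i386 descriptor layout had to stay
 * binary-compatible with the older 8-byte i286 descriptor.
 */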

void gdt_setlimit(struct descriptor *d, __u32 limit)
{
    d->limit_0_15 = limit & 0xffff;
    d->limit_16_19 = (limit >> 16) & 0xf;
}
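
/*
 * Illustrative inverse of gdt_setbase() (editorial sketch, not part
 * of the original source): after gdt_setbase(d, b), gdt_getbase(d)
 * returns b for any 32-bit b, showing the encoding round-trips.
 */
static inline __address gdt_getbase(struct descriptor *d)
{
    return (__address) d->base_0_15 |
        ((__address) d->base_16_23 << 16) |
        ((__address) d->base_24_31 << 24);
}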

void idt_setoffset(struct idescriptor *d, __address offset)
{
    /*
     * Offset is a linear address.
     */
    d->offset_0_15 = offset & 0xffff;
    d->offset_16_31 = offset >> 16;
}
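
/*
 * Editorial note, not part of the original source: an interrupt gate
 * holds a selector:offset pair rather than a base and limit, so the
 * 32-bit handler offset is simply cut into two 16-bit halves; no
 * granularity bit applies to the offset itself.
 */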

void tss_initialize(struct tss *t)
{
    memsetb((__address) t, sizeof(struct tss), 0);
}

/*
 * This function takes care of proper setup of the IDT and IDTR.
 */
void idt_init(void)
{
    struct idescriptor *d;
    int i;

    for (i = 0; i < IDT_ITEMS; i++) {
        d = &idt[i];

        d->unused = 0;
        d->selector = selector(KTEXT_DES);

        d->access = AR_PRESENT | AR_INTERRUPT;  /* interrupt gate: interrupts masked on entry */

        if (i == VECTOR_SYSCALL) {
            /*
             * The syscall interrupt gate must be callable from userland.
             */
            d->access |= DPL_USER;
        }

        idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size);
        trap_register(i, null_interrupt);
    }
    trap_register(13, gp_fault);
    trap_register( 7, nm_fault);
    trap_register(12, ss_fault);
}
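
/*
 * Editorial note, not part of the original source: the three vectors
 * re-registered above are ia32 exceptions: 7 = #NM (device not
 * available), 12 = #SS (stack-segment fault), 13 = #GP (general
 * protection fault). Every other vector keeps the null_interrupt
 * placeholder until a real handler is registered.
 */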


/* Clear the IOPL (bits 12-13) and NT (bit 14) flags in the EFLAGS register. */
static void clean_IOPL_NT_flags(void)
{
    asm
    (
        "pushfl;"
        "pop %%eax;"
        "and $0xffff8fff,%%eax;"
        "push %%eax;"
        "popfl;"
        :
        :
        : "%eax"
    );
}
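
/*
 * Editorial check, not part of the original source: IOPL occupies
 * EFLAGS bits 12-13 (0x3000) and NT is bit 14 (0x4000); together
 * 0x7000, and ~0x7000 = 0xffff8fff is exactly the mask and-ed in
 * above. With IOPL = 0, I/O instructions trap outside ring 0.
 */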

/* Clear the AM (bit 18) flag in the CR0 register. */
static void clean_AM_flag(void)
{
    asm
    (
        "mov %%cr0,%%eax;"
        "and $0xFFFBFFFF,%%eax;"
        "mov %%eax,%%cr0;"
        :
        :
        : "%eax"
    );
}
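
/*
 * Editorial check, not part of the original source: AM is CR0 bit 18,
 * i.e. 0x00040000, and ~0x00040000 = 0xFFFBFFFF, the mask used above.
 * With CR0.AM clear, EFLAGS.AC can no longer enable alignment checks.
 */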

void pm_init(void)
{
    struct descriptor *gdt_p = (struct descriptor *) PA2KA(gdtr.base);

    /* The following block is new in Rev 232. */
    /*
     * Update addresses in GDT and IDT to their virtual counterparts.
     */
    gdtr.base = KA2PA(gdtr.base);
    idtr.base = (__address) idt;
    __asm__ volatile ("lgdt %0\n" : : "m" (gdtr));
    __asm__ volatile ("lidt %0\n" : : "m" (idtr));
    /* End of the block added in Rev 232. */

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: bootstrap CPU has statically allocated TSS, because
         * the heap hasn't been initialized so far.
         */
        tss_p = &tss;
    }
    else {
        tss_p = (struct tss *) malloc(sizeof(struct tss));
        if (!tss_p)
            panic("could not allocate TSS\n");
    }

    tss_initialize(tss_p);

    gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
    gdt_p[TSS_DES].special = 1;
    gdt_p[TSS_DES].granularity = 1;

    gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p);
    gdt_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1);

    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    /* Rev 195 had plain __asm__ here; Rev 232 adds the volatile qualifier. */
    __asm__ volatile ("ltr %0" : : "r" ((__u16) selector(TSS_DES)));

    clean_IOPL_NT_flags();    /* Disable I/O on nonprivileged levels */
    clean_AM_flag();          /* Disable alignment check */
}
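
/*
 * Editorial note, not part of the original source: ltr only loads the
 * TSS selector into the task register. A flat-model kernel like this
 * one keeps a TSS mainly because the CPU fetches ss0:esp0 from it
 * whenever an interrupt or the syscall gate raises the privilege
 * level from user to kernel mode.
 */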