Subversion Repositories HelenOS-historic

Rev

Rev 143 | Rev 167 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 143 Rev 144
1
/*
1
/*
2
 * Copyright (C) 2001-2004 Jakub Jermar
2
 * Copyright (C) 2001-2004 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
#include <arch/pm.h>
29
#include <arch/pm.h>
30
#include <config.h>
30
#include <config.h>
31
#include <arch/types.h>
31
#include <arch/types.h>
32
#include <typedefs.h>
32
#include <typedefs.h>
33
#include <arch/interrupt.h>
33
#include <arch/interrupt.h>
34
#include <arch/asm.h>
34
#include <arch/asm.h>
35
#include <arch/context.h>
35
#include <arch/context.h>
36
#include <panic.h>
36
#include <panic.h>
37
 
37
 
38
/*
38
/*
39
 * Early ia32 configuration functions and data structures.
39
 * Early ia32 configuration functions and data structures.
40
 */
40
 */
41
 
41
 
42
/*
 * We have no use for segmentation so we set up flat mode. In this
 * mode, we use, for each privilege level, two segments spanning the
 * whole memory. One is for code and one is for data.
 *
 * Note: the bootstrap GDT; each CPU gets its private copy later
 * (see pm_init), while the IDT is shared by all CPUs.
 */
struct descriptor gdt[GDT_ITEMS] = {
    /* NULL descriptor */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* KTEXT descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
    /* KDATA descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
    /* UTEXT descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* UDATA descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* TSS descriptor - set up will be completed later (in pm_init) */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
61
 
61
 
62
/* Interrupt descriptor table; shared by all CPUs (see pm_init). */
static struct idescriptor idt[IDT_ITEMS];

/* Statically allocated TSS for the bootstrap CPU (heap is not ready yet). */
static struct tss tss;

/* TSS of the current CPU; set in pm_init. */
struct tss *tss_p = NULL;

/* gdtr is changed by kmp before next CPU is initialized */
struct ptr_16_32 gdtr __attribute__ ((section ("K_DATA_START"))) = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
struct ptr_16_32 idtr __attribute__ ((section ("K_DATA_START"))) = { .limit = sizeof(idt), .base = KA2PA((__address) idt) };
71
 
71
 
72
/*
 * Store a 32-bit segment base address into the three split base
 * fields of a GDT descriptor.
 */
void gdt_setbase(struct descriptor *d, __address base)
{
    __address b = base;

    d->base_0_15 = b & 0xffff;   /* bits 0..15 */
    b >>= 16;
    d->base_16_23 = b & 0xff;    /* bits 16..23 */
    b >>= 8;
    d->base_24_31 = b & 0xff;    /* bits 24..31 */
}
78
 
78
 
79
/*
 * Store a 20-bit segment limit into the two split limit fields of a
 * GDT descriptor.
 */
void gdt_setlimit(struct descriptor *d, __u32 limit)
{
    /* Upper 4 bits of the 20-bit limit. */
    d->limit_16_19 = (limit >> 16) & 0xf;
    /* Lower 16 bits. */
    d->limit_0_15 = limit & 0xffff;
}
84
 
84
 
85
/*
 * Store a handler entry point into the two split offset fields of an
 * IDT gate descriptor.
 *
 * Offset is a linear address.
 */
void idt_setoffset(struct idescriptor *d, __address offset)
{
    /* Upper half first; both stores are independent. */
    d->offset_16_31 = offset >> 16;
    d->offset_0_15 = offset & 0xffff;
}
93
 
93
 
94
/* Zero out the whole TSS structure before it is put to use. */
void tss_initialize(struct tss *t)
{
    memsetb((__address) t, sizeof(struct tss), 0);
}
98
 
98
 
99
/*
 * This function takes care of proper setup of IDT and IDTR.
 *
 * Every vector is pointed at its stub in interrupt_handlers (stubs
 * are interrupt_handler_size bytes apart) and given the default
 * null_interrupt software handler; specific fault handlers are then
 * registered for selected vectors.
 */
void idt_init(void)
{
    struct idescriptor *d;
    int i;

    for (i = 0; i < IDT_ITEMS; i++) {
        d = &idt[i];

        d->unused = 0;
        /* All gates enter the kernel code segment. */
        d->selector = selector(KTEXT_DES);

        d->access = AR_PRESENT | AR_INTERRUPT;  /* masking interrupt */

        if (i == VECTOR_SYSCALL) {
            /*
             * The syscall interrupt gate must be callable from userland.
             */
            d->access |= DPL_USER;
        }
       
        idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size);
        trap_register(i, null_interrupt);
    }
    /* Dedicated handlers for specific processor exceptions. */
    trap_register(13, gp_fault);    /* #GP - general protection fault */
    trap_register( 7, nm_fault);    /* #NM - device not available */
    trap_register(12, ss_fault);    /* #SS - stack segment fault */
}
129
 
129
 
130
 
130
 
131
/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */
static void clean_IOPL_NT_flags(void)
{
    __asm__ (
        "pushfl;"
        "pop %%eax;"
        "and $0xffff8fff,%%eax;"
        "push %%eax;"
        "popfl;"
        :
        :
        : "%eax"
    );
}
146
 
146
 
147
/* Clean AM(18) flag in CR0 register */
static void clean_AM_flag(void)
{
    __asm__ (
        "mov %%cr0,%%eax;"
        "and $0xFFFBFFFF,%%eax;"
        "mov %%eax,%%cr0;"
        :
        :
        : "%eax"
    );
}
160
 
160
 
161
 
161
 
162
 
162
 
163
 
163
 
164
 
164
 
165
/*
 * Set up protected-mode structures for the current CPU: make sure the
 * IDT is initialized (bootstrap CPU only), obtain a TSS, fill in the
 * TSS descriptor in this CPU's GDT, load TR, and sanitize EFLAGS/CR0.
 */
void pm_init(void)
{
    /* This CPU's private GDT, reached through the (per-CPU) gdtr. */
    struct descriptor *gdt_p = (struct descriptor *) PA2KA(gdtr.base);

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: bootstrap CPU has statically allocated TSS, because
         * the heap hasn't been initialized so far.
         */
        tss_p = &tss;
    }
    else {
        tss_p = (struct tss *) malloc(sizeof(struct tss));
        if (!tss_p)
            panic("could not allocate TSS\n");
    }

    tss_initialize(tss_p);
   
    /* Complete the TSS descriptor that the bootstrap GDT left zeroed. */
    gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
    gdt_p[TSS_DES].special = 1;
    gdt_p[TSS_DES].granularity = 1;
   
    gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p);
    gdt_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1);

    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    __asm__("ltr %0" : : "r" ((__u16) selector(TSS_DES)));
   
    clean_IOPL_NT_flags();    /* Disable I/O on nonprivileged levels */
    clean_AM_flag();          /* Disable alignment check */
}
206
 
206