Subversion Repositories HelenOS

Rev 1963 → Rev 1968

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * Copyright (C) 2005-2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/pm.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <interrupt.h>
#include <mm/as.h>

#include <config.h>

#include <memstr.h>
#include <mm/slab.h>
#include <debug.h>

/*
 * There is no segmentation in long mode so we set up flat mode. In this
 * mode, we use, for each privilege level, two segments spanning the
 * whole memory. One is for code and one is for data.
 */
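
/*
 * Concretely, "flat" means base 0 and limit 0xfffff with 4 KiB granularity,
 * i.e. the full 4 GiB. In 64-bit mode the CPU ignores base and limit for
 * code and data segments anyway; only the 32-bit descriptors (e.g. the
 * KTEXT 32-bit entry used before the switch to long mode) still depend
 * on them.
 */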

descriptor_t gdt[GDT_ITEMS] = {
    /* NULL descriptor */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* KTEXT descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 1,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* KDATA descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* UDATA descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 1,
      .granularity = 1,
      .base_24_31  = 0 },
    /* UTEXT descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_USER,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 1,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* KTEXT 32-bit protected, for protected mode before long mode */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 1,
      .granularity = 1,
      .base_24_31  = 0 },
    /* TSS descriptor - set up will be completed later,
     * on AMD64 it is 64-bit - 2 items in table */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* VESA Init descriptor */
#ifdef CONFIG_FB
    { 0xffff, 0, VESA_INIT_SEGMENT>>12, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 }
#endif
};
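
/*
 * With 8-byte descriptor slots, the selector for GDT entry n is n * 8 (plus
 * the requested privilege level in the low two bits); the gdtselector()
 * macro used throughout this file is assumed to encode exactly that.
 */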

idescriptor_t idt[IDT_ITEMS];

ptr_16_64_t gdtr = { .limit = sizeof(gdt), .base = (__u64) gdt };
ptr_16_64_t idtr = { .limit = sizeof(idt), .base = (__u64) idt };

static tss_t tss;
tss_t *tss_p = NULL;
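
/** Set the base address in a TSS descriptor.
 *
 * On AMD64 the TSS descriptor is 16 bytes (two GDT slots) and carries a
 * full 64-bit base, split over four fields, hence the cast from the plain
 * 8-byte descriptor_t.
 */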
void gdt_tss_setbase(descriptor_t *d, __address base)
{
    tss_descriptor_t *td = (tss_descriptor_t *) d;

    td->base_0_15 = base & 0xffff;
    td->base_16_23 = ((base) >> 16) & 0xff;
    td->base_24_31 = ((base) >> 24) & 0xff;
    td->base_32_63 = ((base) >> 32);
}

void gdt_tss_setlimit(descriptor_t *d, __u32 limit)
{
    tss_descriptor_t *td = (tss_descriptor_t *) d;

    td->limit_0_15 = limit & 0xffff;
    td->limit_16_19 = (limit >> 16) & 0xf;
}
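
/*
 * Like the TSS base above, a long mode IDT gate holds its 64-bit handler
 * offset in three pieces (0_15, 16_31, 32_63), filled in below.
 */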
void idt_setoffset(idescriptor_t *d, __address offset)
{
    /*
     * Offset is a linear address.
     */
    d->offset_0_15 = offset & 0xffff;
    d->offset_16_31 = offset >> 16 & 0xffff;
    d->offset_32_63 = offset >> 32;
}

void tss_initialize(tss_t *t)
{
    memsetb((__address) t, sizeof(tss_t), 0);
}

/*
 * This function takes care of proper setup of IDT and IDTR.
 */
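/*
 * Each gate is pointed at the corresponding stub in the contiguous
 * interrupt_handlers table (interrupt_handler_size bytes per vector) and is
 * initially registered with the generic null_interrupt handler; the specific
 * fault handlers are installed afterwards.
 */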
void idt_init(void)
{
    idescriptor_t *d;
    int i;

    for (i = 0; i < IDT_ITEMS; i++) {
        d = &idt[i];

        d->unused = 0;
        d->selector = gdtselector(KTEXT_DES);

        d->present = 1;
        d->type = AR_INTERRUPT; /* masking interrupt */

        idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size);
        exc_register(i, "undef", (iroutine)null_interrupt);
    }

    exc_register( 7, "nm_fault", nm_fault);
    exc_register(12, "ss_fault", ss_fault);
    exc_register(13, "gp_fault", gp_fault);
    exc_register(14, "ident_mapper", ident_page_fault);
}

/** Initialize segmentation - code/data/idt tables
 *
 */
void pm_init(void)
{
    descriptor_t *gdt_p = (struct descriptor *) gdtr.base;
    tss_descriptor_t *tss_desc;

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: bootstrap CPU has statically allocated TSS, because
         * the heap hasn't been initialized so far.
         */
        tss_p = &tss;
    }
    else {
        /* We are going to use malloc, which may return
         * a non boot-mapped pointer, so initialize the CR3 register
         * ahead of page_init */
        write_cr3((__address) AS_KERNEL->page_table);

        tss_p = (struct tss *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("could not allocate TSS\n");
    }

    tss_initialize(tss_p);
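
    /*
     * The TSS descriptor was left zeroed in gdt[] above; complete it here,
     * now that the address of this CPU's TSS is known (static for the
     * bootstrap CPU, heap-allocated otherwise).
     */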
    tss_desc = (tss_descriptor_t *) (&gdt_p[TSS_DES]);
    tss_desc->present = 1;
    tss_desc->type = AR_TSS;
    tss_desc->dpl = PL_KERNEL;

    gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p);
    gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

    gdtr_load(&gdtr);
    idtr_load(&idtr);
    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    tr_load(gdtselector(TSS_DES));
}
