/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * Copyright (C) 2005-2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/pm.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <interrupt.h>
#include <mm/as.h>

#include <config.h>

#include <memstr.h>
#include <mm/slab.h>
#include <debug.h>

/*
 * There is no segmentation in long mode so we set up flat mode. In this
 * mode, we use, for each privilege level, two segments spanning the
 * whole memory. One is for code and one is for data.
 */
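
/*
 * Background note (a summary added for orientation, not part of the
 * original file): in 64-bit mode the CPU ignores the base and limit of
 * code and data segments, so the flat descriptors below mainly carry the
 * privilege level, the code/data type and the long-mode (L) bit. The full
 * limit with granularity set still matters for the 32-bit protected-mode
 * descriptor used before the switch to long mode.
 */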

descriptor_t gdt[GDT_ITEMS] = {
    /* NULL descriptor */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* KTEXT descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 1,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* KDATA descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* UDATA descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 1,
      .granularity = 1,
      .base_24_31  = 0 },
    /* UTEXT descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_USER,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 1,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* KTEXT 32-bit protected, for protected mode before long mode */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 1,
      .granularity = 1,
      .base_24_31  = 0 },
    /* TSS descriptor - setup will be completed later;
     * on AMD64 it holds a 64-bit base and therefore occupies two GDT entries. */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* VESA Init descriptor */
    { 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 }
};
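
/*
 * Orientation note (a sketch, not part of the original file): each GDT
 * entry is 8 bytes, so a segment selector is essentially the descriptor
 * index converted to a byte offset (index * 8), with the requested
 * privilege level in its low bits; gdtselector() used below is assumed to
 * perform this index-to-offset conversion. The TSS descriptor is the
 * exception: its 64-bit base makes it span two consecutive entries, which
 * is why two slots are reserved for it above.
 */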

idescriptor_t idt[IDT_ITEMS];

ptr_16_64_t gdtr = { .limit = sizeof(gdt), .base = (__u64) gdt };
ptr_16_64_t idtr = { .limit = sizeof(idt), .base = (__u64) idt };

static tss_t tss;
tss_t *tss_p = NULL;

void gdt_tss_setbase(descriptor_t *d, __address base)
{
    tss_descriptor_t *td = (tss_descriptor_t *) d;

    td->base_0_15 = base & 0xffff;
    td->base_16_23 = (base >> 16) & 0xff;
    td->base_24_31 = (base >> 24) & 0xff;
    td->base_32_63 = base >> 32;
}
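
/*
 * Worked example (illustrative values only): for base 0xffffffff80123456
 * the assignments above yield base_0_15 = 0x3456, base_16_23 = 0x12,
 * base_24_31 = 0x80 and base_32_63 = 0xffffffff; the 64-bit base is simply
 * scattered over the non-contiguous base fields of the long-mode TSS
 * descriptor.
 */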

void gdt_tss_setlimit(descriptor_t *d, __u32 limit)
{
    tss_descriptor_t *td = (tss_descriptor_t *) d;

    td->limit_0_15 = limit & 0xffff;
    td->limit_16_19 = (limit >> 16) & 0xf;
}
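
/*
 * Worked example (illustrative only): pm_init() below passes
 * TSS_BASIC_SIZE - 1 as the limit. Assuming the architectural 0x68-byte
 * basic TSS, the limit 0x67 decomposes into limit_0_15 = 0x67 and
 * limit_16_19 = 0.
 */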

void idt_setoffset(idescriptor_t *d, __address offset)
{
    /*
     * Offset is a linear address.
     */
    d->offset_0_15 = offset & 0xffff;
    d->offset_16_31 = (offset >> 16) & 0xffff;
    d->offset_32_63 = offset >> 32;
}

void tss_initialize(tss_t *t)
{
    memsetb((__address) t, sizeof(tss_t), 0);
}

/*
 * This function takes care of proper setup of IDT and IDTR.
 */
void idt_init(void)
{
    idescriptor_t *d;
    int i;

    for (i = 0; i < IDT_ITEMS; i++) {
        d = &idt[i];

        d->unused = 0;
        d->selector = gdtselector(KTEXT_DES);

        d->present = 1;
        d->type = AR_INTERRUPT;  /* masking interrupt */

        idt_setoffset(d, ((__address) interrupt_handlers) + i * interrupt_handler_size);
        exc_register(i, "undef", (iroutine) null_interrupt);
    }

    exc_register( 7, "nm_fault", nm_fault);
    exc_register(12, "ss_fault", ss_fault);
    exc_register(13, "gp_fault", gp_fault);
    exc_register(14, "ident_mapper", ident_page_fault);
}
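
/*
 * Registering a handler for another exception vector follows the same
 * pattern as above (a sketch; my_invalid_op is a hypothetical iroutine,
 * not part of the kernel):
 *
 *     exc_register(6, "invalid_opcode", (iroutine) my_invalid_op);
 *
 * The IDT entry for the vector already points into interrupt_handlers, so
 * only the high-level registration changes.
 */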

/** Initialize segmentation - code/data/idt tables
 *
 */
void pm_init(void)
{
    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
    tss_descriptor_t *tss_desc;

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: the bootstrap CPU has a statically allocated TSS, because
         * the heap has not been initialized yet.
         */
        tss_p = &tss;
    } else {
        /*
         * We are going to use malloc, which may return a pointer that is
         * not boot-mapped, so initialize the CR3 register ahead of
         * page_init.
         */
        write_cr3((__address) AS_KERNEL->page_table);

        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("could not allocate TSS\n");
    }

    tss_initialize(tss_p);

    tss_desc = (tss_descriptor_t *) (&gdt_p[TSS_DES]);
    tss_desc->present = 1;
    tss_desc->type = AR_TSS;
    tss_desc->dpl = PL_KERNEL;

    gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p);
    gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

    gdtr_load(&gdtr);
    idtr_load(&idtr);
    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    tr_load(gdtselector(TSS_DES));
}