/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * Copyright (C) 2005-2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/pm.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <interrupt.h>
#include <mm/as.h>

#include <config.h>

#include <memstr.h>
#include <mm/slab.h>
#include <debug.h>

/*
 * There is no segmentation in long mode so we set up flat mode. In this
 * mode, for each privilege level, we use two segments spanning the whole
 * memory: one for code and one for data.
 */

descriptor_t gdt[GDT_ITEMS] = {
    /* NULL descriptor */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* KTEXT descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 1,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* KDATA descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* UDATA descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 1,
      .granularity = 1,
      .base_24_31  = 0 },
    /* UTEXT descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_USER,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 1,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* KTEXT descriptor for 32-bit protected mode, used before the switch to long mode */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 1,
      .granularity = 1,
      .base_24_31  = 0 },
    /* TSS descriptor - setup will be completed later;
     * on AMD64 it is 64-bit, so it occupies two entries in the table */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};

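/* Interrupt Descriptor Table; all CPUs share this single IDT. */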
idescriptor_t idt[IDT_ITEMS];

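/* Pseudo-descriptors (16-bit limit, 64-bit base) loaded into the GDTR and IDTR registers. */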
ptr_16_64_t gdtr = { .limit = sizeof(gdt), .base = (__u64) gdt };
ptr_16_64_t idtr = { .limit = sizeof(idt), .base = (__u64) idt };

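/*
 * The bootstrap CPU uses this statically allocated TSS, because the heap is
 * not yet initialized when pm_init() first runs; application CPUs allocate
 * their TSS from the heap (see pm_init() below).
 */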
static tss_t tss;
tss_t *tss_p = NULL;

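/** Set the TSS base address in a TSS descriptor.
 *
 * The 64-bit base is split across the base_0_15, base_16_23, base_24_31
 * and base_32_63 fields of the descriptor.
 */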
void gdt_tss_setbase(descriptor_t *d, __address base)
{
    tss_descriptor_t *td = (tss_descriptor_t *) d;

    td->base_0_15 = base & 0xffff;
    td->base_16_23 = (base >> 16) & 0xff;
    td->base_24_31 = (base >> 24) & 0xff;
    td->base_32_63 = base >> 32;
}

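/** Set the segment limit in a TSS descriptor.
 *
 * The 20-bit limit is split across the limit_0_15 and limit_16_19 fields.
 */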
void gdt_tss_setlimit(descriptor_t *d, __u32 limit)
{
    tss_descriptor_t *td = (tss_descriptor_t *) d;

    td->limit_0_15 = limit & 0xffff;
    td->limit_16_19 = (limit >> 16) & 0xf;
}

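/** Set the handler entry point in an IDT gate descriptor. */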
void idt_setoffset(idescriptor_t *d, __address offset)
{
    /*
     * Offset is a linear address.
     */
    d->offset_0_15 = offset & 0xffff;
    d->offset_16_31 = (offset >> 16) & 0xffff;
    d->offset_32_63 = offset >> 32;
}

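/** Clear the TSS by zeroing all of its fields. */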
void tss_initialize(tss_t *t)
{
    memsetb((__address) t, sizeof(tss_t), 0);
}

/*
 * This function takes care of proper setup of IDT and IDTR.
 */
void idt_init(void)
{
    idescriptor_t *d;
    int i;

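    /*
     * Point every IDT entry at its stub in interrupt_handlers and register
     * the default null_interrupt handler for it.
     */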
    for (i = 0; i < IDT_ITEMS; i++) {
        d = &idt[i];

        d->unused = 0;
        d->selector = gdtselector(KTEXT_DES);

        d->present = 1;
        d->type = AR_INTERRUPT;  /* interrupt gate - interrupts stay masked while the handler runs */

        idt_setoffset(d, ((__address) interrupt_handlers) + i * interrupt_handler_size);
        exc_register(i, "undef", (iroutine) null_interrupt);
    }

    exc_register( 7, "nm_fault", nm_fault);
    exc_register(12, "ss_fault", ss_fault);
    exc_register(13, "gp_fault", gp_fault);
    exc_register(14, "ident_mapper", ident_page_fault);
}

/** Initialize segmentation - code/data/IDT tables. */
void pm_init(void)
{
    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
    tss_descriptor_t *tss_desc;

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: bootstrap CPU has statically allocated TSS, because
         * the heap hasn't been initialized so far.
         */
        tss_p = &tss;
    }
    else {
        /*
         * We are going to use malloc, which may return a pointer that is
         * not mapped by the boot page tables, so initialize the CR3
         * register ahead of page_init.
         */
        write_cr3((__address) AS_KERNEL->page_table);

        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("could not allocate TSS\n");
    }

    tss_initialize(tss_p);

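    /*
     * Finish the TSS descriptor in this CPU's GDT; in long mode it is
     * 64-bit and occupies two consecutive GDT entries (see gdt[] above).
     */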
    tss_desc = (tss_descriptor_t *) (&gdt_p[TSS_DES]);
    tss_desc->present = 1;
    tss_desc->type = AR_TSS;
    tss_desc->dpl = PL_KERNEL;

    gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p);
    gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

    gdtr_load(&gdtr);
    idtr_load(&idtr);
    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    tr_load(gdtselector(TSS_DES));
}