/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * Copyright (C) 2005-2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/pm.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <interrupt.h>
#include <mm/as.h>

#include <config.h>

#include <memstr.h>
#include <mm/slab.h>
#include <debug.h>

/*
 * There is no segmentation in long mode, so we set up a flat model. For
 * each privilege level we use two segments spanning the whole memory:
 * one for code and one for data.
 */
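
/*
 * Note that in 64-bit mode the CPU ignores the base and limit of code and
 * data segment descriptors; only the attributes (DPL, present bit) and the
 * long mode (L) bit of code segments are honoured. The limits programmed
 * below still matter for the 32-bit KTEXT descriptor, which is used in
 * protected mode before the switch to long mode.
 */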

descriptor_t gdt[GDT_ITEMS] = {
    /* NULL descriptor */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* KTEXT descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 1,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* KDATA descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* UDATA descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 1,
      .granularity = 1,
      .base_24_31  = 0 },
    /* UTEXT descriptor */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_USER,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 1,
      .special     = 0,
      .granularity = 1,
      .base_24_31  = 0 },
    /* KTEXT 32-bit descriptor, used in protected mode before the switch to long mode */
    { .limit_0_15  = 0xffff,
      .base_0_15   = 0,
      .base_16_23  = 0,
      .access      = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available   = 0,
      .longmode    = 0,
      .special     = 1,
      .granularity = 1,
      .base_24_31  = 0 },
    /* TSS descriptor - its setup is completed later in pm_init();
     * on AMD64 the TSS descriptor is 16 bytes wide and thus occupies
     * two GDT entries */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* VESA Init descriptor */
#ifdef CONFIG_FB
    { 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 }
#endif
};
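
/*
 * The order of the entries above must match the descriptor indices
 * (KTEXT_DES, TSS_DES, ...) used below, because the selectors handed
 * to the CPU are derived from those indices via gdtselector().
 */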

idescriptor_t idt[IDT_ITEMS];

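/*
 * Pseudo-descriptors for the GDTR and IDTR registers; they are loaded
 * by gdtr_load() and idtr_load() in pm_init().
 */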
ptr_16_64_t gdtr = { .limit = sizeof(gdt), .base = (__u64) gdt };
ptr_16_64_t idtr = { .limit = sizeof(idt), .base = (__u64) idt };
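
/*
 * The bootstrap CPU uses the statically allocated TSS below, because no
 * heap is available when it runs pm_init(). Application processors get
 * their TSS from the slab allocator instead (see pm_init()).
 */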
static tss_t tss;
tss_t *tss_p = NULL;

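/** Set the base address in a TSS descriptor.
 *
 * The 64-bit base is split across the four base fields of the 16-byte
 * long mode TSS descriptor.
 */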
void gdt_tss_setbase(descriptor_t *d, __address base)
{
    tss_descriptor_t *td = (tss_descriptor_t *) d;

    td->base_0_15 = base & 0xffff;
    td->base_16_23 = (base >> 16) & 0xff;
    td->base_24_31 = (base >> 24) & 0xff;
    td->base_32_63 = base >> 32;
}

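/** Set the limit in a TSS descriptor.
 *
 * Only the low 20 bits of the limit fit into the descriptor.
 */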
void gdt_tss_setlimit(descriptor_t *d, __u32 limit)
{
    tss_descriptor_t *td = (tss_descriptor_t *) d;

    td->limit_0_15 = limit & 0xffff;
    td->limit_16_19 = (limit >> 16) & 0xf;
}

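/** Set the handler offset in an IDT gate descriptor.
 *
 * The 64-bit offset is split across the three offset fields of the
 * long mode gate descriptor.
 */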
void idt_setoffset(idescriptor_t *d, __address offset)
{
    /*
     * Offset is a linear address.
     */
    d->offset_0_15 = offset & 0xffff;
    d->offset_16_31 = (offset >> 16) & 0xffff;
    d->offset_32_63 = offset >> 32;
}

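/** Zero out a TSS structure before use. */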
void tss_initialize(tss_t *t)
{
    memsetb((__address) t, sizeof(tss_t), 0);
}

/*
 * This function takes care of proper setup of the IDT. The IDTR itself is
 * loaded later, in pm_init().
 */
void idt_init(void)
{
    idescriptor_t *d;
    int i;

    for (i = 0; i < IDT_ITEMS; i++) {
        d = &idt[i];

        d->unused = 0;
        d->selector = gdtselector(KTEXT_DES);

        d->present = 1;
        d->type = AR_INTERRUPT;  /* interrupt gate: interrupts are masked on entry */

        idt_setoffset(d, ((__address) interrupt_handlers) + i * interrupt_handler_size);
        exc_register(i, "undef", (iroutine) null_interrupt);
    }
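
    /* Vectors with dedicated handlers: 7 (#NM), 12 (#SS), 13 (#GP) and
     * 14 (#PF). All other vectors keep the default null_interrupt routine
     * registered above. */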
    exc_register( 7, "nm_fault", nm_fault);
    exc_register(12, "ss_fault", ss_fault);
    exc_register(13, "gp_fault", gp_fault);
    exc_register(14, "ident_mapper", ident_page_fault);
}

/** Initialize segmentation - code/data descriptor tables and the IDT.
 *
 */
void pm_init(void)
{
    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
    tss_descriptor_t *tss_desc;

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: the bootstrap CPU has a statically allocated TSS, because
         * the heap has not been initialized yet.
         */
        tss_p = &tss;
    } else {
        /*
         * We are going to use malloc, which may return a pointer that is
         * not covered by the boot mapping, so initialize the CR3 register
         * ahead of page_init().
         */
        write_cr3((__address) AS_KERNEL->page_table);

        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("could not allocate TSS\n");
    }

    tss_initialize(tss_p);

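    /*
     * Complete the TSS descriptor that was left zeroed in the static GDT:
     * mark it present and point it at this CPU's TSS.
     */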
    tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];
    tss_desc->present = 1;
    tss_desc->type = AR_TSS;
    tss_desc->dpl = PL_KERNEL;

    gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p);
    gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

    gdtr_load(&gdtr);
    idtr_load(&idtr);
    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    tr_load(gdtselector(TSS_DES));
}