Rev 1112 | Rev 1187 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1112 | Rev 1186 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2001-2004 Jakub Jermar |
2 | * Copyright (C) 2001-2004 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | #include <arch/pm.h> |
29 | #include <arch/pm.h> |
30 | #include <config.h> |
30 | #include <config.h> |
31 | #include <arch/types.h> |
31 | #include <arch/types.h> |
32 | #include <typedefs.h> |
32 | #include <typedefs.h> |
33 | #include <arch/interrupt.h> |
33 | #include <arch/interrupt.h> |
34 | #include <arch/asm.h> |
34 | #include <arch/asm.h> |
35 | #include <arch/context.h> |
35 | #include <arch/context.h> |
36 | #include <panic.h> |
36 | #include <panic.h> |
37 | #include <arch/mm/page.h> |
37 | #include <arch/mm/page.h> |
38 | #include <mm/slab.h> |
38 | #include <mm/slab.h> |
39 | #include <memstr.h> |
39 | #include <memstr.h> |
40 | #include <arch/boot/boot.h> |
40 | #include <arch/boot/boot.h> |
41 | #include <interrupt.h> |
41 | #include <interrupt.h> |
42 | 42 | ||
43 | /* |
43 | /* |
44 | * Early ia32 configuration functions and data structures. |
44 | * Early ia32 configuration functions and data structures. |
45 | */ |
45 | */ |
46 | 46 | ||
47 | /* |
47 | /* |
48 | * We have no use for segmentation so we set up flat mode. In this |
48 | * We have no use for segmentation so we set up flat mode. In this |
49 | * mode, we use, for each privilege level, two segments spanning the |
49 | * mode, we use, for each privilege level, two segments spanning the |
50 | * whole memory. One is for code and one is for data. |
50 | * whole memory. One is for code and one is for data. |
51 | * |
51 | * |
52 | * One is for GS register which holds pointer to the TLS thread |
52 | * One is for GS register which holds pointer to the TLS thread |
53 | * structure in it's base. |
53 | * structure in it's base. |
54 | */ |
54 | */ |
/*
 * Global Descriptor Table template.
 *
 * Flat-model segments: for each privilege level one code and one data
 * segment, each with base 0 and limit 0xfffff with 4K granularity
 * (granularity bit set), i.e. spanning the whole 4 GiB address space.
 * The TSS descriptor's base/limit are filled in later by pm_init().
 */
struct descriptor gdt[GDT_ITEMS] = {
	/* NULL descriptor */
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	/* KTEXT descriptor */
	{ 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
	/* KDATA descriptor */
	{ 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
	/* UTEXT descriptor */
	{ 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
	/* UDATA descriptor */
	{ 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
	/* TSS descriptor - set up will be completed later */
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	/* TLS descriptor - its base is rewritten per-thread by set_tls_desc()
	 * so that GS can address the thread's TLS structure. */
	{ 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 }
};
70 | 70 | ||
/* Interrupt Descriptor Table; a single copy shared by all CPUs (see pm_init()). */
static struct idescriptor idt[IDT_ITEMS];

/* Statically allocated TSS for the bootstrap CPU; the heap is not yet
 * initialized when the bootstrap CPU runs pm_init(). */
static struct tss tss;

/* TSS of the CPU currently being initialized; points either at the static
 * bootstrap TSS above or at a heap-allocated one (see pm_init()). */
struct tss *tss_p = NULL;

/* gdtr is changed by kmp before next CPU is initialized */
/* Bootstrap GDT pointer: base is the physical address of gdt (KA2PA). */
struct ptr_16_32 bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
/* Runtime GDT pointer: base is the kernel virtual address of gdt. */
struct ptr_16_32 gdtr = { .limit = sizeof(gdt), .base = (__address) gdt };
80 | 80 | ||
81 | void gdt_setbase(struct descriptor *d, __address base) |
81 | void gdt_setbase(struct descriptor *d, __address base) |
82 | { |
82 | { |
83 | d->base_0_15 = base & 0xffff; |
83 | d->base_0_15 = base & 0xffff; |
84 | d->base_16_23 = ((base) >> 16) & 0xff; |
84 | d->base_16_23 = ((base) >> 16) & 0xff; |
85 | d->base_24_31 = ((base) >> 24) & 0xff; |
85 | d->base_24_31 = ((base) >> 24) & 0xff; |
86 | } |
86 | } |
87 | 87 | ||
88 | void gdt_setlimit(struct descriptor *d, __u32 limit) |
88 | void gdt_setlimit(struct descriptor *d, __u32 limit) |
89 | { |
89 | { |
90 | d->limit_0_15 = limit & 0xffff; |
90 | d->limit_0_15 = limit & 0xffff; |
91 | d->limit_16_19 = (limit >> 16) & 0xf; |
91 | d->limit_16_19 = (limit >> 16) & 0xf; |
92 | } |
92 | } |
93 | 93 | ||
94 | void idt_setoffset(struct idescriptor *d, __address offset) |
94 | void idt_setoffset(struct idescriptor *d, __address offset) |
95 | { |
95 | { |
96 | /* |
96 | /* |
97 | * Offset is a linear address. |
97 | * Offset is a linear address. |
98 | */ |
98 | */ |
99 | d->offset_0_15 = offset & 0xffff; |
99 | d->offset_0_15 = offset & 0xffff; |
100 | d->offset_16_31 = offset >> 16; |
100 | d->offset_16_31 = offset >> 16; |
101 | } |
101 | } |
102 | 102 | ||
103 | void tss_initialize(struct tss *t) |
103 | void tss_initialize(struct tss *t) |
104 | { |
104 | { |
105 | memsetb((__address) t, sizeof(struct tss), 0); |
105 | memsetb((__address) t, sizeof(struct tss), 0); |
106 | } |
106 | } |
107 | 107 | ||
108 | /* |
108 | /* |
109 | * This function takes care of proper setup of IDT and IDTR. |
109 | * This function takes care of proper setup of IDT and IDTR. |
110 | */ |
110 | */ |
111 | void idt_init(void) |
111 | void idt_init(void) |
112 | { |
112 | { |
113 | struct idescriptor *d; |
113 | struct idescriptor *d; |
114 | int i; |
114 | int i; |
115 | 115 | ||
116 | for (i = 0; i < IDT_ITEMS; i++) { |
116 | for (i = 0; i < IDT_ITEMS; i++) { |
117 | d = &idt[i]; |
117 | d = &idt[i]; |
118 | 118 | ||
119 | d->unused = 0; |
119 | d->unused = 0; |
120 | d->selector = selector(KTEXT_DES); |
120 | d->selector = selector(KTEXT_DES); |
121 | 121 | ||
122 | d->access = AR_PRESENT | AR_INTERRUPT; /* masking interrupt */ |
122 | d->access = AR_PRESENT | AR_INTERRUPT; /* masking interrupt */ |
123 | 123 | ||
124 | if (i == VECTOR_SYSCALL) { |
124 | if (i == VECTOR_SYSCALL) { |
125 | /* |
125 | /* |
126 | * The syscall interrupt gate must be calleable from userland. |
126 | * The syscall interrupt gate must be calleable from userland. |
127 | */ |
127 | */ |
128 | d->access |= DPL_USER; |
128 | d->access |= DPL_USER; |
129 | } |
129 | } |
130 | 130 | ||
131 | idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size); |
131 | idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size); |
132 | exc_register(i, "undef", (iroutine) null_interrupt); |
132 | exc_register(i, "undef", (iroutine) null_interrupt); |
133 | } |
133 | } |
134 | exc_register(13, "gp_fault", (iroutine) gp_fault); |
134 | exc_register(13, "gp_fault", (iroutine) gp_fault); |
135 | exc_register( 7, "nm_fault", (iroutine) nm_fault); |
135 | exc_register( 7, "nm_fault", (iroutine) nm_fault); |
136 | exc_register(12, "ss_fault", (iroutine) ss_fault); |
136 | exc_register(12, "ss_fault", (iroutine) ss_fault); |
137 | exc_register(19, "simd_fp", (iroutine) simd_fp_exception); |
137 | exc_register(19, "simd_fp", (iroutine) simd_fp_exception); |
138 | } |
138 | } |
139 | 139 | ||
140 | 140 | ||
/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */
static void clean_IOPL_NT_flags(void)
{
	/*
	 * Pop EFLAGS into EAX, clear bits 12-14 with the mask 0xffff8fff
	 * and push the result back into EFLAGS. IOPL=0 restricts I/O port
	 * instructions to kernel code; clearing NT prevents nested-task
	 * behavior on IRET.
	 */
	asm
	(
		"pushfl;"
		"pop %%eax;"
		"and $0xffff8fff,%%eax;"
		"push %%eax;"
		"popfl;"
		:
		:
		:"%eax"
	);
}
156 | 156 | ||
/* Clean AM(18) flag in CR0 register */
static void clean_AM_flag(void)
{
	/*
	 * Read CR0 through EAX, clear bit 18 (mask 0xFFFBFFFF) and write
	 * it back, so that alignment checking cannot be enabled.
	 */
	asm
	(
		"mov %%cr0,%%eax;"
		"and $0xFFFBFFFF,%%eax;"
		"mov %%eax,%%cr0;"
		:
		:
		:"%eax"
	);
}
170 | 170 | ||
/*
 * Per-CPU protected-mode initialization: load this CPU's GDTR/IDTR,
 * set up its TSS and load the TR register, then sanitize EFLAGS/CR0.
 *
 * The bootstrap CPU (config.cpu_active == 1) also initializes the
 * shared IDT and uses the statically allocated TSS; secondary CPUs
 * allocate their TSS from the heap.
 */
void pm_init(void)
{
	struct descriptor *gdt_p = (struct descriptor *) gdtr.base;
	struct ptr_16_32 idtr;

	/*
	 * Update addresses in GDT and IDT to their virtual counterparts.
	 */
	idtr.limit = sizeof(idt);
	idtr.base = (__address) idt;
	gdtr_load(&gdtr);
	idtr_load(&idtr);

	/*
	 * Each CPU has its private GDT and TSS.
	 * All CPUs share one IDT.
	 */

	if (config.cpu_active == 1) {
		idt_init();
		/*
		 * NOTE: bootstrap CPU has statically allocated TSS, because
		 * the heap hasn't been initialized so far.
		 */
		tss_p = &tss;
	}
	else {
		tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC);
		if (!tss_p)
			panic("could not allocate TSS\n");
	}

	tss_initialize(tss_p);

	/* Complete the TSS descriptor that the gdt template left zeroed. */
	gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
	gdt_p[TSS_DES].special = 1;
	gdt_p[TSS_DES].granularity = 1;

	gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p);
	gdt_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1);

	/*
	 * As of this moment, the current CPU has its own GDT pointing
	 * to its own TSS. We just need to load the TR register.
	 */
	tr_load(selector(TSS_DES));

	clean_IOPL_NT_flags(); /* Disable I/O on nonprivileged levels */
	clean_AM_flag();       /* Disable alignment check */
}
221 | 221 | ||
222 | void set_tls_desc(__address tls) |
222 | void set_tls_desc(__address tls) |
223 | { |
223 | { |
224 | struct ptr_16_32 cpugdtr; |
224 | struct ptr_16_32 cpugdtr; |
225 | struct descriptor *gdt_p = (struct descriptor *) cpugdtr.base; |
225 | struct descriptor *gdt_p = (struct descriptor *) cpugdtr.base; |
226 | 226 | ||
227 | __asm__ volatile ("sgdt %0\n" : : "m" (cpugdtr)); |
227 | gdtr_store(&cpugdtr); |
228 | - | ||
229 | gdt_setbase(&gdt_p[TLS_DES], tls); |
228 | gdt_setbase(&gdt_p[TLS_DES], tls); |
230 | /* Reload gdt register to update GS in CPU */ |
229 | /* Reload gdt register to update GS in CPU */ |
231 | __asm__ volatile ("lgdt %0\n" : : "m" (cpugdtr)); |
230 | gdtr_load(&cpugdtr); |
232 | } |
231 | } |
233 | 232 |