/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia32
 * @{
 */
/** @file
 */

#include <arch/pm.h>
#include <config.h>
#include <arch/types.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <arch/context.h>
#include <panic.h>
#include <arch/mm/page.h>
#include <mm/slab.h>
#include <memstr.h>
#include <arch/boot/boot.h>
#include <interrupt.h>

/*
 * Early ia32 configuration functions and data structures.
 */

/*
 * We have no use for segmentation, so we set up flat mode. In this
 * mode, we use, for each privilege level, two segments spanning the
 * whole memory. One is for code and one is for data.
 *
 * One more segment is reserved for the GS register, which holds a
 * pointer to the TLS thread structure in its base.
 */
descriptor_t gdt[GDT_ITEMS] = {
	/* NULL descriptor */
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	/* KTEXT descriptor */
	{ 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
	/* KDATA descriptor */
	{ 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
	/* UTEXT descriptor */
	{ 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
	/* UDATA descriptor */
	{ 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
	/* TSS descriptor - setup will be completed later */
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	/* TLS descriptor */
	{ 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
	/* VESA Init descriptor */
#ifdef CONFIG_FB
	{ 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 }
#endif
};
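
/*
 * Note: the rest of this file refers to these slots through the selector()
 * helper. On ia32 a selector encodes the descriptor index in bits 3-15, so
 * for kernel (RPL 0) GDT entries the selector value is simply the slot index
 * shifted left by three - e.g. selector(KTEXT_DES) would be 0x08, assuming
 * KTEXT_DES names the second slot above.
 */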

static idescriptor_t idt[IDT_ITEMS];

static tss_t tss;

tss_t *tss_p = NULL;

/* gdtr is changed by kmp before the next CPU is initialized */
ptr_16_32_t bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((uintptr_t) gdt) };
ptr_16_32_t gdtr = { .limit = sizeof(gdt), .base = (uintptr_t) gdt };

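/*
 * An ia32 segment descriptor keeps the 32-bit base scattered over three
 * fields and the 20-bit limit over two, a leftover of the older 16-bit
 * descriptor layout, so the helpers below assemble the values piece by piece.
 */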
void gdt_setbase(descriptor_t *d, uintptr_t base)
{
	d->base_0_15 = base & 0xffff;
	d->base_16_23 = ((base) >> 16) & 0xff;
	d->base_24_31 = ((base) >> 24) & 0xff;
}

void gdt_setlimit(descriptor_t *d, uint32_t limit)
{
	d->limit_0_15 = limit & 0xffff;
	d->limit_16_19 = (limit >> 16) & 0xf;
}

void idt_setoffset(idescriptor_t *d, uintptr_t offset)
{
	/*
	 * Offset is a linear address.
	 */
	d->offset_0_15 = offset & 0xffff;
	d->offset_16_31 = offset >> 16;
}

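/* Zero the given Task State Segment so it starts out in a well-defined state. */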
void tss_initialize(tss_t *t)
{
	memsetb(t, sizeof(struct tss), 0);
}

/*
 * This function takes care of proper setup of the IDT entries.
 * The IDTR itself is loaded later, in pm_init().
 */
void idt_init(void)
{
	idescriptor_t *d;
	unsigned int i;

	for (i = 0; i < IDT_ITEMS; i++) {
		d = &idt[i];

		d->unused = 0;
		d->selector = selector(KTEXT_DES);

		d->access = AR_PRESENT | AR_INTERRUPT;	/* interrupt gate: interrupts are disabled on entry */

		if (i == VECTOR_SYSCALL) {
			/*
			 * The syscall interrupt gate must be callable from
			 * userland.
			 */
			d->access |= DPL_USER;
		}

		idt_setoffset(d, ((uintptr_t) interrupt_handlers) +
		    i * interrupt_handler_size);
	}
}


/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */
static void clean_IOPL_NT_flags(void)
{
	asm volatile (
		"pushfl\n"
		"pop %%eax\n"
154 | "and $0xffff8fff, %%eax\n" |
154 | "and $0xffff8fff, %%eax\n" |
155 | "push %%eax\n" |
155 | "push %%eax\n" |
156 | "popfl\n" |
156 | "popfl\n" |
157 | : : : "eax" |
157 | : : : "eax" |
158 | ); |
158 | ); |
159 | } |
159 | } |
160 | 160 | ||
/* Clean AM(18) flag in CR0 register */
static void clean_AM_flag(void)
{
	asm volatile (
		"mov %%cr0, %%eax\n"
166 | "and $0xfffbffff, %%eax\n" |
166 | "and $0xfffbffff, %%eax\n" |
167 | "mov %%eax, %%cr0\n" |
167 | "mov %%eax, %%cr0\n" |
168 | : : : "eax" |
168 | : : : "eax" |
169 | ); |
169 | ); |
170 | } |
170 | } |
171 | 171 | ||
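/*
 * Load the (virtual-address) GDTR and IDTR, set up this CPU's TSS and load
 * the TR register, and clear the IOPL, NT and AM flags.
 */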
void pm_init(void)
{
	descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
	ptr_16_32_t idtr;

	/*
	 * Update addresses in GDT and IDT to their virtual counterparts.
	 */
	idtr.limit = sizeof(idt);
	idtr.base = (uintptr_t) idt;
	gdtr_load(&gdtr);
	idtr_load(&idtr);

	/*
	 * Each CPU has its private GDT and TSS.
	 * All CPUs share one IDT.
	 */

	if (config.cpu_active == 1) {
		idt_init();
		/*
		 * NOTE: bootstrap CPU has statically allocated TSS, because
		 * the heap hasn't been initialized so far.
		 */
		tss_p = &tss;
	}
	else {
		tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
		if (!tss_p)
			panic("could not allocate TSS\n");
	}

	tss_initialize(tss_p);

	gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
	gdt_p[TSS_DES].special = 1;
	gdt_p[TSS_DES].granularity = 0;

	gdt_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
	gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

	/*
	 * As of this moment, the current CPU has its own GDT pointing
	 * to its own TSS. We just need to load the TR register.
	 */
	tr_load(selector(TSS_DES));

	clean_IOPL_NT_flags();	/* Disable I/O on nonprivileged levels and clear NT flag. */
	clean_AM_flag();	/* Disable alignment check */
}

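/* Set the base of the TLS segment descriptor in the running CPU's GDT. */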
void set_tls_desc(uintptr_t tls)
{
	ptr_16_32_t cpugdtr;
	descriptor_t *gdt_p;

	gdtr_store(&cpugdtr);
	gdt_p = (descriptor_t *) cpugdtr.base;
	gdt_setbase(&gdt_p[TLS_DES], tls);
	/* Reload gdt register to update GS in CPU */
	gdtr_load(&cpugdtr);
}

/*
 * Reboot the machine by initiating a triple fault.
 */
void arch_reboot(void)
{
	preemption_disable();
	ipl_t ipl = interrupts_disable();

	memsetb(idt, sizeof(idt), 0);

	ptr_16_32_t idtr;
	idtr.limit = sizeof(idt);
	idtr.base = (uintptr_t) idt;
	idtr_load(&idtr);

	interrupts_restore(ipl);
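	/*
	 * With the IDT zeroed out, the int3 below finds no valid gate; the
	 * resulting fault cascade ends in a triple fault, which resets the
	 * processor.
	 */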
	asm volatile (
		"int $0x03\n"
		"cli\n"
		"hlt\n"
	);
}

/** @}
 */