Diff: Rev 3386 -> Rev 4153
Line 135:

 	td->base_32_63 = ((base) >> 32);
 }
 
 void gdt_tss_setlimit(descriptor_t *d, uint32_t limit)
 {
-	struct tss_descriptor *td = (tss_descriptor_t *) d;
+	tss_descriptor_t *td = (tss_descriptor_t *) d;
 
 	td->limit_0_15 = limit & 0xffff;
 	td->limit_16_19 = (limit >> 16) & 0xf;
 }
 
 void idt_setoffset(idescriptor_t *d, uintptr_t offset)
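Both setters in this hunk scatter their argument across non-contiguous descriptor fields: the 32-bit limit is split into limit_0_15 and limit_16_19, and the upper half of the 64-bit base lands in base_32_63. The following is a minimal, self-contained sketch of the same shift-and-mask packing and its reassembly; the struct is a hypothetical cut-down stand-in (the field names base_0_15, base_16_23 and base_24_31 are assumed here and do not reproduce the real long-mode descriptor layout), included only so the arithmetic can be checked in isolation.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    
    /* Hypothetical cut-down descriptor: only the base/limit pieces. */
    typedef struct {
    	uint16_t limit_0_15;
    	uint16_t base_0_15;
    	uint8_t base_16_23;
    	unsigned int limit_16_19 : 4;
    	uint8_t base_24_31;
    	uint32_t base_32_63;
    } tss_fields_t;
    
    int main(void)
    {
    	tss_fields_t td = { 0 };
    	uint64_t base = 0xffffffff80123456ULL;  /* example kernel-space address */
    	uint32_t limit = 0x67;                  /* a TSS_BASIC_SIZE - 1 style value */
    
    	/* Same splitting as gdt_tss_setbase()/gdt_tss_setlimit() above. */
    	td.base_0_15 = base & 0xffff;
    	td.base_16_23 = (base >> 16) & 0xff;
    	td.base_24_31 = (base >> 24) & 0xff;
    	td.base_32_63 = base >> 32;
    	td.limit_0_15 = limit & 0xffff;
    	td.limit_16_19 = (limit >> 16) & 0xf;
    
    	/* Reassemble the pieces and check the round trip. */
    	uint64_t rbase = (uint64_t) td.base_0_15 |
    	    ((uint64_t) td.base_16_23 << 16) |
    	    ((uint64_t) td.base_24_31 << 24) |
    	    ((uint64_t) td.base_32_63 << 32);
    	uint32_t rlimit = td.limit_0_15 | ((uint32_t) td.limit_16_19 << 16);
    
    	assert(rbase == base);
    	assert(rlimit == limit);
    	printf("base %#llx, limit %#x round-trip ok\n",
    	    (unsigned long long) rbase, rlimit);
    	return 0;
    }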
Line 183:

 /** Initialize segmentation - code/data/idt tables
  *
  */
 void pm_init(void)
 {
-	descriptor_t *gdt_p = (struct descriptor *) gdtr.base;
+	descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
 	tss_descriptor_t *tss_desc;
 
 	/*
 	 * Each CPU has its private GDT and TSS.
 	 * All CPUs share one IDT.
 	 */
 
 	if (config.cpu_active == 1) {
 		idt_init();
 		/*
 		 * NOTE: bootstrap CPU has statically allocated TSS, because
 		 * the heap hasn't been initialized so far.
 		 */
 		tss_p = &tss;
-	}
-	else {
+	} else {
 		/* We are going to use malloc, which may return
 		 * non boot-mapped pointer, initialize the CR3 register
 		 * ahead of page_init */
 		write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
 
-		tss_p = (struct tss *) malloc(sizeof(tss_t), FRAME_ATOMIC);
+		tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
 		if (!tss_p)
-			panic("could not allocate TSS\n");
+			panic("Cannot allocate TSS.");
 	}
 
 	tss_initialize(tss_p);
 
 	tss_desc = (tss_descriptor_t *) (&gdt_p[TSS_DES]);
 	tss_desc->present = 1;
 	tss_desc->type = AR_TSS;
 	tss_desc->dpl = PL_KERNEL;
 
 	gdt_tss_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
 	gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);
 
 	gdtr_load(&gdtr);
 	idtr_load(&idtr);
 	/*
 	 * As of this moment, the current CPU has its own GDT pointing
 	 * to its own TSS. We just need to load the TR register.