/kernel/trunk/arch/ia32/include/pm.h |
---|
55,7 → 55,7 |
#define DPL_KERNEL (PL_KERNEL<<5) |
#define DPL_USER (PL_USER<<5) |
#define IO_MAP_BASE (104) |
#define TSS_BASIC_SIZE 104 |
#ifndef __ASM__ |
67,6 → 67,7 |
__u16 limit; |
__u32 base; |
} __attribute__ ((packed)); |
typedef struct ptr_16_32 ptr_16_32_t; |
struct descriptor { |
unsigned limit_0_15: 16; |
80,6 → 81,7 |
unsigned granularity : 1; |
unsigned base_24_31: 8; |
} __attribute__ ((packed)); |
typedef struct descriptor descriptor_t; |
struct idescriptor { |
unsigned offset_0_15: 16; |
88,8 → 90,8 |
unsigned access: 8; |
unsigned offset_16_31: 16; |
} __attribute__ ((packed)); |
typedef struct idescriptor idescriptor_t; |
struct tss { |
__u16 link; |
unsigned : 16; |
131,23 → 133,24 |
__u16 iomap_base; |
__u8 iomap[0x10000+1]; /* 64K + 1 terminating byte */ |
} __attribute__ ((packed)); |
typedef struct tss tss_t; |
/* NOTE(review): this span is a diff rendering — each old declaration using a
 * bare struct tag is immediately followed by its replacement using the new
 * *_t typedef (ptr_16_32_t, descriptor_t, idescriptor_t, tss_t). */
extern struct ptr_16_32 gdtr; |
extern struct ptr_16_32 bootstrap_gdtr; |
extern struct ptr_16_32 protected_ap_gdtr; |
extern ptr_16_32_t gdtr; |
extern ptr_16_32_t bootstrap_gdtr; |
extern ptr_16_32_t protected_ap_gdtr; |
/* tss_p keeps the struct-tag form in both versions of the diff. */
extern struct tss *tss_p; |
extern struct descriptor gdt[]; |
extern descriptor_t gdt[]; |
extern void pm_init(void); |
/* Descriptor accessors: split base/limit into the GDT descriptor bitfields
 * (see gdt_setbase/gdt_setlimit definitions in pm.c in this same diff). */
extern void gdt_setbase(struct descriptor *d, __address base); |
extern void gdt_setlimit(struct descriptor *d, __u32 limit); |
extern void gdt_setbase(descriptor_t *d, __address base); |
extern void gdt_setlimit(descriptor_t *d, __u32 limit); |
extern void idt_init(void); |
extern void idt_setoffset(struct idescriptor *d, __address offset); |
extern void idt_setoffset(idescriptor_t *d, __address offset); |
extern void tss_initialize(struct tss *t); |
extern void tss_initialize(tss_t *t); |
extern void set_tls_desc(__address tls); |
#endif /* __ASM__ */ |
/kernel/trunk/arch/ia32/include/asm.h |
---|
256,7 → 256,7 |
* |
* @param gdtr_reg Address of memory from where to load GDTR. |
*/ |
/* Load GDTR from memory via the `lgdt` instruction.
 * NOTE(review): diff rendering — first line is the old prototype (struct tag),
 * second is its replacement (ptr_16_32_t typedef); the body is unchanged. */
static inline void gdtr_load(struct ptr_16_32 *gdtr_reg) |
static inline void gdtr_load(ptr_16_32_t *gdtr_reg) |
{ |
__asm__ volatile ("lgdt %0\n" : : "m" (*gdtr_reg)); |
} |
265,7 → 265,7 |
* |
* @param gdtr_reg Address of memory to where to load GDTR. |
*/ |
/* Store the current GDTR into memory via the `sgdt` instruction.
 * NOTE(review): diff rendering — old prototype followed by the typedef'd
 * replacement; body unchanged. */
static inline void gdtr_store(struct ptr_16_32 *gdtr_reg) |
static inline void gdtr_store(ptr_16_32_t *gdtr_reg) |
{ |
__asm__ volatile ("sgdt %0\n" : : "m" (*gdtr_reg)); |
} |
274,7 → 274,7 |
* |
* @param idtr_reg Address of memory from where to load IDTR. |
*/ |
/* Load IDTR from memory via the `lidt` instruction.
 * NOTE(review): diff rendering — old prototype followed by the typedef'd
 * replacement; body unchanged. */
static inline void idtr_load(struct ptr_16_32 *idtr_reg) |
static inline void idtr_load(ptr_16_32_t *idtr_reg) |
{ |
__asm__ volatile ("lidt %0\n" : : "m" (*idtr_reg)); |
} |
/kernel/trunk/arch/ia32/src/cpu/cpu.c |
---|
87,39 → 87,39 |
); |
} |
/* Per-CPU ia32 initialization.
 * NOTE(review): this span interleaves the OLD and NEW bodies of the diff —
 * each old statement is followed by its reformatted replacement (spacing,
 * declaration order, braced if/else). The logic itself is unchanged:
 *   1. point CPU->arch.tss at tss_p and compute iomap_base as the byte
 *      offset of the iomap[] array within the TSS;
 *   2. read CPUID leaf 1 feature words (edx -> fi, ecx -> efi);
 *   3. select fxsr vs. plain fsr FPU save/restore based on the FXSR bit;
 *   4. if SSE is present, OR CR4_OSFXSR_MASK | (1<<10) into CR4
 *      (bit 10 is CR4.OSXMMEXCPT on ia32 — presumably intended; confirm). */
void cpu_arch_init(void) |
{ |
__u32 help=0; |
cpuid_feature_info fi; |
cpuid_extended_feature_info efi; |
cpu_info_t info; |
__u32 help = 0; |
CPU->arch.tss = tss_p; |
CPU->fpu_owner=NULL; |
/* Offset of the I/O permission bitmap from the start of the TSS. */
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((__u8 *) CPU->arch.tss); |
cpuid_feature_info fi; |
cpuid_extended_feature_info efi; |
CPU->fpu_owner = NULL; |
cpu_info_t info; |
cpuid(1, &info); |
fi.word=info.cpuid_edx; |
efi.word=info.cpuid_ecx; |
fi.word = info.cpuid_edx; |
efi.word = info.cpuid_ecx; |
/* Old single-line if/else followed by the new braced, multi-line form. */
if(fi.bits.fxsr) fpu_fxsr(); |
else fpu_fsr(); |
if (fi.bits.fxsr) |
fpu_fxsr(); |
else |
fpu_fsr(); |
/* Old asm block (semicolon-separated) followed by the new one
 * (newline-separated, wrapped in braces); both set the same CR4 bits. */
if(fi.bits.sse) asm volatile ( |
"mov %%cr4,%0;\n" |
"or %1,%0;\n" |
"mov %0,%%cr4;\n" |
:"+r"(help) |
:"i"(CR4_OSFXSR_MASK|(1<<10)) |
); |
if (fi.bits.sse) { |
asm volatile ( |
"mov %%cr4,%0\n" |
"or %1,%0\n" |
"mov %0,%%cr4\n" |
: "+r" (help) |
: "i" (CR4_OSFXSR_MASK|(1<<10)) |
); |
} |
} |
void cpu_identify(void) |
{ |
cpu_info_t info; |
/kernel/trunk/arch/ia32/src/pm.c |
---|
52,7 → 52,7 |
* One is for GS register which holds pointer to the TLS thread |
* structure in it's base. |
*/ |
struct descriptor gdt[GDT_ITEMS] = { |
descriptor_t gdt[GDT_ITEMS] = { |
/* NULL descriptor */ |
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, |
/* KTEXT descriptor */ |
68,17 → 68,17 |
{ 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 } |
}; |
/* File-scope protected-mode state.
 * NOTE(review): diff rendering — each old struct-tag declaration is followed
 * by its typedef'd replacement; initializer values are unchanged. */
static struct idescriptor idt[IDT_ITEMS]; |
static idescriptor_t idt[IDT_ITEMS]; |
/* Statically allocated TSS used by the bootstrap CPU (see pm_init). */
static struct tss tss; |
static tss_t tss; |
struct tss *tss_p = NULL; |
tss_t *tss_p = NULL; |
/* gdtr is changed by kmp before next CPU is initialized */ |
/* bootstrap_gdtr holds the physical (KA2PA) address of gdt; gdtr the virtual. */
struct ptr_16_32 bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) }; |
struct ptr_16_32 gdtr = { .limit = sizeof(gdt), .base = (__address) gdt }; |
ptr_16_32_t bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) }; |
ptr_16_32_t gdtr = { .limit = sizeof(gdt), .base = (__address) gdt }; |
void gdt_setbase(struct descriptor *d, __address base) |
void gdt_setbase(descriptor_t *d, __address base) |
{ |
d->base_0_15 = base & 0xffff; |
d->base_16_23 = ((base) >> 16) & 0xff; |
85,13 → 85,13 |
d->base_24_31 = ((base) >> 24) & 0xff; |
} |
/* Split a 20-bit segment limit into the descriptor's limit_0_15 and
 * limit_16_19 bitfields. Bits above 19 of `limit` are silently discarded.
 * NOTE(review): diff rendering — old prototype followed by the typedef'd
 * replacement; body unchanged. */
void gdt_setlimit(struct descriptor *d, __u32 limit) |
void gdt_setlimit(descriptor_t *d, __u32 limit) |
{ |
d->limit_0_15 = limit & 0xffff; |
d->limit_16_19 = (limit >> 16) & 0xf; |
} |
void idt_setoffset(struct idescriptor *d, __address offset) |
void idt_setoffset(idescriptor_t *d, __address offset) |
{ |
/* |
* Offset is a linear address. |
100,7 → 100,7 |
d->offset_16_31 = offset >> 16; |
} |
/* Zero the whole TSS, including the I/O permission bitmap.
 * NOTE(review): diff rendering — old prototype followed by the typedef'd
 * replacement; the body still uses sizeof(struct tss), which is the same
 * type the tss_t typedef names. */
void tss_initialize(struct tss *t) |
void tss_initialize(tss_t *t) |
{ |
memsetb((__address) t, sizeof(struct tss), 0); |
} |
110,7 → 110,7 |
*/ |
void idt_init(void) |
{ |
struct idescriptor *d; |
idescriptor_t *d; |
int i; |
for (i = 0; i < IDT_ITEMS; i++) { |
141,16 → 141,13 |
/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */ |
/* The mask 0xffff8fff clears exactly bits 12-14 of EFLAGS via a
 * pushfl / and / popfl round-trip through EAX (clobbered).
 * NOTE(review): diff rendering — the first asm statement (L200-L209 style,
 * semicolon-separated, bare `asm`) is the OLD body; the second
 * (`__asm__ volatile`, newline-separated) is its replacement. Both perform
 * the identical EFLAGS update. */
static void clean_IOPL_NT_flags(void) |
{ |
asm |
( |
"pushfl;" |
"pop %%eax;" |
"and $0xffff8fff,%%eax;" |
"push %%eax;" |
"popfl;" |
: |
: |
:"%eax" |
__asm__ volatile ( |
"pushfl\n" |
"pop %%eax\n" |
"and $0xffff8fff, %%eax\n" |
"push %%eax\n" |
"popfl\n" |
: : : "eax" |
); |
} |
157,21 → 154,18 |
/* Clean AM(18) flag in CR0 register */ |
/* The mask 0xfffbffff clears exactly bit 18 of CR0, read-modify-written
 * through EAX (clobbered).
 * NOTE(review): diff rendering — the first asm statement is the OLD body,
 * the `__asm__ volatile` one its replacement; both perform the identical
 * CR0 update. */
static void clean_AM_flag(void) |
{ |
asm |
( |
"mov %%cr0,%%eax;" |
"and $0xFFFBFFFF,%%eax;" |
"mov %%eax,%%cr0;" |
: |
: |
:"%eax" |
__asm__ volatile ( |
"mov %%cr0, %%eax\n" |
"and $0xfffbffff, %%eax\n" |
"mov %%eax, %%cr0\n" |
: : : "eax" |
); |
} |
void pm_init(void) |
{ |
struct descriptor *gdt_p = (struct descriptor *) gdtr.base; |
struct ptr_16_32 idtr; |
descriptor_t *gdt_p = (descriptor_t *) gdtr.base; |
ptr_16_32_t idtr; |
/* |
* Update addresses in GDT and IDT to their virtual counterparts. |
195,7 → 189,7 |
tss_p = &tss; |
} |
else { |
tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC); |
tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC); |
if (!tss_p) |
panic("could not allocate TSS\n"); |
} |
207,7 → 201,7 |
gdt_p[TSS_DES].granularity = 1; |
gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p); |
gdt_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1); |
gdt_setlimit(&gdt_p[TSS_DES], sizeof(tss_t) - 1); |
/* |
* As of this moment, the current CPU has its own GDT pointing |
221,8 → 215,8 |
void set_tls_desc(__address tls) |
{ |
struct ptr_16_32 cpugdtr; |
struct descriptor *gdt_p = (struct descriptor *) cpugdtr.base; |
ptr_16_32_t cpugdtr; |
descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base; |
gdtr_store(&cpugdtr); |
gdt_setbase(&gdt_p[TLS_DES], tls); |
/kernel/trunk/arch/ia32/src/proc/scheduler.c |
---|
28,14 → 28,29 |
#include <proc/scheduler.h> |
#include <cpu.h> |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <arch.h> |
#include <arch/context.h> /* SP_DELTA */ |
#include <arch/debugger.h> |
#include <arch/pm.h> |
#include <arch/asm.h> |
/** Perform ia32 specific tasks needed before the new task is run. */ |
/* Intentionally empty: the scheduler hook exists for architecture parity;
 * ia32 has no per-task work to do here (per-thread work is done in
 * before_thread_runs_arch). */
void before_task_runs_arch(void) |
{ |
} |
/** Perform ia32 specific tasks needed before the new thread is scheduled. |
* |
* THREAD is locked and interrupts are disabled. |
*/ |
void before_thread_runs_arch(void) |
{ |
size_t iomap_size; |
ptr_16_32_t cpugdtr; |
descriptor_t *gdt_p; |
CPU->arch.tss->esp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA]; |
CPU->arch.tss->ss0 = selector(KDATA_DES); |
42,6 → 57,30 |
/* Set up TLS in GS register */ |
set_tls_desc(THREAD->arch.tls); |
/* |
* Switch the I/O Permission Bitmap, if necessary. |
* |
* First, copy the I/O Permission Bitmap. |
* This needs to be changed so that the |
* copying is avoided if the same task |
* was already running and the iomap did |
* not change. |
*/ |
spinlock_lock(&TASK->lock); |
iomap_size = TASK->arch.iomap_size; |
if (iomap_size) { |
ASSERT(TASK->arch.iomap); |
memcpy(CPU->arch.tss->iomap, TASK->arch.iomap, iomap_size); |
CPU->arch.tss->iomap[iomap_size] = 0xff; /* terminating byte */ |
} |
spinlock_unlock(&TASK->lock); |
/* Second, adjust TSS segment limit. */ |
gdtr_store(&cpugdtr); |
gdt_p = (descriptor_t *) cpugdtr.base; |
gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + iomap_size - 1); |
gdtr_load(&cpugdtr); |
#ifdef CONFIG_DEBUG_AS_WATCHPOINT |
/* Set watchpoint on AS to ensure that nobody sets it to zero */ |
if (CPU->id < BKPOINTS_MAX) |