/kernel/trunk/generic/include/proc/scheduler.h
---
52,14 → 52,12 |
extern void scheduler(void); |
extern void kcpulb(void *arg); |
extern void before_thread_runs(void); |
extern void after_thread_ran(void); |
extern void sched_print_list(void); |
/* |
* To be defined by architectures: |
*/ |
extern void before_task_runs_arch(void); |
extern void before_thread_runs_arch(void); |
extern void after_thread_ran_arch(void); |
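The three *_arch() routines are the per-architecture half of this interface; the generic wrappers in scheduler.c forward to them. A port with no per-task or per-thread CPU state to manage can satisfy the contract with empty bodies, exactly as several of the ports below do. A minimal stub set for a hypothetical port:

/* arch/<port>/src/proc/scheduler.c -- hypothetical empty implementation */
#include <proc/scheduler.h>

void before_task_runs_arch(void)
{
}

void before_thread_runs_arch(void)
{
}

void after_thread_ran_arch(void)
{
}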
/kernel/trunk/generic/include/proc/task.h
---
52,7 → 52,7 |
phone_t phones[IPC_MAX_PHONES]; |
atomic_t active_calls; /**< Active asynchronous messages */ |
task_arch_t arch; |
task_arch_t arch; /**< Architecture specific task data. */ |
}; |
extern spinlock_t tasks_lock; |
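The arch member now carries a doc comment; the same changeset relies on architecture-specific task data further below (TASK->arch.iomap and TASK->arch.iomap_size in the ia32/amd64 scheduler hooks). Those definitions are outside the hunks shown here; presumably the task_arch_t of those two ports carries something along these lines (field names taken from the uses below, types assumed):

/* Assumed shape only -- not part of the hunks shown in this changeset. */
typedef struct {
	__u8 *iomap;		/* per-task I/O permission bitmap, NULL if none */
	size_t iomap_size;	/* number of valid bytes in iomap               */
} task_arch_t;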
/kernel/trunk/generic/src/proc/scheduler.c
---
47,10 → 47,19 |
#include <print.h> |
#include <debug.h> |
static void before_task_runs(void); |
static void before_thread_runs(void); |
static void after_thread_ran(void); |
static void scheduler_separated_stack(void); |
atomic_t nrdy; /**< Number of ready threads in the system. */ |
/** Carry out actions before new task runs. */ |
void before_task_runs(void) |
{ |
before_task_runs_arch(); |
} |
/** Take actions before new thread runs. |
* |
* Perform actions that need to be |
434,6 → 443,7 |
as_switch(as1, as2); |
} |
TASK = THREAD->task; |
before_task_runs(); |
} |
THREAD->state = Running; |
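The placement of the new call is deliberate: by the time before_task_runs() runs, as_switch() has installed the new address space and TASK has been re-pointed to THREAD->task, so the architecture hook may dereference TASK safely. A minimal sketch of what that guarantee permits (illustrative only; the real per-architecture hooks follow below):

/* Illustrative arch hook: TASK is valid here because the generic scheduler
 * assigns TASK = THREAD->task immediately before calling before_task_runs(). */
void before_task_runs_arch(void)
{
	/* e.g. inspect TASK->arch and pre-stage per-task CPU state */
}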
/kernel/trunk/arch/sparc64/src/proc/scheduler.c
---
34,6 → 34,11 |
#include <config.h> |
#include <align.h> |
/** Perform sparc64 specific tasks needed before the new task is run. */ |
void before_task_runs_arch(void) |
{ |
} |
/** Ensure that thread's kernel stack is locked in TLB. */ |
void before_thread_runs_arch(void) |
{ |
/kernel/trunk/arch/ia64/src/proc/scheduler.c
---
36,6 → 36,11 |
#include <config.h> |
#include <align.h> |
/** Perform ia64 specific tasks needed before the new task is run. */ |
void before_task_runs_arch(void) |
{ |
} |
/** Prepare kernel stack pointers in bank 0 r22 and r23 and make sure the stack is mapped in DTR. */ |
void before_thread_runs_arch(void) |
{ |
/kernel/trunk/arch/ppc32/src/proc/scheduler.c
---
34,6 → 34,12 |
__address supervisor_sp; |
__address supervisor_sp_physical; |
/** Perform ppc32 specific tasks needed before the new task is run. */ |
void before_task_runs_arch(void) |
{ |
} |
/** Perform ppc32 specific tasks needed before the new thread is scheduled. */ |
void before_thread_runs_arch(void) |
{ |
supervisor_sp = (__address) &THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA]; |
/kernel/trunk/arch/amd64/include/pm.h
---
66,7 → 66,7 |
#define DPL_KERNEL (PL_KERNEL<<5) |
#define DPL_USER (PL_USER<<5) |
#define IO_MAP_BASE (104) |
#define TSS_BASIC_SIZE 104 |
#ifndef __ASM__ |
82,6 → 82,7 |
unsigned granularity : 1; |
unsigned base_24_31: 8; |
} __attribute__ ((packed)); |
typedef struct descriptor descriptor_t; |
struct tss_descriptor { |
unsigned limit_0_15: 16; |
88,7 → 89,7 |
unsigned base_0_15: 16; |
unsigned base_16_23: 8; |
unsigned type: 4; |
unsigned : 1; |
unsigned : 1; |
unsigned dpl : 2; |
unsigned present : 1; |
unsigned limit_16_19: 4; |
99,6 → 100,7 |
unsigned base_32_63 : 32; |
unsigned : 32; |
} __attribute__ ((packed)); |
typedef struct tss_descriptor tss_descriptor_t; |
struct idescriptor { |
unsigned offset_0_15: 16; |
112,16 → 114,19 |
unsigned offset_32_63: 32; |
unsigned : 32; |
} __attribute__ ((packed)); |
typedef struct idescriptor idescriptor_t; |
struct ptr_16_64 { |
__u16 limit; |
__u64 base; |
} __attribute__ ((packed)); |
typedef struct ptr_16_64 ptr_16_64_t; |
struct ptr_16_32 { |
__u16 limit; |
__u32 base; |
} __attribute__ ((packed)); |
typedef struct ptr_16_32 ptr_16_32_t; |
struct tss { |
__u32 reserve1; |
141,25 → 146,26 |
__u16 iomap_base; |
__u8 iomap[0x10000 + 1]; /* 64K + 1 terminating byte */ |
} __attribute__ ((packed)); |
typedef struct tss tss_t; |
extern struct tss *tss_p; |
extern tss_t *tss_p; |
extern struct descriptor gdt[]; |
extern struct idescriptor idt[]; |
extern descriptor_t gdt[]; |
extern idescriptor_t idt[]; |
extern struct ptr_16_64 gdtr; |
extern struct ptr_16_32 bootstrap_gdtr; |
extern struct ptr_16_32 protected_ap_gdtr; |
extern ptr_16_64_t gdtr; |
extern ptr_16_32_t bootstrap_gdtr; |
extern ptr_16_32_t protected_ap_gdtr; |
extern void pm_init(void); |
extern void gdt_tss_setbase(struct descriptor *d, __address base); |
extern void gdt_tss_setlimit(struct descriptor *d, __u32 limit); |
extern void gdt_tss_setbase(descriptor_t *d, __address base); |
extern void gdt_tss_setlimit(descriptor_t *d, __u32 limit); |
extern void idt_init(void); |
extern void idt_setoffset(struct idescriptor *d, __address offset); |
extern void idt_setoffset(idescriptor_t *d, __address offset); |
extern void tss_initialize(struct tss *t); |
extern void tss_initialize(tss_t *t); |
#endif /* __ASM__ */ |
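TSS_BASIC_SIZE replaces the old IO_MAP_BASE name but keeps the value 104: that is the size of the fixed, architecturally defined part of the 64-bit TSS, i.e. the byte offset at which the I/O permission bitmap starts. Only reserve1, iomap_base and iomap appear in the hunks above; the sketch below assumes the remaining fields follow the standard amd64 hardware TSS layout and swaps the kernel's __uNN types for <stdint.h>. A self-contained sanity check of the constant:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct tss {
	uint32_t reserve1;
	uint64_t rsp0, rsp1, rsp2;
	uint64_t reserve2;
	uint64_t ist1, ist2, ist3, ist4, ist5, ist6, ist7;
	uint64_t reserve3;
	uint16_t reserve4;
	uint16_t iomap_base;
	uint8_t iomap[0x10000 + 1];	/* 64K + 1 terminating byte */
} __attribute__ ((packed));

int main(void)
{
	/* The fixed part of the TSS ends where the I/O bitmap begins. */
	assert(offsetof(struct tss, iomap) == 104);	/* TSS_BASIC_SIZE */
	return 0;
}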
/kernel/trunk/arch/amd64/src/cpu/cpu.c
---
118,10 → 118,10 |
void cpu_arch_init(void) |
{ |
CPU->arch.tss = tss_p; |
CPU->fpu_owner=NULL; |
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((__u8 *) CPU->arch.tss); |
CPU->fpu_owner = NULL; |
} |
void cpu_identify(void) |
{ |
cpu_info_t info; |
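cpu_arch_init() now also points iomap_base at the start of the in-TSS bitmap; the pointer subtraction simply yields the byte offset of the iomap member, i.e. TSS_BASIC_SIZE. An equivalent spelling, assuming an offsetof macro is available to the kernel (it is not used in the hunk above), would be:

/* Same value as the pointer arithmetic above: iomap starts right after the
 * 104-byte fixed part of the TSS. */
CPU->arch.tss->iomap_base = offsetof(struct tss, iomap);	/* == TSS_BASIC_SIZE */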
/kernel/trunk/arch/amd64/src/pm.c
---
46,7 → 46,7 |
* whole memory. One is for code and one is for data. |
*/ |
struct descriptor gdt[GDT_ITEMS] = { |
descriptor_t gdt[GDT_ITEMS] = { |
/* NULL descriptor */ |
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, |
/* KTEXT descriptor */ |
110,17 → 110,17 |
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } |
}; |
struct idescriptor idt[IDT_ITEMS]; |
idescriptor_t idt[IDT_ITEMS]; |
struct ptr_16_64 gdtr = {.limit = sizeof(gdt), .base= (__u64) gdt }; |
struct ptr_16_64 idtr = {.limit = sizeof(idt), .base= (__u64) idt }; |
ptr_16_64_t gdtr = {.limit = sizeof(gdt), .base= (__u64) gdt }; |
ptr_16_64_t idtr = {.limit = sizeof(idt), .base= (__u64) idt }; |
static struct tss tss; |
struct tss *tss_p = NULL; |
static tss_t tss; |
tss_t *tss_p = NULL; |
void gdt_tss_setbase(struct descriptor *d, __address base) |
void gdt_tss_setbase(descriptor_t *d, __address base) |
{ |
struct tss_descriptor *td = (struct tss_descriptor *) d; |
tss_descriptor_t *td = (tss_descriptor_t *) d; |
td->base_0_15 = base & 0xffff; |
td->base_16_23 = ((base) >> 16) & 0xff; |
128,15 → 128,15 |
td->base_32_63 = ((base) >> 32); |
} |
void gdt_tss_setlimit(struct descriptor *d, __u32 limit) |
void gdt_tss_setlimit(descriptor_t *d, __u32 limit) |
{ |
struct tss_descriptor *td = (struct tss_descriptor *) d; |
struct tss_descriptor *td = (tss_descriptor_t *) d; |
td->limit_0_15 = limit & 0xffff; |
td->limit_16_19 = (limit >> 16) & 0xf; |
} |
void idt_setoffset(struct idescriptor *d, __address offset) |
void idt_setoffset(idescriptor_t *d, __address offset) |
{ |
/* |
* Offset is a linear address. |
146,9 → 146,9 |
d->offset_32_63 = offset >> 32; |
} |
void tss_initialize(struct tss *t) |
void tss_initialize(tss_t *t) |
{ |
memsetb((__address) t, sizeof(struct tss), 0); |
memsetb((__address) t, sizeof(tss_t), 0); |
} |
/* |
156,7 → 156,7 |
*/ |
void idt_init(void) |
{ |
struct idescriptor *d; |
idescriptor_t *d; |
int i; |
for (i = 0; i < IDT_ITEMS; i++) { |
183,8 → 183,8 |
*/ |
void pm_init(void) |
{ |
struct descriptor *gdt_p = (struct descriptor *) gdtr.base; |
struct tss_descriptor *tss_desc; |
descriptor_t *gdt_p = (struct descriptor *) gdtr.base; |
tss_descriptor_t *tss_desc; |
/* |
* Each CPU has its private GDT and TSS. |
200,7 → 200,7 |
tss_p = &tss; |
} |
else { |
tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC); |
tss_p = (struct tss *) malloc(sizeof(tss_t), FRAME_ATOMIC); |
if (!tss_p) |
panic("could not allocate TSS\n"); |
} |
207,13 → 207,13 |
tss_initialize(tss_p); |
tss_desc = (struct tss_descriptor *) (&gdt_p[TSS_DES]); |
tss_desc = (tss_descriptor_t *) (&gdt_p[TSS_DES]); |
tss_desc->present = 1; |
tss_desc->type = AR_TSS; |
tss_desc->dpl = PL_KERNEL; |
gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p); |
gdt_tss_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1); |
gdt_tss_setlimit(&gdt_p[TSS_DES], sizeof(tss_t) - 1); |
gdtr_load(&gdtr); |
idtr_load(&idtr); |
/kernel/trunk/arch/amd64/src/proc/scheduler.c
---
28,6 → 28,7 |
#include <proc/scheduler.h> |
#include <cpu.h> |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <arch.h> |
#include <arch/context.h> /* SP_DELTA */ |
34,21 → 35,55 |
#include <arch/asm.h> |
#include <arch/debugger.h> |
#include <print.h> |
#include <arch/pm.h> |
/** Perform amd64 specific tasks needed before the new task is run. */ |
void before_task_runs_arch(void) |
{ |
} |
/** Perform amd64 specific tasks needed before the new thread is scheduled. */ |
void before_thread_runs_arch(void) |
{ |
size_t iomap_size; |
ptr_16_64_t cpugdtr; |
descriptor_t *gdt_p; |
CPU->arch.tss->rsp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA]; |
/* Syscall support - write address of thread stack pointer to |
* hidden part of gs */ |
swapgs(); |
write_msr(AMD_MSR_GS, |
(__u64)&THREAD->kstack); |
write_msr(AMD_MSR_GS, (__u64)&THREAD->kstack); |
swapgs(); |
/* TLS support - set FS to thread local storage */ |
write_msr(AMD_MSR_FS, THREAD->arch.tls); |
/* |
* Switch the I/O Permission Bitmap, if necessary. |
* |
* First, copy the I/O Permission Bitmap. |
* This needs to be changed so that the |
* copying is avoided if the same task |
* was already running and the iomap did |
* not change. |
*/ |
spinlock_lock(&TASK->lock); |
iomap_size = TASK->arch.iomap_size; |
if (iomap_size) { |
ASSERT(TASK->arch.iomap); |
memcpy(CPU->arch.tss->iomap, TASK->arch.iomap, iomap_size); |
CPU->arch.tss->iomap[iomap_size] = 0xff; /* terminating byte */ |
} |
spinlock_unlock(&TASK->lock); |
/* Second, adjust TSS segment limit. */ |
gdtr_store(&cpugdtr); |
gdt_p = (descriptor_t *) cpugdtr.base; |
gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + iomap_size - 1); |
gdtr_load(&cpugdtr); |
#ifdef CONFIG_DEBUG_AS_WATCHPOINT |
/* Set watchpoint on AS to ensure that nobody sets it to zero */ |
if (CPU->id < BKPOINTS_MAX) |
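The in-code comment already flags the unconditional memcpy as a temporary measure. Purely as an illustration of the optimisation it hints at (nothing below is part of this changeset, and the iomapped_task field does not exist in the tree), the copy could be skipped when this CPU already holds the bitmap of the task being scheduled:

/* Hypothetical: remember whose bitmap is currently loaded in this CPU's TSS.
 * 'iomapped_task' would be a new field in cpu_arch_t; it would also have to
 * be invalidated whenever a task's iomap is modified. */
spinlock_lock(&TASK->lock);
iomap_size = TASK->arch.iomap_size;
if (iomap_size && CPU->arch.iomapped_task != TASK) {
	ASSERT(TASK->arch.iomap);
	memcpy(CPU->arch.tss->iomap, TASK->arch.iomap, iomap_size);
	CPU->arch.tss->iomap[iomap_size] = 0xff;	/* terminating byte */
	CPU->arch.iomapped_task = TASK;
}
spinlock_unlock(&TASK->lock);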
/kernel/trunk/arch/ppc64/src/proc/scheduler.c
---
34,6 → 34,12 |
__address supervisor_sp; |
__address supervisor_sp_physical; |
/** Perform ppc64 specific tasks needed before the new task is run. */ |
void before_task_runs_arch(void) |
{ |
} |
/** Perform ppc64 specific tasks needed before the new thread is scheduled. */ |
void before_thread_runs_arch(void) |
{ |
supervisor_sp = (__address) &THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA]; |
/kernel/trunk/arch/mips32/src/mips32.c
---
136,6 → 136,12 |
; |
} |
/** Perform mips32 specific tasks needed before the new task is run. */ |
void before_task_runs_arch(void) |
{ |
} |
/** Perform mips32 specific tasks needed before the new thread is scheduled. */ |
void before_thread_runs_arch(void) |
{ |
supervisor_sp = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA]; |
/kernel/trunk/arch/ia32/include/pm.h
---
55,7 → 55,7 |
#define DPL_KERNEL (PL_KERNEL<<5) |
#define DPL_USER (PL_USER<<5) |
#define IO_MAP_BASE (104) |
#define TSS_BASIC_SIZE 104 |
#ifndef __ASM__ |
67,6 → 67,7 |
__u16 limit; |
__u32 base; |
} __attribute__ ((packed)); |
typedef struct ptr_16_32 ptr_16_32_t; |
struct descriptor { |
unsigned limit_0_15: 16; |
80,6 → 81,7 |
unsigned granularity : 1; |
unsigned base_24_31: 8; |
} __attribute__ ((packed)); |
typedef struct descriptor descriptor_t; |
struct idescriptor { |
unsigned offset_0_15: 16; |
88,8 → 90,8 |
unsigned access: 8; |
unsigned offset_16_31: 16; |
} __attribute__ ((packed)); |
typedef struct idescriptor idescriptor_t; |
struct tss { |
__u16 link; |
unsigned : 16; |
131,23 → 133,24 |
__u16 iomap_base; |
__u8 iomap[0x10000+1]; /* 64K + 1 terminating byte */ |
} __attribute__ ((packed)); |
typedef struct tss tss_t; |
extern struct ptr_16_32 gdtr; |
extern struct ptr_16_32 bootstrap_gdtr; |
extern struct ptr_16_32 protected_ap_gdtr; |
extern ptr_16_32_t gdtr; |
extern ptr_16_32_t bootstrap_gdtr; |
extern ptr_16_32_t protected_ap_gdtr; |
extern struct tss *tss_p; |
extern struct descriptor gdt[]; |
extern descriptor_t gdt[]; |
extern void pm_init(void); |
extern void gdt_setbase(struct descriptor *d, __address base); |
extern void gdt_setlimit(struct descriptor *d, __u32 limit); |
extern void gdt_setbase(descriptor_t *d, __address base); |
extern void gdt_setlimit(descriptor_t *d, __u32 limit); |
extern void idt_init(void); |
extern void idt_setoffset(struct idescriptor *d, __address offset); |
extern void idt_setoffset(idescriptor_t *d, __address offset); |
extern void tss_initialize(struct tss *t); |
extern void tss_initialize(tss_t *t); |
extern void set_tls_desc(__address tls); |
#endif /* __ASM__ */ |
/kernel/trunk/arch/ia32/include/asm.h
---
256,7 → 256,7 |
* |
* @param gdtr_reg Address of memory from where to load GDTR. |
*/ |
static inline void gdtr_load(struct ptr_16_32 *gdtr_reg) |
static inline void gdtr_load(ptr_16_32_t *gdtr_reg) |
{ |
__asm__ volatile ("lgdt %0\n" : : "m" (*gdtr_reg)); |
} |
265,7 → 265,7 |
* |
* @param gdtr_reg Address of memory to where to load GDTR. |
*/ |
static inline void gdtr_store(struct ptr_16_32 *gdtr_reg) |
static inline void gdtr_store(ptr_16_32_t *gdtr_reg) |
{ |
__asm__ volatile ("sgdt %0\n" : : "m" (*gdtr_reg)); |
} |
274,7 → 274,7 |
* |
* @param idtr_reg Address of memory from where to load IDTR. |
*/ |
static inline void idtr_load(struct ptr_16_32 *idtr_reg) |
static inline void idtr_load(ptr_16_32_t *idtr_reg) |
{ |
__asm__ volatile ("lidt %0\n" : : "m" (*idtr_reg)); |
} |
/kernel/trunk/arch/ia32/src/cpu/cpu.c
---
87,39 → 87,39 |
); |
} |
void cpu_arch_init(void) |
{ |
__u32 help=0; |
cpuid_feature_info fi; |
cpuid_extended_feature_info efi; |
cpu_info_t info; |
__u32 help = 0; |
CPU->arch.tss = tss_p; |
CPU->fpu_owner=NULL; |
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((__u8 *) CPU->arch.tss); |
cpuid_feature_info fi; |
cpuid_extended_feature_info efi; |
CPU->fpu_owner = NULL; |
cpu_info_t info; |
cpuid(1, &info); |
fi.word=info.cpuid_edx; |
efi.word=info.cpuid_ecx; |
fi.word = info.cpuid_edx; |
efi.word = info.cpuid_ecx; |
if(fi.bits.fxsr) fpu_fxsr(); |
else fpu_fsr(); |
if (fi.bits.fxsr) |
fpu_fxsr(); |
else |
fpu_fsr(); |
if(fi.bits.sse) asm volatile ( |
"mov %%cr4,%0;\n" |
"or %1,%0;\n" |
"mov %0,%%cr4;\n" |
:"+r"(help) |
:"i"(CR4_OSFXSR_MASK|(1<<10)) |
); |
if (fi.bits.sse) { |
asm volatile ( |
"mov %%cr4,%0\n" |
"or %1,%0\n" |
"mov %0,%%cr4\n" |
: "+r" (help) |
: "i" (CR4_OSFXSR_MASK|(1<<10)) |
); |
} |
} |
void cpu_identify(void) |
{ |
cpu_info_t info; |
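The rewritten SSE branch is functionally unchanged; the literal (1 << 10) is CR4.OSXMMEXCPT and CR4_OSFXSR_MASK presumably covers CR4.OSFXSR (bit 9), i.e. the two bits an operating system sets to advertise FXSAVE/FXRSTOR context switching and unmasked SIMD floating-point exception handling. A named constant would make that explicit; a possible spelling (the macro name is made up here, only the bit value is architectural):

#define CR4_OSXMMEXCPT_MASK	(1 << 10)	/* hypothetical name for the literal above */

if (fi.bits.sse) {
	asm volatile (
		"mov %%cr4, %0\n"
		"or %1, %0\n"
		"mov %0, %%cr4\n"
		: "+r" (help)
		: "i" (CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK)
	);
}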
/kernel/trunk/arch/ia32/src/pm.c
---
52,7 → 52,7 |
* One is for GS register which holds pointer to the TLS thread |
* structure in it's base. |
*/ |
struct descriptor gdt[GDT_ITEMS] = { |
descriptor_t gdt[GDT_ITEMS] = { |
/* NULL descriptor */ |
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, |
/* KTEXT descriptor */ |
68,17 → 68,17 |
{ 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 } |
}; |
static struct idescriptor idt[IDT_ITEMS]; |
static idescriptor_t idt[IDT_ITEMS]; |
static struct tss tss; |
static tss_t tss; |
struct tss *tss_p = NULL; |
tss_t *tss_p = NULL; |
/* gdtr is changed by kmp before next CPU is initialized */ |
struct ptr_16_32 bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) }; |
struct ptr_16_32 gdtr = { .limit = sizeof(gdt), .base = (__address) gdt }; |
ptr_16_32_t bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) }; |
ptr_16_32_t gdtr = { .limit = sizeof(gdt), .base = (__address) gdt }; |
void gdt_setbase(struct descriptor *d, __address base) |
void gdt_setbase(descriptor_t *d, __address base) |
{ |
d->base_0_15 = base & 0xffff; |
d->base_16_23 = ((base) >> 16) & 0xff; |
85,13 → 85,13 |
d->base_24_31 = ((base) >> 24) & 0xff; |
} |
void gdt_setlimit(struct descriptor *d, __u32 limit) |
void gdt_setlimit(descriptor_t *d, __u32 limit) |
{ |
d->limit_0_15 = limit & 0xffff; |
d->limit_16_19 = (limit >> 16) & 0xf; |
} |
void idt_setoffset(struct idescriptor *d, __address offset) |
void idt_setoffset(idescriptor_t *d, __address offset) |
{ |
/* |
* Offset is a linear address. |
100,7 → 100,7 |
d->offset_16_31 = offset >> 16; |
} |
void tss_initialize(struct tss *t) |
void tss_initialize(tss_t *t) |
{ |
memsetb((__address) t, sizeof(struct tss), 0); |
} |
110,7 → 110,7 |
*/ |
void idt_init(void) |
{ |
struct idescriptor *d; |
idescriptor_t *d; |
int i; |
for (i = 0; i < IDT_ITEMS; i++) { |
141,16 → 141,13 |
/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */ |
static void clean_IOPL_NT_flags(void) |
{ |
asm |
( |
"pushfl;" |
"pop %%eax;" |
"and $0xffff8fff,%%eax;" |
"push %%eax;" |
"popfl;" |
: |
: |
:"%eax" |
__asm__ volatile ( |
"pushfl\n" |
"pop %%eax\n" |
"and $0xffff8fff, %%eax\n" |
"push %%eax\n" |
"popfl\n" |
: : : "eax" |
); |
} |
157,21 → 154,18 |
/* Clean AM(18) flag in CR0 register */ |
static void clean_AM_flag(void) |
{ |
asm |
( |
"mov %%cr0,%%eax;" |
"and $0xFFFBFFFF,%%eax;" |
"mov %%eax,%%cr0;" |
: |
: |
:"%eax" |
__asm__ volatile ( |
"mov %%cr0, %%eax\n" |
"and $0xfffbffff, %%eax\n" |
"mov %%eax, %%cr0\n" |
: : : "eax" |
); |
} |
void pm_init(void) |
{ |
struct descriptor *gdt_p = (struct descriptor *) gdtr.base; |
struct ptr_16_32 idtr; |
descriptor_t *gdt_p = (descriptor_t *) gdtr.base; |
ptr_16_32_t idtr; |
/* |
* Update addresses in GDT and IDT to their virtual counterparts. |
195,7 → 189,7 |
tss_p = &tss; |
} |
else { |
tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC); |
tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC); |
if (!tss_p) |
panic("could not allocate TSS\n"); |
} |
207,7 → 201,7 |
gdt_p[TSS_DES].granularity = 1; |
gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p); |
gdt_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1); |
gdt_setlimit(&gdt_p[TSS_DES], sizeof(tss_t) - 1); |
/* |
* As of this moment, the current CPU has its own GDT pointing |
221,8 → 215,8 |
void set_tls_desc(__address tls) |
{ |
struct ptr_16_32 cpugdtr; |
struct descriptor *gdt_p = (struct descriptor *) cpugdtr.base; |
ptr_16_32_t cpugdtr; |
descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base; |
gdtr_store(&cpugdtr); |
gdt_setbase(&gdt_p[TLS_DES], tls); |
/kernel/trunk/arch/ia32/src/proc/scheduler.c
---
28,14 → 28,29 |
#include <proc/scheduler.h> |
#include <cpu.h> |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <arch.h> |
#include <arch/context.h> /* SP_DELTA */ |
#include <arch/debugger.h> |
#include <arch/pm.h> |
#include <arch/asm.h> |
/** Perform ia32 specific tasks needed before the new task is run. */ |
void before_task_runs_arch(void) |
{ |
} |
/** Perform ia32 specific tasks needed before the new thread is scheduled. |
* |
* THREAD is locked and interrupts are disabled. |
*/ |
void before_thread_runs_arch(void) |
{ |
size_t iomap_size; |
ptr_16_32_t cpugdtr; |
descriptor_t *gdt_p; |
CPU->arch.tss->esp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA]; |
CPU->arch.tss->ss0 = selector(KDATA_DES); |
42,6 → 57,30 |
/* Set up TLS in GS register */ |
set_tls_desc(THREAD->arch.tls); |
/* |
* Switch the I/O Permission Bitmap, if necessary. |
* |
* First, copy the I/O Permission Bitmap. |
* This needs to be changed so that the |
* copying is avoided if the same task |
* was already running and the iomap did |
* not change. |
*/ |
spinlock_lock(&TASK->lock); |
iomap_size = TASK->arch.iomap_size; |
if (iomap_size) { |
ASSERT(TASK->arch.iomap); |
memcpy(CPU->arch.tss->iomap, TASK->arch.iomap, iomap_size); |
CPU->arch.tss->iomap[iomap_size] = 0xff; /* terminating byte */ |
} |
spinlock_unlock(&TASK->lock); |
/* Second, adjust TSS segment limit. */ |
gdtr_store(&cpugdtr); |
gdt_p = (descriptor_t *) cpugdtr.base; |
gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + iomap_size - 1); |
gdtr_load(&cpugdtr); |
#ifdef CONFIG_DEBUG_AS_WATCHPOINT |
/* Set watchpoint on AS to ensure that nobody sets it to zero */ |
if (CPU->id < BKPOINTS_MAX) |
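The ia32 hook mirrors the amd64 one: copy the task's bitmap into the TSS, append the terminating byte, then adjust the TSS segment limit so that only the copied bytes are honoured. Because the processor may fetch the bitmap two bytes at a time, the byte just past the map must be all ones; that is what the 0xff store above provides. A worked example of the numbers involved (illustrative values only, using the names from the code above):

/* A task granted access to I/O ports 0..1023 needs 1024 bits of bitmap: */
iomap_size = 1024 / 8;				/* 128 bytes              */
CPU->arch.tss->iomap[iomap_size] = 0xff;	/* terminating byte       */
gdt_setlimit(&gdt_p[TSS_DES],
	TSS_BASIC_SIZE + iomap_size - 1);	/* 104 + 128 - 1 = 231    */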