Subversion Repositories: HelenOS
Compare Revisions: Rev 1186 → Rev 1187

/kernel/trunk/arch/amd64/src/cpu/cpu.c
118,10 → 118,10
void cpu_arch_init(void)
{
CPU->arch.tss = tss_p;
-CPU->fpu_owner=NULL;
+CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((__u8 *) CPU->arch.tss);
+CPU->fpu_owner = NULL;
}
 
 
void cpu_identify(void)
{
cpu_info_t info;
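
The new line in cpu_arch_init() above stores, into the TSS's iomap_base field, the byte offset of the iomap[] array from the start of the TSS; that offset is what the CPU uses to locate the I/O permission bitmap inside the TSS. Below is a minimal standalone sketch of the same computation. The struct is a simplified stand-in, not the real HelenOS tss_t; only the pointer arithmetic mirrors the revision.

/* Sketch only: a simplified stand-in for tss_t, not the HelenOS definition. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint32_t reserved0;
	uint64_t rsp0, rsp1, rsp2;	/* privilege-level stack pointers */
	uint64_t reserved1;
	uint64_t ist[7];		/* interrupt stack table */
	uint64_t reserved2;
	uint16_t reserved3;
	uint16_t iomap_base;		/* offset of iomap[] within the TSS */
	uint8_t iomap[8192 + 1];	/* 65536 ports / 8 + terminating byte */
} __attribute__((packed)) tss_sketch_t;

int main(void)
{
	static tss_sketch_t tss;

	/* Same computation as in the revision: distance from TSS start to iomap[0]. */
	tss.iomap_base = (uint8_t *) &tss.iomap[0] - (uint8_t *) &tss;

	printf("iomap_base = %u, offsetof = %zu\n",
	    (unsigned) tss.iomap_base, offsetof(tss_sketch_t, iomap));
	return 0;
}

The pointer difference and offsetof() agree because iomap_base simply names where iomap[] begins relative to the TSS base.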
/kernel/trunk/arch/amd64/src/pm.c
46,7 → 46,7
* whole memory. One is for code and one is for data.
*/
 
-struct descriptor gdt[GDT_ITEMS] = {
+descriptor_t gdt[GDT_ITEMS] = {
/* NULL descriptor */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
/* KTEXT descriptor */
110,17 → 110,17
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
 
-struct idescriptor idt[IDT_ITEMS];
+idescriptor_t idt[IDT_ITEMS];
 
-struct ptr_16_64 gdtr = {.limit = sizeof(gdt), .base= (__u64) gdt };
-struct ptr_16_64 idtr = {.limit = sizeof(idt), .base= (__u64) idt };
+ptr_16_64_t gdtr = {.limit = sizeof(gdt), .base= (__u64) gdt };
+ptr_16_64_t idtr = {.limit = sizeof(idt), .base= (__u64) idt };
 
-static struct tss tss;
-struct tss *tss_p = NULL;
+static tss_t tss;
+tss_t *tss_p = NULL;
 
-void gdt_tss_setbase(struct descriptor *d, __address base)
+void gdt_tss_setbase(descriptor_t *d, __address base)
{
-struct tss_descriptor *td = (struct tss_descriptor *) d;
+tss_descriptor_t *td = (tss_descriptor_t *) d;
 
td->base_0_15 = base & 0xffff;
td->base_16_23 = ((base) >> 16) & 0xff;
128,15 → 128,15
td->base_32_63 = ((base) >> 32);
}
 
-void gdt_tss_setlimit(struct descriptor *d, __u32 limit)
+void gdt_tss_setlimit(descriptor_t *d, __u32 limit)
{
-struct tss_descriptor *td = (struct tss_descriptor *) d;
+struct tss_descriptor *td = (tss_descriptor_t *) d;
 
td->limit_0_15 = limit & 0xffff;
td->limit_16_19 = (limit >> 16) & 0xf;
}
 
-void idt_setoffset(struct idescriptor *d, __address offset)
+void idt_setoffset(idescriptor_t *d, __address offset)
{
/*
* Offset is a linear address.
146,9 → 146,9
d->offset_32_63 = offset >> 32;
}
 
-void tss_initialize(struct tss *t)
+void tss_initialize(tss_t *t)
{
-memsetb((__address) t, sizeof(struct tss), 0);
+memsetb((__address) t, sizeof(tss_t), 0);
}
 
/*
156,7 → 156,7
*/
void idt_init(void)
{
-struct idescriptor *d;
+idescriptor_t *d;
int i;
 
for (i = 0; i < IDT_ITEMS; i++) {
183,8 → 183,8
*/
void pm_init(void)
{
-struct descriptor *gdt_p = (struct descriptor *) gdtr.base;
-struct tss_descriptor *tss_desc;
+descriptor_t *gdt_p = (struct descriptor *) gdtr.base;
+tss_descriptor_t *tss_desc;
 
/*
* Each CPU has its private GDT and TSS.
200,7 → 200,7
tss_p = &tss;
}
else {
-tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC);
+tss_p = (struct tss *) malloc(sizeof(tss_t), FRAME_ATOMIC);
if (!tss_p)
panic("could not allocate TSS\n");
}
207,13 → 207,13
 
tss_initialize(tss_p);
 
-tss_desc = (struct tss_descriptor *) (&gdt_p[TSS_DES]);
+tss_desc = (tss_descriptor_t *) (&gdt_p[TSS_DES]);
tss_desc->present = 1;
tss_desc->type = AR_TSS;
tss_desc->dpl = PL_KERNEL;
gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p);
-gdt_tss_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1);
+gdt_tss_setlimit(&gdt_p[TSS_DES], sizeof(tss_t) - 1);
 
gdtr_load(&gdtr);
idtr_load(&idtr);
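
Beyond the struct-to-typedef renames (descriptor_t, idescriptor_t, ptr_16_64_t, tss_t), pm.c is where the TSS descriptor's base and limit get scattered across the GDT entry's bitfields by gdt_tss_setbase() and gdt_tss_setlimit(). A rough, self-contained sketch of that splitting follows; the bitfield layout is an assumed stand-in for tss_descriptor_t (the base_24_31 field in particular is not visible in this diff), and only the shift-and-mask logic mirrors the functions above.

#include <stdint.h>
#include <stdio.h>

/* Assumed 16-byte long-mode TSS descriptor layout; illustration only. */
typedef struct {
	unsigned limit_0_15  : 16;
	unsigned base_0_15   : 16;
	unsigned base_16_23  : 8;
	unsigned type        : 4;
	unsigned             : 1;
	unsigned dpl         : 2;
	unsigned present     : 1;
	unsigned limit_16_19 : 4;
	unsigned available   : 1;
	unsigned             : 2;
	unsigned granularity : 1;
	unsigned base_24_31  : 8;
	unsigned base_32_63  : 32;	/* upper dword, unique to long mode */
	unsigned             : 32;
} __attribute__((packed)) tss_descriptor_sketch_t;

static void sketch_setbase(tss_descriptor_sketch_t *td, uint64_t base)
{
	td->base_0_15 = base & 0xffff;
	td->base_16_23 = (base >> 16) & 0xff;
	td->base_24_31 = (base >> 24) & 0xff;
	td->base_32_63 = base >> 32;
}

static void sketch_setlimit(tss_descriptor_sketch_t *td, uint32_t limit)
{
	td->limit_0_15 = limit & 0xffff;
	td->limit_16_19 = (limit >> 16) & 0xf;
}

int main(void)
{
	tss_descriptor_sketch_t td = { 0 };

	sketch_setbase(&td, 0xffffffff80123456ULL);
	sketch_setlimit(&td, 104 + 8192 - 1);	/* e.g. basic TSS + full iomap */

	printf("base: %04x %02x %02x %08x  limit: %04x %x\n",
	    td.base_0_15, td.base_16_23, td.base_24_31, (unsigned) td.base_32_63,
	    td.limit_0_15, td.limit_16_19);
	return 0;
}

In pm_init() the same helpers are applied to &gdt_p[TSS_DES], and the scheduler.c change below re-runs gdt_tss_setlimit() on every context switch so the limit also covers the per-task iomap.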
/kernel/trunk/arch/amd64/src/proc/scheduler.c
28,6 → 28,7
 
#include <proc/scheduler.h>
#include <cpu.h>
+#include <proc/task.h>
#include <proc/thread.h>
#include <arch.h>
#include <arch/context.h> /* SP_DELTA */
34,21 → 35,55
#include <arch/asm.h>
#include <arch/debugger.h>
#include <print.h>
+#include <arch/pm.h>
 
+/** Perform amd64 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
+/** Perform amd64 specific tasks needed before the new thread is scheduled. */
void before_thread_runs_arch(void)
{
+size_t iomap_size;
+ptr_16_64_t cpugdtr;
+descriptor_t *gdt_p;
+
CPU->arch.tss->rsp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
 
/* Syscall support - write address of thread stack pointer to
* hidden part of gs */
swapgs();
-write_msr(AMD_MSR_GS,
-(__u64)&THREAD->kstack);
+write_msr(AMD_MSR_GS, (__u64)&THREAD->kstack);
swapgs();
 
/* TLS support - set FS to thread local storage */
write_msr(AMD_MSR_FS, THREAD->arch.tls);
 
+/*
+* Switch the I/O Permission Bitmap, if necessary.
+*
+* First, copy the I/O Permission Bitmap.
+* This needs to be changed so that the
+* copying is avoided if the same task
+* was already running and the iomap did
+* not change.
+*/
+spinlock_lock(&TASK->lock);
+iomap_size = TASK->arch.iomap_size;
+if (iomap_size) {
+ASSERT(TASK->arch.iomap);
+memcpy(CPU->arch.tss->iomap, TASK->arch.iomap, iomap_size);
+CPU->arch.tss->iomap[iomap_size] = 0xff; /* terminating byte */
+}
+spinlock_unlock(&TASK->lock);
+
+/* Second, adjust TSS segment limit. */
+gdtr_store(&cpugdtr);
+gdt_p = (descriptor_t *) cpugdtr.base;
+gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + iomap_size - 1);
+gdtr_load(&cpugdtr);
+
#ifdef CONFIG_DEBUG_AS_WATCHPOINT
/* Set watchpoint on AS to ensure that nobody sets it to zero */
if (CPU->id < BKPOINTS_MAX)
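
The before_thread_runs_arch() additions above copy the task's I/O permission bitmap into the per-CPU TSS, append the 0xff terminating byte, and widen the TSS limit to TSS_BASIC_SIZE + iomap_size - 1 so the CPU consults exactly the copied bytes. As a reminder of the bitmap convention this relies on, here is a minimal standalone sketch; the helper names and constants are illustrative, not HelenOS API, and only the bit semantics (clear bit = port allowed, set bit or byte past the limit = fault) follow the architecture.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IO_PORTS	65536
#define IOMAP_BYTES	(IO_PORTS / 8)

/* Illustrative helpers: clear a bit to grant a port, keep it set to deny. */
static void iomap_allow(uint8_t *iomap, unsigned port)
{
	iomap[port / 8] &= ~(1 << (port % 8));
}

static bool iomap_allowed(uint8_t *iomap, size_t iomap_size, unsigned port)
{
	if (port / 8 >= iomap_size)
		return false;	/* byte lies past the copied bitmap: access faults */
	return !(iomap[port / 8] & (1 << (port % 8)));
}

int main(void)
{
	static uint8_t iomap[IOMAP_BYTES];
	size_t iomap_size = 0x60 / 8 + 1;	/* cover ports up to 0x60 */

	memset(iomap, 0xff, sizeof(iomap));	/* deny everything by default */
	iomap_allow(iomap, 0x60);		/* e.g. grant one hypothetical port */

	printf("port 0x60: %s, port 0x64: %s\n",
	    iomap_allowed(iomap, iomap_size, 0x60) ? "allowed" : "denied",
	    iomap_allowed(iomap, iomap_size, 0x64) ? "allowed" : "denied");
	return 0;
}

As the comment in the hunk itself notes, the copy is currently unconditional; it could be skipped when the same task is rescheduled and its iomap has not changed.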