HelenOS Subversion Repository

Compare Revisions

Rev 4125 → Rev 4126

/trunk/kernel/arch/amd64/include/pm.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64
/** @addtogroup amd64
* @{
*/
/** @file
36,63 → 36,60
#define KERN_amd64_PM_H_
 
#ifndef __ASM__
# include <arch/types.h>
# include <arch/context.h>
#include <arch/types.h>
#include <arch/context.h>
#endif
 
#define IDT_ITEMS 64
#define GDT_ITEMS 8
#define IDT_ITEMS 64
#define GDT_ITEMS 8
 
 
#define NULL_DES 0
/* Warning: Do not reorder next items, unless you look into syscall.c!!! */
#define KTEXT_DES 1
#define KDATA_DES 2
#define UDATA_DES 3
#define UTEXT_DES 4
#define KTEXT32_DES 5
/* EndOfWarning */
#define TSS_DES 6
#define NULL_DES 0
/* Warning: Do not reorder the following items, unless you look into syscall.c! */
#define KTEXT_DES 1
#define KDATA_DES 2
#define UDATA_DES 3
#define UTEXT_DES 4
#define KTEXT32_DES 5
/* End of warning */
#define TSS_DES 6
 
 
 
#ifdef CONFIG_FB
 
#define VESA_INIT_DES 8
#define VESA_INIT_SEGMENT 0x8000
#undef GDT_ITEMS
#define GDT_ITEMS 9
#define VESA_INIT_DES 8
#define VESA_INIT_SEGMENT 0x8000
 
#endif /*CONFIG_FB*/
#undef GDT_ITEMS
#define GDT_ITEMS 9
 
#endif /* CONFIG_FB */
 
#define gdtselector(des) ((des) << 3)
#define idtselector(des) ((des) << 4)
 
#define gdtselector(des) ((des) << 3)
#define idtselector(des) ((des) << 4)
#define PL_KERNEL 0
#define PL_USER 3
 
#define PL_KERNEL 0
#define PL_USER 3
#define AR_PRESENT ( 1 << 7)
#define AR_DATA (2 << 3)
#define AR_CODE (3 << 3)
#define AR_WRITABLE (1 << 1)
#define AR_READABLE (1 << 1)
#define AR_TSS (0x09)
#define AR_INTERRUPT (0x0e)
#define AR_TRAP (0x0f)
 
#define AR_PRESENT (1<<7)
#define AR_DATA (2<<3)
#define AR_CODE (3<<3)
#define AR_WRITABLE (1<<1)
#define AR_READABLE (1<<1)
#define AR_TSS (0x9)
#define AR_INTERRUPT (0xe)
#define AR_TRAP (0xf)
#define DPL_KERNEL (PL_KERNEL << 5)
#define DPL_USER (PL_USER << 5)
 
#define DPL_KERNEL (PL_KERNEL<<5)
#define DPL_USER (PL_USER<<5)
#define TSS_BASIC_SIZE 104
#define TSS_IOMAP_SIZE (16 * 1024 + 1) /* 16K for bitmap + 1 terminating byte for convenience */
 
#define TSS_BASIC_SIZE 104
#define TSS_IOMAP_SIZE (16*1024+1) /* 16K for bitmap + 1 terminating byte for convenience */
#define IO_PORTS (64 * 1024)
 
#define IO_PORTS (64*1024)
 
#ifndef __ASM__
 
struct descriptor {
typedef struct {
unsigned limit_0_15: 16;
unsigned base_0_15: 16;
unsigned base_16_23: 8;
103,10 → 100,9
unsigned special: 1;
unsigned granularity : 1;
unsigned base_24_31: 8;
} __attribute__ ((packed));
typedef struct descriptor descriptor_t;
} __attribute__ ((packed)) descriptor_t;
 
struct tss_descriptor {
typedef struct {
unsigned limit_0_15: 16;
unsigned base_0_15: 16;
unsigned base_16_23: 8;
121,10 → 117,9
unsigned base_24_31: 8;
unsigned base_32_63 : 32;
unsigned : 32;
} __attribute__ ((packed));
typedef struct tss_descriptor tss_descriptor_t;
} __attribute__ ((packed)) tss_descriptor_t;
 
struct idescriptor {
typedef struct {
unsigned offset_0_15: 16;
unsigned selector: 16;
unsigned ist:3;
135,22 → 130,19
unsigned offset_16_31: 16;
unsigned offset_32_63: 32;
unsigned : 32;
} __attribute__ ((packed));
typedef struct idescriptor idescriptor_t;
} __attribute__ ((packed)) idescriptor_t;
 
struct ptr_16_64 {
typedef struct {
uint16_t limit;
uint64_t base;
} __attribute__ ((packed));
typedef struct ptr_16_64 ptr_16_64_t;
} __attribute__ ((packed)) ptr_16_64_t;
 
struct ptr_16_32 {
typedef struct {
uint16_t limit;
uint32_t base;
} __attribute__ ((packed));
typedef struct ptr_16_32 ptr_16_32_t;
} __attribute__ ((packed)) ptr_16_32_t;
 
struct tss {
typedef struct {
uint32_t reserve1;
uint64_t rsp0;
uint64_t rsp1;
167,8 → 159,7
uint16_t reserve4;
uint16_t iomap_base;
uint8_t iomap[TSS_IOMAP_SIZE];
} __attribute__ ((packed));
typedef struct tss tss_t;
} __attribute__ ((packed)) tss_t;
 
extern tss_t *tss_p;
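
The dominant change in this header is mechanical: each separately named struct followed by a typedef (struct descriptor { ... }; typedef struct descriptor descriptor_t;) is collapsed into a single anonymous-struct typedef. A minimal sketch of the two equivalent forms, using toy names (old_desc_t, new_desc_t) rather than the HelenOS types:

#include <stdio.h>

/* Old style: a named struct plus a separate typedef referring to its tag. */
struct old_desc {
        unsigned limit_0_15 : 16;
        unsigned base_0_15 : 16;
        unsigned base_16_23 : 8;
} __attribute__((packed));
typedef struct old_desc old_desc_t;

/* New style (as of this revision): one anonymous-struct typedef. */
typedef struct {
        unsigned limit_0_15 : 16;
        unsigned base_0_15 : 16;
        unsigned base_16_23 : 8;
} __attribute__((packed)) new_desc_t;

int main(void)
{
        /* Both forms describe the same packed layout (40 bits = 5 bytes). */
        printf("%zu %zu\n", sizeof(old_desc_t), sizeof(new_desc_t));
        return 0;
}

Because the anonymous form leaves no struct tag behind, every remaining struct descriptor, struct tss, or struct ptr_16_64 reference elsewhere in the tree has to switch to the corresponding *_t typedef, which is exactly what the asm.h, cpu.h, pm.c, smp.c, and ddi.c hunks below do.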
 
/trunk/kernel/arch/amd64/include/asm.h
342,7 → 342,7
* @param gdtr_reg Address of memory from where to load GDTR.
*
*/
static inline void gdtr_load(struct ptr_16_64 *gdtr_reg)
static inline void gdtr_load(ptr_16_64_t *gdtr_reg)
{
asm volatile (
"lgdtq %[gdtr_reg]\n"
355,7 → 355,7
* @param gdtr_reg Address of memory to where to load GDTR.
*
*/
static inline void gdtr_store(struct ptr_16_64 *gdtr_reg)
static inline void gdtr_store(ptr_16_64_t *gdtr_reg)
{
asm volatile (
"sgdtq %[gdtr_reg]\n"
368,7 → 368,7
* @param idtr_reg Address of memory from where to load IDTR.
*
*/
static inline void idtr_load(struct ptr_16_64 *idtr_reg)
static inline void idtr_load(ptr_16_64_t *idtr_reg)
{
asm volatile (
"lidtq %[idtr_reg]\n"
/trunk/kernel/arch/amd64/include/cpu.h
35,9 → 35,9
#ifndef KERN_amd64_CPU_H_
#define KERN_amd64_CPU_H_
 
#define RFLAGS_IF (1 << 9)
#define RFLAGS_DF (1 << 10)
#define RFLAGS_RF (1 << 16)
#define RFLAGS_IF (1 << 9)
#define RFLAGS_DF (1 << 10)
#define RFLAGS_RF (1 << 16)
 
#define EFER_MSR_NUM 0xc0000080
#define AMD_SCE_FLAG 0
62,17 → 62,15
int family;
int model;
int stepping;
struct tss *tss;
tss_t *tss;
count_t iomapver_copy; /** Copy of TASK's I/O Permission bitmap generation count. */
count_t iomapver_copy; /** Copy of TASK's I/O Permission bitmap generation count. */
} cpu_arch_t;
 
struct star_msr {
};
 
struct lstar_msr {
};
 
extern void set_efer_flag(int flag);
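
Apart from re-typing the tss pointer, cpu.h carries the EFER constants used by set_efer_flag(). As a hedged sketch only, setting an EFER bit amounts to the generic read-modify-write below; this is plain x86 MSR handling in GNU-C inline assembly, not the HelenOS implementation, and rdmsr/wrmsr are privileged instructions, so the helpers are meaningful in kernel context only:

#include <stdint.h>
#include <stdio.h>

#define EFER_MSR_NUM  0xc0000080
#define AMD_SCE_FLAG  0   /* bit 0: SYSCALL/SYSRET enable */

static inline uint64_t read_msr(uint32_t msr)
{
        uint32_t lo, hi;
        asm volatile ("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
        return ((uint64_t) hi << 32) | lo;
}

static inline void write_msr(uint32_t msr, uint64_t value)
{
        asm volatile ("wrmsr" : : "c" (msr),
            "a" ((uint32_t) value), "d" ((uint32_t) (value >> 32)));
}

static inline void set_efer_flag(int flag)
{
        write_msr(EFER_MSR_NUM, read_msr(EFER_MSR_NUM) | (1ULL << flag));
}

int main(void)
{
        /* Executing rdmsr/wrmsr needs CPL 0; here we only print the mask. */
        printf("EFER MSR %#x, SCE mask %#llx\n",
            (unsigned) EFER_MSR_NUM, 1ULL << AMD_SCE_FLAG);
        return 0;
}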
/trunk/kernel/arch/amd64/src/pm.c
137,8 → 137,8
 
void gdt_tss_setlimit(descriptor_t *d, uint32_t limit)
{
struct tss_descriptor *td = (tss_descriptor_t *) d;
 
tss_descriptor_t *td = (tss_descriptor_t *) d;
td->limit_0_15 = limit & 0xffff;
td->limit_16_19 = (limit >> 16) & 0xf;
}
185,14 → 185,14
*/
void pm_init(void)
{
descriptor_t *gdt_p = (struct descriptor *) gdtr.base;
descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
tss_descriptor_t *tss_desc;
 
/*
* Each CPU has its private GDT and TSS.
* All CPUs share one IDT.
*/
 
if (config.cpu_active == 1) {
idt_init();
/*
200,20 → 200,19
* the heap hasn't been initialized so far.
*/
tss_p = &tss;
}
else {
} else {
/* We are going to use malloc, which may return
* non boot-mapped pointer, initialize the CR3 register
* ahead of page_init */
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
 
tss_p = (struct tss *) malloc(sizeof(tss_t), FRAME_ATOMIC);
tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
if (!tss_p)
panic("Cannot allocate TSS.");
}
 
tss_initialize(tss_p);
 
tss_desc = (tss_descriptor_t *) (&gdt_p[TSS_DES]);
tss_desc->present = 1;
tss_desc->type = AR_TSS;
221,7 → 220,7
gdt_tss_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);
 
gdtr_load(&gdtr);
idtr_load(&idtr);
/*
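
pm_init() above indexes the GDT with the *_DES constants, and the TSS descriptor it fills in is later activated through tr_load(gdtselector(TSS_DES)) in ddi.c. A small worked example (plain arithmetic, not repository code) of the selector values that fall out of the amd64 layout defined in pm.h; the shift by 3 exists because the low three bits of an x86 selector hold the RPL and the table-indicator bit:

#include <stdio.h>

#define gdtselector(des)  ((des) << 3)

#define NULL_DES     0
#define KTEXT_DES    1
#define KDATA_DES    2
#define UDATA_DES    3
#define UTEXT_DES    4
#define KTEXT32_DES  5
#define TSS_DES      6

#define PL_USER      3

int main(void)
{
        /* Kernel code/data selectors, used with RPL 0. */
        printf("KTEXT: 0x%02x\n", gdtselector(KTEXT_DES));            /* 0x08 */
        printf("KDATA: 0x%02x\n", gdtselector(KDATA_DES));            /* 0x10 */
        /* User segments are typically loaded with RPL 3 or'ed in. */
        printf("UDATA: 0x%02x\n", gdtselector(UDATA_DES) | PL_USER);  /* 0x1b */
        printf("UTEXT: 0x%02x\n", gdtselector(UTEXT_DES) | PL_USER);  /* 0x23 */
        /* ltr takes the TSS selector. */
        printf("TSS:   0x%02x\n", gdtselector(TSS_DES));              /* 0x30 */
        return 0;
}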
/trunk/kernel/arch/amd64/src/ddi/ddi.c
57,15 → 57,15
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
count_t bits;
 
bits = ioaddr + size;
if (bits > IO_PORTS)
return ENOENT;
 
if (task->arch.iomap.bits < bits) {
bitmap_t oldiomap;
uint8_t *newmap;
/*
* The I/O permission bitmap is too small and needs to be grown.
*/
77,17 → 77,17
bitmap_initialize(&oldiomap, task->arch.iomap.map,
task->arch.iomap.bits);
bitmap_initialize(&task->arch.iomap, newmap, bits);
 
/*
* Mark the new range inaccessible.
*/
bitmap_set_range(&task->arch.iomap, oldiomap.bits,
bits - oldiomap.bits);
 
/*
* In case there really existed smaller iomap,
* copy its contents and deallocate it.
*/
*/
if (oldiomap.bits) {
bitmap_copy(&task->arch.iomap, &oldiomap,
oldiomap.bits);
94,17 → 94,17
free(oldiomap.map);
}
}
 
/*
* Enable the range and we are done.
*/
bitmap_clear_range(&task->arch.iomap, (index_t) ioaddr, (count_t) size);
 
/*
* Increment I/O Permission bitmap generation counter.
*/
task->arch.iomapver++;
 
return 0;
}
 
122,7 → 122,7
descriptor_t *gdt_p;
tss_descriptor_t *tss_desc;
count_t ver;
 
/* First, copy the I/O Permission Bitmap. */
spinlock_lock(&TASK->lock);
ver = TASK->arch.iomapver;
140,7 → 140,7
bitmap_set_range(&iomap, ALIGN_UP(TASK->arch.iomap.bits, 8), 8);
}
spinlock_unlock(&TASK->lock);
 
/*
* Second, adjust TSS segment limit.
* Take the extra ending byte with all bits set into account.
151,10 → 151,10
gdtr_load(&cpugdtr);
/*
* Before we load new TSS limit, the current TSS descriptor
* type must be changed to describe inactive TSS.
*/
tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];
* Before we load new TSS limit, the current TSS descriptor
* type must be changed to describe inactive TSS.
*/
tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];
tss_desc->type = AR_TSS;
tr_load(gdtselector(TSS_DES));
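
ddi_iospace_enable_arch() grows the task's I/O permission bitmap on demand, marks the newly added range inaccessible, and only then clears the requested port range (on x86 a cleared bit in the TSS I/O bitmap grants access, a set bit denies it). Below is a user-space simulation of that grow-then-clear sequence, with simple stand-in bitmap helpers and realloc() instead of the kernel's bitmap_t API and allocator:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define IO_PORTS  (64 * 1024)

static uint8_t *iomap;      /* current bitmap, one bit per port */
static size_t iomap_bits;   /* number of valid bits in iomap */

static void bit_set(uint8_t *map, size_t i)   { map[i / 8] |= (uint8_t) (1 << (i % 8)); }
static void bit_clear(uint8_t *map, size_t i) { map[i / 8] &= (uint8_t) ~(1 << (i % 8)); }
static int  bit_get(const uint8_t *map, size_t i) { return (map[i / 8] >> (i % 8)) & 1; }

static int iospace_enable(size_t ioaddr, size_t size)
{
        size_t bits = ioaddr + size;
        size_t i;

        if (bits > IO_PORTS)
                return -1;

        if (iomap_bits < bits) {
                /* Grow the bitmap and mark every newly covered port
                 * inaccessible first. realloc() keeps the old contents, so
                 * the kernel's explicit bitmap_copy() step is not needed. */
                uint8_t *newmap = realloc(iomap, (bits + 7) / 8);
                if (!newmap)
                        return -1;
                for (i = iomap_bits; i < bits; i++)
                        bit_set(newmap, i);
                iomap = newmap;
                iomap_bits = bits;
        }

        /* Enable (clear) just the requested range. */
        for (i = ioaddr; i < ioaddr + size; i++)
                bit_clear(iomap, i);
        return 0;
}

int main(void)
{
        if (iospace_enable(0x3f8, 8) != 0)   /* e.g. grant a COM1-sized range */
                return 1;
        printf("port 0x3f8 allowed: %d\n", !bit_get(iomap, 0x3f8));  /* 1 */
        printf("port 0x200 allowed: %d\n", !bit_get(iomap, 0x200));  /* 0 */
        free(iomap);
        return 0;
}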
/trunk/kernel/arch/ia32/include/pm.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32
/** @addtogroup ia32
* @{
*/
/** @file
35,61 → 35,59
#ifndef KERN_ia32_PM_H_
#define KERN_ia32_PM_H_
 
#define IDT_ITEMS 64
#define GDT_ITEMS 7
#define IDT_ITEMS 64
#define GDT_ITEMS 7
 
#define VESA_INIT_SEGMENT 0x8000
#define NULL_DES 0
#define KTEXT_DES 1
#define KDATA_DES 2
#define UTEXT_DES 3
#define UDATA_DES 4
#define TSS_DES 5
#define TLS_DES 6 /* Pointer to Thread-Local-Storage data */
 
#define NULL_DES 0
#define KTEXT_DES 1
#define KDATA_DES 2
#define UTEXT_DES 3
#define UDATA_DES 4
#define TSS_DES 5
#define TLS_DES 6 /* Pointer to Thread-Local-Storage data */
 
#ifdef CONFIG_FB
 
#define VESA_INIT_SEGMENT 0x8000
#define VESA_INIT_DES 7
#define VESA_INIT_SEGMENT 0x8000
#define VESA_INIT_DES 7
#define KTEXT32_DES KTEXT_DES
 
#undef GDT_ITEMS
#define GDT_ITEMS 8
#define GDT_ITEMS 8
 
#endif /* CONFIG_FB */
 
#define gdtselector(des) ((des) << 3)
 
#define gdtselector(des) ((des) << 3)
#define PL_KERNEL 0
#define PL_USER 3
 
#define PL_KERNEL 0
#define PL_USER 3
#define AR_PRESENT (1 << 7)
#define AR_DATA (2 << 3)
#define AR_CODE (3 << 3)
#define AR_WRITABLE (1 << 1)
#define AR_INTERRUPT (0x0e)
#define AR_TSS (0x09)
 
#define AR_PRESENT (1 << 7)
#define AR_DATA (2 << 3)
#define AR_CODE (3 << 3)
#define AR_WRITABLE (1 << 1)
#define AR_INTERRUPT (0xe)
#define AR_TSS (0x9)
#define DPL_KERNEL (PL_KERNEL << 5)
#define DPL_USER (PL_USER << 5)
 
#define DPL_KERNEL (PL_KERNEL << 5)
#define DPL_USER (PL_USER << 5)
#define TSS_BASIC_SIZE 104
#define TSS_IOMAP_SIZE (16 * 1024 + 1) /* 16K for bitmap + 1 terminating byte for convenience */
 
#define TSS_BASIC_SIZE 104
#define TSS_IOMAP_SIZE (16 * 1024 + 1) /* 16K for bitmap + 1 terminating byte for convenience */
#define IO_PORTS (64 * 1024)
 
#define IO_PORTS (64 * 1024)
 
#ifndef __ASM__
 
#include <arch/types.h>
#include <arch/context.h>
 
struct ptr_16_32 {
typedef struct {
uint16_t limit;
uint32_t base;
} __attribute__ ((packed));
typedef struct ptr_16_32 ptr_16_32_t;
} __attribute__ ((packed)) ptr_16_32_t;
 
struct descriptor {
typedef struct {
unsigned limit_0_15: 16;
unsigned base_0_15: 16;
unsigned base_16_23: 8;
100,19 → 98,17
unsigned special: 1;
unsigned granularity : 1;
unsigned base_24_31: 8;
} __attribute__ ((packed));
typedef struct descriptor descriptor_t;
} __attribute__ ((packed)) descriptor_t;
 
struct idescriptor {
typedef struct {
unsigned offset_0_15: 16;
unsigned selector: 16;
unsigned unused: 8;
unsigned access: 8;
unsigned offset_16_31: 16;
} __attribute__ ((packed));
typedef struct idescriptor idescriptor_t;
} __attribute__ ((packed)) idescriptor_t;
 
struct tss {
typedef struct {
uint16_t link;
unsigned : 16;
uint32_t esp0;
152,13 → 148,12
unsigned : 16;
uint16_t iomap_base;
uint8_t iomap[TSS_IOMAP_SIZE];
} __attribute__ ((packed));
typedef struct tss tss_t;
} __attribute__ ((packed)) tss_t;
 
extern ptr_16_32_t gdtr;
extern ptr_16_32_t bootstrap_gdtr;
extern ptr_16_32_t protected_ap_gdtr;
extern struct tss *tss_p;
extern tss_t *tss_p;
 
extern descriptor_t gdt[];
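
The AR_* and DPL_* macros in both pm.h variants compose the access-rights byte of a descriptor: bit 7 is Present, bits 6-5 hold the DPL, and AR_CODE/AR_DATA already include the S (code/data segment) bit. For code segments the AR_WRITABLE bit position doubles as the readable flag, which is why the amd64 header defines AR_READABLE with the same value. A worked example (standalone, not repository code) of the familiar flat-model access bytes these macros produce:

#include <stdio.h>

#define PL_KERNEL     0
#define PL_USER       3

#define AR_PRESENT    (1 << 7)
#define AR_DATA       (2 << 3)
#define AR_CODE       (3 << 3)
#define AR_WRITABLE   (1 << 1)

#define DPL_KERNEL    (PL_KERNEL << 5)
#define DPL_USER      (PL_USER << 5)

int main(void)
{
        printf("kernel code: 0x%02x\n", AR_PRESENT | AR_CODE | DPL_KERNEL | AR_WRITABLE); /* 0x9a */
        printf("kernel data: 0x%02x\n", AR_PRESENT | AR_DATA | DPL_KERNEL | AR_WRITABLE); /* 0x92 */
        printf("user code:   0x%02x\n", AR_PRESENT | AR_CODE | DPL_USER | AR_WRITABLE);   /* 0xfa */
        printf("user data:   0x%02x\n", AR_PRESENT | AR_DATA | DPL_USER | AR_WRITABLE);   /* 0xf2 */
        return 0;
}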
 
/trunk/kernel/arch/ia32/include/cpu.h
55,7 → 55,7
unsigned int family;
unsigned int model;
unsigned int stepping;
struct tss *tss;
tss_t *tss;
count_t iomapver_copy; /** Copy of TASK's I/O Permission bitmap generation count. */
} cpu_arch_t;
/trunk/kernel/arch/ia32/src/pm.c
112,7 → 112,7
 
void tss_initialize(tss_t *t)
{
memsetb(t, sizeof(struct tss), 0);
memsetb(t, sizeof(tss_t), 0);
}
 
/*
/trunk/kernel/arch/ia32/src/smp/smp.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32
/** @addtogroup ia32
* @{
*/
/** @file
131,8 → 131,8
uint8_t apic = l_apic_id();
 
for (i = 0; i < ops->cpu_count(); i++) {
struct descriptor *gdt_new;
descriptor_t *gdt_new;
/*
* Skip processors marked unusable.
*/
159,14 → 159,14
* it needs to be replaced by a generic functionality of
* the memory subsystem
*/
gdt_new = (struct descriptor *) malloc(GDT_ITEMS *
sizeof(struct descriptor), FRAME_ATOMIC);
gdt_new = (descriptor_t *) malloc(GDT_ITEMS *
sizeof(descriptor_t), FRAME_ATOMIC);
if (!gdt_new)
panic("Cannot allocate memory for GDT.");
 
memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor));
memsetb(&gdt_new[TSS_DES], sizeof(struct descriptor), 0);
protected_ap_gdtr.limit = GDT_ITEMS * sizeof(struct descriptor);
memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(descriptor_t));
memsetb(&gdt_new[TSS_DES], sizeof(descriptor_t), 0);
protected_ap_gdtr.limit = GDT_ITEMS * sizeof(descriptor_t);
protected_ap_gdtr.base = KA2PA((uintptr_t) gdt_new);
gdtr.base = (uintptr_t) gdt_new;
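
smp.c gives each application processor a private copy of the boot GDT with the TSS slot zeroed, matching the "Each CPU has its private GDT and TSS" comment in pm.c; the AP's own pm_init() then fills the TSS descriptor in. A user-space stand-in for that copy-and-clear step (descriptor_t reduced to a hypothetical 8-byte stand-in, plain malloc instead of the kernel allocator):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GDT_ITEMS 7
#define TSS_DES   5

typedef struct {
        uint64_t raw;   /* stand-in for the real descriptor bit fields */
} descriptor_t;

descriptor_t gdt[GDT_ITEMS];    /* boot GDT used as the template */

static descriptor_t *make_ap_gdt(void)
{
        descriptor_t *gdt_new = malloc(GDT_ITEMS * sizeof(descriptor_t));
        if (!gdt_new)
                return NULL;
        /* Inherit the code/data descriptors from the boot GDT... */
        memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(descriptor_t));
        /* ...but give the AP a clean TSS slot to be filled in later. */
        memset(&gdt_new[TSS_DES], 0, sizeof(descriptor_t));
        return gdt_new;
}

int main(void)
{
        descriptor_t *ap = make_ap_gdt();
        printf("AP GDT at %p, TSS slot cleared: %s\n", (void *) ap,
            ap && ap[TSS_DES].raw == 0 ? "yes" : "no");
        free(ap);
        return 0;
}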
 
/trunk/kernel/arch/ia32/src/proc/scheduler.c
59,14 → 59,14
{
uintptr_t kstk = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE -
SP_DELTA];
 
/* Set kernel stack for CPL3 -> CPL0 switch via SYSENTER */
write_msr(IA32_MSR_SYSENTER_ESP, kstk);
 
/* Set kernel stack for CPL3 -> CPL0 switch via interrupt */
CPU->arch.tss->esp0 = kstk;
CPU->arch.tss->ss0 = gdtselector(KDATA_DES);
 
/* Set up TLS in GS register */
set_tls_desc(THREAD->arch.tls);
}