Subversion Repositories HelenOS

Compare Revisions

Rev 1828 → Rev 1829

/trunk/kernel/arch/xen32/include/pm.h
92,15 → 92,6
} __attribute__ ((packed));
typedef struct descriptor descriptor_t;
 
struct idescriptor {
    unsigned offset_0_15: 16;
    unsigned selector: 16;
    unsigned unused: 8;
    unsigned access: 8;
    unsigned offset_16_31: 16;
} __attribute__ ((packed));
typedef struct idescriptor idescriptor_t;
 
struct tss {
    uint16_t link;
    unsigned : 16;
156,8 → 147,7
extern void gdt_setbase(descriptor_t *d, uintptr_t base);
extern void gdt_setlimit(descriptor_t *d, uint32_t limit);
 
extern void idt_init(void);
extern void idt_setoffset(idescriptor_t *d, uintptr_t offset);
extern void traps_init(void);
 
extern void tss_initialize(tss_t *t);
extern void set_tls_desc(uintptr_t tls);
/trunk/kernel/arch/xen32/include/boot/boot.h
40,10 → 40,11
#define START_INFO_SIZE 1104
 
#define BOOT_OFFSET 0x0000
#define TEMP_STACK_SIZE 0x1000
 
#define XEN_VIRT_START 0xFC000000
#define XEN_CS 0xe019
 
#define TEMP_STACK_SIZE 0x1000
 
#ifndef __ASM__
 
#define mp_map ((pfn_t *) XEN_VIRT_START)
/trunk/kernel/arch/xen32/include/asm.h
272,15 → 272,6
    __asm__ volatile ("sgdtl %0\n" : : "m" (*gdtr_reg));
}
 
/** Load IDTR register from memory.
 *
 * @param idtr_reg Address of memory from where to load IDTR.
 */
static inline void idtr_load(ptr_16_32_t *idtr_reg)
{
    __asm__ volatile ("lidtl %0\n" : : "m" (*idtr_reg));
}
 
/** Load TR from descriptor table.
 *
 * @param sel Selector specifying descriptor of TSS segment.
/trunk/kernel/arch/xen32/include/hypercall.h
36,7 → 36,17
typedef uint16_t domid_t;
 
 
typedef struct {
    uint8_t vector;     /**< Exception vector */
    uint8_t flags;      /**< 0-3: privilege level; 4: clear event enable */
    uint16_t cs;        /**< Code selector */
    uintptr_t address;  /**< Code offset */
} trap_info_t;
 
 
#define XEN_SET_TRAP_TABLE 0
#define XEN_MMU_UPDATE 1
#define XEN_SET_CALLBACKS 4
#define XEN_UPDATE_VA_MAPPING 14
#define XEN_CONSOLE_IO 18
#define XEN_VM_ASSIST 21
198,4 → 208,14
    return hypercall2(XEN_VM_ASSIST, cmd, type);
}
 
static inline int xen_set_callbacks(const unsigned int event_selector, const void *event_address, const unsigned int failsafe_selector, void *failsafe_address)
{
    return hypercall4(XEN_SET_CALLBACKS, event_selector, event_address, failsafe_selector, failsafe_address);
}

static inline int xen_set_trap_table(const trap_info_t *table)
{
    return hypercall1(XEN_SET_TRAP_TABLE, table);
}
 
#endif
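
For orientation, a minimal sketch of how the two wrappers above fit together; XEN_CS comes from boot.h, VECTOR_SYSCALL from the interrupt definitions, the example_* handler symbols are hypothetical placeholders, and the zero-filled terminating entry mirrors the sentinel built in pm.c further down:

extern void example_gp_fault(void);   /* hypothetical handler symbols, illustration only */
extern void example_syscall(void);
extern void xen_callback(void);       /* real entry points, added in asm.S below */
extern void xen_failsafe_callback(void);

static void example_register_traps(void)
{
    trap_info_t table[] = {
        { 13, 0, XEN_CS, (uintptr_t) example_gp_fault },             /* #GP, kernel only */
        { VECTOR_SYSCALL, 3, XEN_CS, (uintptr_t) example_syscall },  /* callable from ring 3 */
        { 0, 0, 0, 0 }                                               /* zero entry terminates the table */
    };

    xen_set_trap_table(table);
    xen_set_callbacks(XEN_CS, xen_callback, XEN_CS, xen_failsafe_callback);
}
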
/trunk/kernel/arch/xen32/src/xen32.c
64,6 → 64,9
start_info_t start_info;
memzone_t meminfo;
 
extern void xen_callback(void);
extern void xen_failsafe_callback(void);
 
void arch_pre_main(void)
{
    xen_vm_assist(VMASST_CMD_ENABLE, VMASST_TYPE_WRITABLE_PAGETABLES);
76,6 → 79,8
    pte.frame_address = ADDR2PFN((uintptr_t) start_info.shared_info);
    xen_update_va_mapping(&shared_info, pte, UVMF_INVLPG);
    xen_set_callbacks(XEN_CS, xen_callback, XEN_CS, xen_failsafe_callback);
    /* Create identity mapping */
    meminfo.start = ADDR2PFN(ALIGN_UP(KA2PA(start_info.ptl0), PAGE_SIZE)) + start_info.pt_frames;
116,11 → 121,11
    if (config.cpu_active == 1) {
        // bios_init();
        // exc_register(VECTOR_SYSCALL, "syscall", (iroutine) syscall);
        exc_register(VECTOR_SYSCALL, "syscall", (iroutine) syscall);
#ifdef CONFIG_SMP
        // exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown",
        //     (iroutine) tlb_shootdown_ipi);
        exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown",
            (iroutine) tlb_shootdown_ipi);
#endif /* CONFIG_SMP */
    }
}
/trunk/kernel/arch/xen32/src/asm.S
34,6 → 34,8
 
.text
 
.global xen_callback
.global xen_failsafe_callback
.global enable_l_apic_in_msr
.global interrupt_handlers
.global memcpy
43,6 → 45,13
.global memcpy_to_uspace_failover_address
 
 
xen_callback:
    iret

xen_failsafe_callback:
    iret
 
 
#define MEMCPY_DST 4
#define MEMCPY_SRC 8
#define MEMCPY_SIZE 12
/trunk/kernel/arch/xen32/src/pm.c
75,7 → 75,7
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
};
 
static idescriptor_t idt[IDT_ITEMS];
static trap_info_t traps[IDT_ITEMS + 1];
 
static tss_t tss;
 
98,46 → 98,32
    d->limit_16_19 = (limit >> 16) & 0xf;
}
 
void idt_setoffset(idescriptor_t *d, uintptr_t offset)
{
    /*
     * Offset is a linear address.
     */
    d->offset_0_15 = offset & 0xffff;
    d->offset_16_31 = offset >> 16;
}

void tss_initialize(tss_t *t)
{
    memsetb((uintptr_t) t, sizeof(struct tss), 0);
}
 
/*
 * This function takes care of proper setup of IDT and IDTR.
 */
void idt_init(void)
void traps_init(void)
{
    idescriptor_t *d;
    int i;

    index_t i;
    for (i = 0; i < IDT_ITEMS; i++) {
        d = &idt[i];

        d->unused = 0;
        d->selector = selector(KTEXT_DES);

        d->access = AR_PRESENT | AR_INTERRUPT; /* masking interrupt */

        if (i == VECTOR_SYSCALL) {
            /*
             * The syscall interrupt gate must be callable from userland.
             */
            d->access |= DPL_USER;
        }
        traps[i].vector = i;
        idt_setoffset(d, ((uintptr_t) interrupt_handlers) + i*interrupt_handler_size);
        if (i == VECTOR_SYSCALL)
            traps[i].flags = 3;
        else
            traps[i].flags = 0;
        traps[i].cs = XEN_CS;
        traps[i].address = ((uintptr_t) interrupt_handlers) + i * interrupt_handler_size;
        exc_register(i, "undef", (iroutine) null_interrupt);
    }
    traps[IDT_ITEMS].vector = 0;
    traps[IDT_ITEMS].flags = 0;
    traps[IDT_ITEMS].cs = 0;
    traps[IDT_ITEMS].address = NULL;
    exc_register(13, "gp_fault", (iroutine) gp_fault);
    exc_register( 7, "nm_fault", (iroutine) nm_fault);
    exc_register(12, "ss_fault", (iroutine) ss_fault);
172,34 → 158,22
void pm_init(void)
{
    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
    ptr_16_32_t idtr;

    /*
     * Update addresses in GDT and IDT to their virtual counterparts.
     */
    idtr.limit = sizeof(idt);
    idtr.base = (uintptr_t) idt;
    // gdtr_load(&gdtr);
    // idtr_load(&idtr);
    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    // if (config.cpu_active == 1) {
    //     idt_init();
    //     /*
    //      * NOTE: bootstrap CPU has statically allocated TSS, because
    //      * the heap hasn't been initialized so far.
    //      */
    if (config.cpu_active == 1) {
        traps_init();
        xen_set_trap_table(traps);
        /*
         * NOTE: bootstrap CPU has statically allocated TSS, because
         * the heap hasn't been initialized so far.
         */
        tss_p = &tss;
    // }
    // else {
    //     tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
    //     if (!tss_p)
    //         panic("could not allocate TSS\n");
    // }
    } else {
        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("could not allocate TSS\n");
    }

    // tss_initialize(tss_p);
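
The traps array above is sized IDT_ITEMS + 1 because the extra zero-filled element terminates the table handed to the hypervisor, and a paravirtualized guest cannot execute privileged instructions such as lidt, which is why the idtr_load route disappears. A condensed sketch of the resulting bootstrap path, using only calls visible in the hunks above:

/* Sketch: the guest never loads IDTR itself; it only describes its
 * handlers to Xen, which installs them on the guest's behalf. */
traps_init();                 /* fill traps[0 .. IDT_ITEMS - 1], leave the sentinel zeroed */
xen_set_trap_table(traps);    /* replaces idt_init() + idtr_load(&idtr) */
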
/trunk/kernel/arch/xen32/src/proc/scheduler.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32proc
/** @addtogroup xen32proc
* @{
*/
/** @file
77,6 → 77,5
{
}
 
/** @}
/** @}
*/
 
/trunk/kernel/arch/xen32/src/proc/task.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32proc
/** @addtogroup xen32proc
* @{
*/
/** @file
57,6 → 57,5
free(t->arch.iomap.map);
}
 
/** @}
/** @}
*/
 
/trunk/kernel/arch/xen32/src/proc/thread.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32proc
/** @addtogroup xen32proc
* @{
*/
/** @file
34,7 → 34,7
 
#include <proc/thread.h>
 
/** Perform ia32 specific thread initialization.
/** Perform xen32 specific thread initialization.
*
* @param t Thread to be initialized.
*/
43,6 → 43,5
t->arch.tls = 0;
}
 
/** @}
/** @}
*/