HelenOS Subversion Repository

Compare Revisions: Rev 1809 → Rev 1810

/trunk/kernel/arch/xen32/_link.ld.in
2,6 → 2,7
*/
 
#define __ASM__
#include <arch/boot/boot.h>
#include <arch/mm/page.h>
 
ENTRY(kernel_image_start)
11,7 → 12,7
*(__xen_guest);
}
.image PA2KA(0): {
.image PA2KA(BOOT_OFFSET): {
ktext_start = .;
*(.text);
ktext_end = .;
/trunk/kernel/arch/xen32/include/boot/boot.h
35,7 → 35,8
#ifndef __xen32_BOOT_H__
#define __xen32_BOOT_H__
 
#define BOOT_STACK_SIZE 0x400
#define BOOT_OFFSET 0x0000
#define TEMP_STACK_SIZE 0x400
 
#endif
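The linker script change above places the kernel image at PA2KA(BOOT_OFFSET) rather than PA2KA(0), with BOOT_OFFSET defined here as 0x0000. As a reading aid only, a minimal sketch of the physical-to-kernel address translation this relies on, assuming the 0x80000000 kernel base visible in the mapping comments of boot.S further down; the real macros live in <arch/mm/page.h> and are not part of this diff, so the names below carry a _SKETCH suffix:

/* Hypothetical illustration only - not the project's actual definitions. */
#define KERNEL_BASE_SKETCH 0x80000000u                 /* kernel alias at 2 GiB */
#define PA2KA_SKETCH(pa)   ((pa) + KERNEL_BASE_SKETCH) /* physical -> kernel virtual */
#define KA2PA_SKETCH(ka)   ((ka) - KERNEL_BASE_SKETCH) /* kernel virtual -> physical */

/* With BOOT_OFFSET == 0x0000, PA2KA(BOOT_OFFSET) equals PA2KA(0), so the
 * .image output section still begins at the bottom of the kernel mapping. */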
 
/trunk/kernel/arch/xen32/Makefile.inc
114,7 → 114,7
arch/$(ARCH)/src/smp/smp.c \
arch/$(ARCH)/src/atomic.S \
arch/$(ARCH)/src/smp/ipi.c \
arch/$(ARCH)/src/ia32.c \
arch/$(ARCH)/src/xen32.c \
arch/$(ARCH)/src/interrupt.c \
arch/$(ARCH)/src/pm.c \
arch/$(ARCH)/src/userspace.c \
/trunk/kernel/arch/xen32/src/ia32.c
File deleted
\ No newline at end of file
Property changes:
Deleted: svn:special
-*
\ No newline at end of property
/trunk/kernel/arch/xen32/src/xen32.c
0,0 → 1,159
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup xen32
* @{
*/
/** @file
*/
 
#include <arch.h>
 
#include <arch/types.h>
#include <typedefs.h>
 
#include <arch/pm.h>
 
#include <arch/drivers/ega.h>
#include <arch/drivers/vesa.h>
#include <genarch/i8042/i8042.h>
#include <arch/drivers/i8254.h>
#include <arch/drivers/i8259.h>
 
#include <arch/context.h>
 
#include <config.h>
 
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <genarch/acpi/acpi.h>
 
#include <arch/bios/bios.h>
 
#include <arch/mm/memory_init.h>
#include <interrupt.h>
#include <arch/debugger.h>
#include <proc/thread.h>
#include <syscall/syscall.h>
#include <console/console.h>
 
void arch_pre_mm_init(void)
{
// pm_init();
 
if (config.cpu_active == 1) {
// bios_init();
// i8259_init(); /* PIC */
// i8254_init(); /* hard clock */
// exc_register(VECTOR_SYSCALL, "syscall", (iroutine) syscall);
#ifdef CONFIG_SMP
// exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown",
// (iroutine) tlb_shootdown_ipi);
#endif /* CONFIG_SMP */
}
}
 
void arch_post_mm_init(void)
{
if (config.cpu_active == 1) {
 
#ifdef CONFIG_FB
if (vesa_present())
vesa_init();
else
#endif
ega_init(); /* video */
/* Enable debugger */
debugger_init();
/* Merge all memory zones into one big zone */
zone_merge_all();
}
}
 
void arch_pre_smp_init(void)
{
if (config.cpu_active == 1) {
memory_print_map();
#ifdef CONFIG_SMP
acpi_init();
#endif /* CONFIG_SMP */
}
}
 
void arch_post_smp_init(void)
{
i8042_init(); /* keyboard controller */
}
 
void calibrate_delay_loop(void)
{
i8254_calibrate_delay_loop();
if (config.cpu_active == 1) {
/*
* This has to be done only on UP.
* On SMP, the i8254 is not used for timekeeping and its interrupt pin remains masked.
*/
i8254_normal_operation();
}
}
 
/** Set thread-local-storage pointer
*
* The TLS pointer is set via the GS register: GS holds the selector,
* and the descriptor's base contains the actual TLS address.
*/
unative_t sys_tls_set(unative_t addr)
{
THREAD->arch.tls = addr;
set_tls_desc(addr);
 
return 0;
}
 
/** Acquire console back for kernel
*
*/
void arch_grab_console(void)
{
i8042_grab();
}
/** Return console to userspace
*
*/
void arch_release_console(void)
{
i8042_release();
}
 
/** @}
*/
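The new xen32.c defines the architecture hooks that the generic kernel start-up code calls around memory-management and SMP initialization. The following is only a sketch of the assumed calling order, inferred from the hook names and their bodies above; the generic caller is not part of this diff and the wrapper name is illustrative:

/* Sketch of the assumed invocation order; illustrative only. */
void startup_order_sketch(void)
{
	arch_pre_mm_init();     /* PIC/clock/syscall setup, mostly commented out in this port */
	/* ... generic memory management initialization ... */
	arch_post_mm_init();    /* VESA/EGA video, debugger, zone_merge_all() */
	arch_pre_smp_init();    /* memory map printout, ACPI when CONFIG_SMP */
	/* ... secondary processors brought up ... */
	arch_post_smp_init();   /* i8042 keyboard controller */
	calibrate_delay_loop(); /* i8254-based delay calibration */
}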
/trunk/kernel/arch/xen32/src/pm.c
1,0 → 0,0
link ../../ia32/src/pm.c
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup xen32
* @{
*/
/** @file
*/
 
#include <arch/pm.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <arch/context.h>
#include <panic.h>
#include <arch/mm/page.h>
#include <mm/slab.h>
#include <memstr.h>
#include <arch/boot/boot.h>
#include <interrupt.h>
 
/*
* Early xen32 configuration functions and data structures.
*/
 
/*
* We have no use for segmentation so we set up flat mode. In this
* mode, we use, for each privilege level, two segments spanning the
* whole memory. One is for code and one is for data.
*
* A separate segment is used for the GS register, which holds a pointer
* to the TLS thread structure in its base.
*/
descriptor_t gdt[GDT_ITEMS] = {
/* NULL descriptor */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
/* KTEXT descriptor */
{ 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
/* KDATA descriptor */
{ 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
/* UTEXT descriptor */
{ 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
/* UDATA descriptor */
{ 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
/* TSS descriptor - setup will be completed later */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
/* TLS descriptor */
{ 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
/* VESA Init descriptor */
#ifdef CONFIG_FB
{ 0xffff, 0, VESA_INIT_SEGMENT>>12, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 }
#endif
};
 
static idescriptor_t idt[IDT_ITEMS];
 
static tss_t tss;
 
tss_t *tss_p = NULL;
 
/* gdtr is changed by kmp before the next CPU is initialized */
ptr_16_32_t bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((uintptr_t) gdt) };
ptr_16_32_t gdtr = { .limit = sizeof(gdt), .base = (uintptr_t) gdt };
 
void gdt_setbase(descriptor_t *d, uintptr_t base)
{
d->base_0_15 = base & 0xffff;
d->base_16_23 = ((base) >> 16) & 0xff;
d->base_24_31 = ((base) >> 24) & 0xff;
}
 
void gdt_setlimit(descriptor_t *d, uint32_t limit)
{
d->limit_0_15 = limit & 0xffff;
d->limit_16_19 = (limit >> 16) & 0xf;
}
 
void idt_setoffset(idescriptor_t *d, uintptr_t offset)
{
/*
* Offset is a linear address.
*/
d->offset_0_15 = offset & 0xffff;
d->offset_16_31 = offset >> 16;
}
 
void tss_initialize(tss_t *t)
{
memsetb((uintptr_t) t, sizeof(struct tss), 0);
}
 
/*
* This function takes care of proper setup of IDT and IDTR.
*/
void idt_init(void)
{
idescriptor_t *d;
int i;
 
for (i = 0; i < IDT_ITEMS; i++) {
d = &idt[i];
 
d->unused = 0;
d->selector = selector(KTEXT_DES);
 
d->access = AR_PRESENT | AR_INTERRUPT; /* masking interrupt */
 
if (i == VECTOR_SYSCALL) {
/*
* The syscall interrupt gate must be callable from userland.
*/
d->access |= DPL_USER;
}
idt_setoffset(d, ((uintptr_t) interrupt_handlers) + i*interrupt_handler_size);
exc_register(i, "undef", (iroutine) null_interrupt);
}
exc_register(13, "gp_fault", (iroutine) gp_fault);
exc_register( 7, "nm_fault", (iroutine) nm_fault);
exc_register(12, "ss_fault", (iroutine) ss_fault);
exc_register(19, "simd_fp", (iroutine) simd_fp_exception);
}
 
 
/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */
static void clean_IOPL_NT_flags(void)
{
__asm__ volatile (
"pushfl\n"
"pop %%eax\n"
"and $0xffff8fff, %%eax\n"
"push %%eax\n"
"popfl\n"
: : : "eax"
);
}
 
/* Clean AM(18) flag in CR0 register */
static void clean_AM_flag(void)
{
__asm__ volatile (
"mov %%cr0, %%eax\n"
"and $0xfffbffff, %%eax\n"
"mov %%eax, %%cr0\n"
: : : "eax"
);
}
 
void pm_init(void)
{
descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
ptr_16_32_t idtr;
 
/*
* Update addresses in GDT and IDT to their virtual counterparts.
*/
idtr.limit = sizeof(idt);
idtr.base = (uintptr_t) idt;
gdtr_load(&gdtr);
idtr_load(&idtr);
/*
* Each CPU has its private GDT and TSS.
* All CPUs share one IDT.
*/
 
if (config.cpu_active == 1) {
idt_init();
/*
* NOTE: the bootstrap CPU has a statically allocated TSS, because
* the heap has not been initialized yet.
*/
tss_p = &tss;
}
else {
tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
if (!tss_p)
panic("could not allocate TSS\n");
}
 
tss_initialize(tss_p);
gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
gdt_p[TSS_DES].special = 1;
gdt_p[TSS_DES].granularity = 0;
gdt_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);
 
/*
* As of this moment, the current CPU has its own GDT pointing
* to its own TSS. We just need to load the TR register.
*/
tr_load(selector(TSS_DES));
clean_IOPL_NT_flags(); /* Disable I/O on nonprivileged levels and clear NT flag. */
clean_AM_flag(); /* Disable alignment check */
}
 
void set_tls_desc(uintptr_t tls)
{
ptr_16_32_t cpugdtr;
descriptor_t *gdt_p;
 
gdtr_store(&cpugdtr);
gdt_p = (descriptor_t *) cpugdtr.base;
gdt_setbase(&gdt_p[TLS_DES], tls);
/* Reload gdt register to update GS in CPU */
gdtr_load(&cpugdtr);
}
 
/** @}
*/
Property changes:
Deleted: svn:special
-*
\ No newline at end of property
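gdt_setbase() and gdt_setlimit() above scatter a 32-bit base and a 20-bit limit across the split descriptor fields dictated by the ia32 GDT layout. A small self-contained sketch, not taken from the repository, that recombines the pieces and checks the round trip (field names mirror descriptor_t, but this is standalone test code):

/* Illustrative round-trip check for the descriptor field splitting. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 0x12345678;
	uint16_t base_0_15  = base & 0xffff;
	uint8_t  base_16_23 = (base >> 16) & 0xff;
	uint8_t  base_24_31 = (base >> 24) & 0xff;

	uint32_t rebuilt = (uint32_t) base_0_15
	    | ((uint32_t) base_16_23 << 16)
	    | ((uint32_t) base_24_31 << 24);
	assert(rebuilt == base);

	uint32_t limit = 0xfffff;                    /* 20-bit limit */
	uint16_t limit_0_15  = limit & 0xffff;
	uint8_t  limit_16_19 = (limit >> 16) & 0xf;
	assert((((uint32_t) limit_16_19 << 16) | limit_0_15) == limit);
	return 0;
}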
/trunk/kernel/arch/xen32/src/boot/boot.S
39,270 → 39,27
.ascii "PT_MODE_WRITABLE"
.byte 0
 
.section K_TEXT_START, "ax"
.text
 
KTEXT=8
KDATA=16
 
.code32
.align 4
.global kernel_image_start
kernel_image_start:
lgdt KA2PA(bootstrap_gdtr) # initialize Global Descriptor Table register
 
movw $KDATA, %cx
movw %cx, %es
movw %cx, %fs
movw %cx, %gs
movw %cx, %ds # kernel data + stack
movw %cx, %ss
jmpl $KTEXT, $multiboot_meeting_point
multiboot_meeting_point:
pushl %ebx # save parameters from GRUB
pushl %eax
#ifdef CONFIG_FB
mov $vesa_init, %esi
mov $VESA_INIT_SEGMENT << 4, %edi
mov $e_vesa_init - vesa_init, %ecx
cld
rep movsb
 
mov $VESA_INIT_SEGMENT << 4, %edi
jmpl %edi
movl $kernel_stack, %esp # initialize stack pointer
vesa_meeting_point:
mov %esi, KA2PA(vesa_ph_addr)
mov %di, KA2PA(vesa_height)
shr $16, %edi
mov %di, KA2PA(vesa_width)
mov %bx, KA2PA(vesa_scanline)
shr $16, %ebx
mov %bx, KA2PA(vesa_bpp)
#endif
call map_kernel # map kernel and turn paging on
popl %eax
popl %ebx
call main_bsp # never returns
 
cli
hlt
 
.global map_kernel
map_kernel:
#
# Here we set up the mapping for both the unmapped and mapped sections of the kernel.
# For simplicity, we map the entire 4G space.
#
movl %cr4, %ecx
orl $(1<<4), %ecx
movl %ecx, %cr4 # turn PSE on
movl $(page_directory+0), %esi
movl $(page_directory+2048), %edi
xorl %ecx, %ecx
xorl %ebx, %ebx
0:
movl $((1<<7)|(1<<0)), %eax
orl %ebx, %eax
movl %eax, (%esi,%ecx,4) # mapping 0x00000000+%ecx*4M => 0x00000000+%ecx*4M
movl %eax, (%edi,%ecx,4) # mapping 0x80000000+%ecx*4M => 0x00000000+%ecx*4M
addl $(4*1024*1024), %ebx
.data
 
incl %ecx
cmpl $512, %ecx
jl 0b
 
movl %esi, %cr3
# turn paging on
movl %cr0, %ebx
orl $(1<<31), %ebx
movl %ebx, %cr0
ret
 
#ifdef CONFIG_FB
vesa_init:
jmp $selector(VESA_INIT_DES), $vesa_init_real - vesa_init
.code16
vesa_init_real:
mov %cr0, %eax
and $~1, %eax
mov %eax, %cr0
jmp $VESA_INIT_SEGMENT, $vesa_init_real2 - vesa_init
vesa_init_real2:
mov $VESA_INIT_SEGMENT, %bx
mov %bx, %es
mov %bx, %fs
mov %bx, %gs
mov %bx, %ds
mov %bx, %ss
movl %esp, %eax
movl $0x0000fffc, %esp
movl $0x0000fffc, %ebp
pushl %eax
#define VESA_INFO_SIZE 1024
 
#define VESA_MODE_LIST_PTR_OFFSET 14
#define VESA_MODE_WIDTH_OFFSET 18
#define VESA_MODE_HEIGHT_OFFSET 20
#define VESA_MODE_BPP_OFFSET 25
#define VESA_MODE_SCANLINE_OFFSET 16
#define VESA_MODE_PHADDR_OFFSET 40
 
#define VESA_END_OF_MODES 0xffff
 
#define VESA_OK 0x4f
 
#define VESA_GET_INFO 0x4f00
#define VESA_GET_MODE_INFO 0x4f01
#define VESA_SET_MODE 0x4f02
 
#define CONFIG_VESA_BPP_a 255
 
#if CONFIG_VESA_BPP == 24
#undef CONFIG_VESA_BPP_a
#define CONFIG_VESA_BPP_a 32
#endif
 
mov $VESA_GET_INFO, %ax
mov $e_vesa_init - vesa_init, %di
push %di
int $0x10
pop %di
cmp $VESA_OK, %al
jnz 0f
mov 2 + VESA_MODE_LIST_PTR_OFFSET(%di), %si
mov %si, %gs
mov VESA_MODE_LIST_PTR_OFFSET(%di), %si
add $VESA_INFO_SIZE, %di
 
1:# Try next mode
mov %gs:(%si), %cx
cmp $VESA_END_OF_MODES, %cx
jz 0f
inc %si
inc %si
push %cx
push %di
push %si
mov $VESA_GET_MODE_INFO, %ax
int $0x10
pop %si
pop %di
pop %cx
cmp $VESA_OK, %al
jnz 0f
mov $CONFIG_VESA_WIDTH, %ax
cmp VESA_MODE_WIDTH_OFFSET(%di), %ax
jnz 1b
mov $CONFIG_VESA_HEIGHT,%ax
cmp VESA_MODE_HEIGHT_OFFSET(%di), %ax
jnz 1b
mov $CONFIG_VESA_BPP, %al
cmp VESA_MODE_BPP_OFFSET(%di), %al
jz 2f
mov $CONFIG_VESA_BPP_a, %al
cmp VESA_MODE_BPP_OFFSET(%di), %al
jnz 1b
2:
mov %cx, %bx
or $0xc000, %bx
push %di
mov $VESA_SET_MODE, %ax
int $0x10
pop %di
cmp $VESA_OK, %al
jnz 0f
mov VESA_MODE_PHADDR_OFFSET(%di), %esi
mov VESA_MODE_WIDTH_OFFSET(%di), %ax
shl $16, %eax
mov VESA_MODE_HEIGHT_OFFSET(%di), %ax
mov VESA_MODE_BPP_OFFSET(%di), %bl
xor %bh, %bh
shl $16, %ebx
mov VESA_MODE_SCANLINE_OFFSET(%di), %bx
mov %eax, %edi
8:
mov %cr0, %eax
or $1, %eax
mov %eax, %cr0
jmp 9f
9:
ljmpl $KTEXT, $(vesa_init_protect - vesa_init + VESA_INIT_SEGMENT << 4)
 
0:# No preferred mode found
mov $0x111, %cx
push %di
push %cx
mov $VESA_GET_MODE_INFO, %ax
int $0x10
pop %cx
pop %di
cmp $VESA_OK, %al
jnz 1f
jz 2b # Force relative jump
1:
mov $0x0003, %ax
int $0x10
mov $0xffffffff, %edi # EGA text mode used, because of problems with VESA
xor %ax, %ax
jz 8b # Force relative jump
 
.code32
vesa_init_protect:
popl %esp
 
movw $KDATA, %cx
movw %cx, %es
movw %cx, %fs
movw %cx, %gs
movw %cx, %ds # kernel data + stack
movw %cx, %ss
jmpl $KTEXT, $vesa_meeting_point
 
.align 4
e_vesa_init:
#endif
 
.section K_DATA_START, "aw", @progbits
 
.align 4096
page_directory:
.space 4096, 0
 
kernel_stack_bottom:
.space TEMP_STACK_SIZE
kernel_stack:
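The map_kernel routine shown in the boot.S hunk enables PSE and fills the page directory with 4 MiB entries, installing each entry twice: once at indices 0..511 for the identity map and once at indices 512..1023 for the 0x80000000 alias. A C rendering of the same loop, offered only as a reading aid for the assembly; the function name and array type are illustrative, not part of the revision:

/* Sketch of what map_kernel computes; illustrative only. */
#include <stdint.h>

#define PDE_PRESENT (1u << 0)
#define PDE_4M      (1u << 7)   /* PSE large-page bit */

static void map_kernel_sketch(uint32_t page_directory[1024])
{
	for (uint32_t i = 0; i < 512; i++) {
		uint32_t pde = (i * 4u * 1024 * 1024) | PDE_4M | PDE_PRESENT;
		page_directory[i] = pde;        /* 0x00000000 + i*4M -> i*4M */
		page_directory[512 + i] = pde;  /* 0x80000000 + i*4M -> i*4M */
	}
	/* The assembly then loads %cr3 with the directory's physical address
	 * and sets CR0.PG (bit 31) to turn paging on. */
}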