Subversion Repositories HelenOS-historic

Compare Revisions

Rev 693 → Rev 694

/kernel/trunk/arch/amd64/boot/Makefile
File deleted
/kernel/trunk/arch/amd64/boot/boot.ld
File deleted
/kernel/trunk/arch/amd64/boot/boot.S
File deleted
/kernel/trunk/arch/amd64/_link.ld.in
12,13 → 12,10
#include <arch/boot/boot.h>
#include <arch/mm/page.h>
 
ENTRY(kernel_image_start)
 
SECTIONS {
.unmapped BOOTSTRAP_OFFSET: AT (BOOTSTRAP_OFFSET) {
.unmapped BOOT_OFFSET: AT (0) {
unmapped_ktext_start = .;
*(K_TEXT_START);
*(K_TEXT_START_2);
unmapped_ktext_end = .;
 
unmapped_kdata_start = .;
26,9 → 23,8
unmapped_kdata_end = .;
}
 
.mapped (PA2KA(BOOT_OFFSET+BOOTSTRAP_OFFSET)+SIZEOF(.unmapped)) : AT (BOOTSTRAP_OFFSET+SIZEOF(.unmapped)) {
.mapped (PA2KA(BOOT_OFFSET)+SIZEOF(.unmapped)) : AT (SIZEOF(.unmapped)) {
ktext_start = .;
*(BOOT_DATA);
*(.text);
ktext_end = .;
 
36,7 → 32,7
*(.data); /* initialized data */
*(.rodata*); /* string literals */
hardcoded_load_address = .;
QUAD(PA2KA(BOOT_OFFSET+BOOTSTRAP_OFFSET));
QUAD(PA2KA(BOOT_OFFSET));
hardcoded_ktext_size = .;
QUAD(ktext_end - ktext_start + (unmapped_ktext_end - unmapped_ktext_start));
hardcoded_kdata_size = .;
56,18 → 52,4
 
kdata_end = .;
}
 
_hardcoded_kernel_size = (ktext_end - ktext_start) + (unmapped_ktext_end - unmapped_ktext_start) + (kdata_end - kdata_start) + (unmapped_kdata_end - unmapped_kdata_start);
 
/* Symbols that need to be accessed both from real mode & long mode */
/* e820*_boot is real mode (pre-above-1MB-move), e820* is */
/* kernel mapped above-1MB-physical copied symbol */
e820table_boot = KA2PA(e820table) - BOOT_OFFSET;
e820counter_boot = KA2PA(e820counter) - BOOT_OFFSET;
 
/* real_bootstrap_gdtr is mapped real_bootstrap_gdtr_boot */
/* It is physically outside of kernel area, we have to access */
/* it after modification from long mode for booting */
/* SMP slave processors */
real_bootstrap_gdtr = PA2KA(real_bootstrap_gdtr_boot);
}
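
The section addresses above are computed with the PA2KA()/KA2PA() macros from <arch/mm/page.h>, whose definitions are not part of this diff. The following stand-alone C sketch shows the translation they are assumed to perform; the 0xffffffff80000000 kernel base is an inference from the hard-coded START_STACK_64 value in the old boot.S (0xffffffff80007c00 for physical 0x7c00), not a value taken from page.h.

/* Sketch of the PA2KA/KA2PA conversions the linker script relies on.
 * KERNEL_BASE is an assumed amd64 kernel mapping offset; the real macros
 * live in <arch/mm/page.h>. */
#include <stdint.h>
#include <stdio.h>

#define KERNEL_BASE 0xffffffff80000000ULL  /* assumed kernel mapping offset */
#define BOOT_OFFSET 0x108000ULL            /* new value from boot.h in this revision */

#define PA2KA(pa) ((pa) + KERNEL_BASE)     /* physical -> kernel virtual */
#define KA2PA(ka) ((ka) - KERNEL_BASE)     /* kernel virtual -> physical */

int main(void)
{
	/* The .mapped section now starts at PA2KA(BOOT_OFFSET) + SIZEOF(.unmapped);
	 * only the PA2KA part of that expression is shown here. */
	printf("PA2KA(BOOT_OFFSET) = %#llx\n",
	       (unsigned long long) PA2KA(BOOT_OFFSET));
	printf("round trip back to physical = %#llx\n",
	       (unsigned long long) KA2PA(PA2KA(BOOT_OFFSET)));
	return 0;
}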
/kernel/trunk/arch/amd64/include/boot/memmapasm.h
File deleted
\ No newline at end of file
Property changes:
Deleted: svn:special
-*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/boot/boot.h
29,8 → 29,9
#ifndef __amd64_BOOT_H__
#define __amd64_BOOT_H__
 
#define BOOTSTRAP_OFFSET 0x8000
#define BOOT_OFFSET 0x100000
#define BOOT_OFFSET 0x108000
#define AP_BOOT_OFFSET 0x8000
#define BOOT_STACK_SIZE 0x400
 
#define MULTIBOOT_HEADER_MAGIC 0x1BADB002
#define MULTIBOOT_HEADER_FLAGS 0x00010003
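
The new BOOT_OFFSET is simply the old BOOT_OFFSET with the old BOOTSTRAP_OFFSET folded in, which matches the removal of the run-time kernel copy in boot.S. A small compile-time sketch of that relation and of the multiboot checksum rule follows; it is not kernel code, and the "OLD_" names are labels invented here for the values on the left-hand side of this diff.

/* Compile-time sanity checks for the boot.h constants; not kernel code. */
#include <stdint.h>

#define OLD_BOOTSTRAP_OFFSET 0x8000    /* dropped in r694 */
#define OLD_BOOT_OFFSET      0x100000  /* pre-r694 load offset */
#define BOOT_OFFSET          0x108000  /* new load offset */
#define AP_BOOT_OFFSET       0x8000    /* low trampoline, presumably for AP (SMP slave) startup */

/* The kernel is now linked directly at the combined offset, so the
 * rep movsb copy to higher physical memory in boot.S can go away. */
_Static_assert(OLD_BOOT_OFFSET + OLD_BOOTSTRAP_OFFSET == BOOT_OFFSET,
               "new BOOT_OFFSET folds the old bootstrap relocation in");

/* Multiboot expects magic + flags + checksum to wrap to zero (mod 2^32);
 * boot.S encodes the checksum as -(MAGIC + FLAGS). */
#define MULTIBOOT_HEADER_MAGIC 0x1BADB002
#define MULTIBOOT_HEADER_FLAGS 0x00010003
_Static_assert((uint32_t) (MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS +
               (uint32_t) -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)) == 0,
               "multiboot header checksum invariant");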
/kernel/trunk/arch/amd64/Makefile.inc
77,7 → 77,6
arch/$(ARCH)/src/dummy.s \
arch/$(ARCH)/src/fpu_context.c \
arch/$(ARCH)/src/boot/boot.S \
arch/$(ARCH)/src/boot/memmap.S \
arch/$(ARCH)/src/pm.c \
arch/$(ARCH)/src/context.S \
arch/$(ARCH)/src/drivers/ega.c \
/kernel/trunk/arch/amd64/src/boot/memmap.S
File deleted
/kernel/trunk/arch/amd64/src/boot/boot.S
33,55 → 33,45
#include <arch/cpuid.h>
#include <arch/boot/boot.h>
 
#define START_STACK (BOOT_OFFSET - BOOT_STACK_SIZE)
#define START_STACK_64 0xffffffff80007c00
#
# This is where we require any SPARTAN-kernel-compatible boot loader
# to pass control in real mode.
#
# Protected mode tables are statically initialised during compile
# time. So we can just load the respective table registers and
# switch to protected mode.
#
 
#define START_STACK (BOOTSTRAP_OFFSET-0x400)
.section K_TEXT_START, "ax"
.code16
.global kernel_image_start
.global multiboot_image_start
kernel_image_start:
cli
xorw %ax,%ax
movw %ax,%ds
movw %ax,%es
movw %ax,%ss # initialize stack segment register
movl $(START_STACK), %esp # initialize stack pointer
call memmap_arch_init
movl $0x80000000, %eax
cpuid
cmp $0x80000000, %eax # any function > 80000000h?
jbe no_long_mode
movl $(AMD_CPUID_EXTENDED), %eax # Extended function code 80000001
cpuid
bt $29, %edx # Test if long mode is supported.
jnc no_long_mode
 
# Load gdtr, idtr
lgdt real_bootstrap_gdtr_boot
movl %cr0,%eax
orl $0x1,%eax
movl %eax,%cr0 # switch to protected mode
 
jmpl $gdtselector(KTEXT32_DES), $now_in_prot
 
no_long_mode:
1:
jmp 1b
# .code16
# .global kernel_image_start
# .global multiboot_image_start
# kernel_image_start:
# cli
# xorw %ax,%ax
# movw %ax,%ds
# movw %ax,%es
# movw %ax,%ss # initialize stack segment register
# movl $(START_STACK), %esp # initialize stack pointer
#
# call memmap_arch_init
#
# movl $0x80000000, %eax
# cpuid
# cmp $0x80000000, %eax # any function > 80000000h?
# jbe no_long_mode
# movl $(AMD_CPUID_EXTENDED), %eax # Extended function code 80000001
# cpuid
# bt $29, %edx # Test if long mode is supported.
# jnc no_long_mode
#
# # Load gdtr, idtr
# lgdt real_bootstrap_gdtr_boot
#
# movl %cr0,%eax
# orl $0x1,%eax
# movl %eax,%cr0 # switch to protected mode
#
# jmpl $gdtselector(KTEXT32_DES), $now_in_prot
#
# no_long_mode:
# 1:
# jmp 1b
#
.code32
.align 4
multiboot_header:
88,11 → 78,11
.long MULTIBOOT_HEADER_MAGIC
.long MULTIBOOT_HEADER_FLAGS
.long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS) # checksum
.long multiboot_header + BOOT_OFFSET
.long unmapped_ktext_start + BOOT_OFFSET
.long multiboot_header
.long unmapped_ktext_start
.long 0
.long 0
.long multiboot_image_start + BOOT_OFFSET
.long multiboot_image_start
 
multiboot_image_start:
movl $START_STACK, %esp # initialize stack pointer
109,44 → 99,39
jmpl $gdtselector(KTEXT_DES), $multiboot_meeting_point + BOOT_OFFSET
multiboot_meeting_point:
hlt
 
# Protected 32-bit. We want to reuse the code-seg descriptor,
# the Default operand size must not be 1 when entering long mode
now_in_prot:
# Set up stack & data descriptors
movw $gdtselector(KDATA_DES), %ax
movw %ax, %ds
movw %ax, %ss
 
movb $0xd1, %al # enable A20 using the keyboard controller
outb %al, $0x64
movb $0xdf, %al
outb %al, $0x60
 
# Protected 32-bit. We want to reuse the code-seg descriptor,
# the Default operand size must not be 1 when entering long mode
pushl %ebx # save parameters from GRUB
pushl %eax
# Enable 64-bit page translation entries - CR4.PAE = 1.
# Paging is not enabled until after long mode is enabled
movl %cr4, %eax
btsl $5, %eax
movl %eax, %cr4
 
# Set up paging tables
leal ptl_0, %eax
movl %eax, %cr3
# Enable long mode
movl $EFER_MSR_NUM, %ecx # EFER MSR number
rdmsr # Read EFER
btsl $AMD_LME_FLAG, %eax # Set LME=1
wrmsr # Write EFER
movl $EFER_MSR_NUM, %ecx # EFER MSR number
rdmsr # Read EFER
btsl $AMD_LME_FLAG, %eax # Set LME=1
wrmsr # Write EFER
# Enable paging to activate long mode (set CR0.PG=1)
movl %cr0, %eax
btsl $31, %eax
movl %eax, %cr0
# At this point we are in compatibility mode
jmpl $gdtselector(KTEXT_DES), $start64
 
.code64
153,16 → 138,10
start64:
movq $(PA2KA(START_STACK)), %rsp
 
# Copy kernel to higher physical memory
movq $BOOTSTRAP_OFFSET, %rsi
movq $BOOTSTRAP_OFFSET + BOOT_OFFSET, %rdi
movq $_hardcoded_kernel_size, %rcx
cld
rep movsb
call main_bsp # never returns
call main_bsp # never returns
1:
jmp 1b
cli
hlt
.section K_DATA_START, "aw", @progbits
.align 4096
218,11 → 197,6
.fill 510,8,0
.quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
 
.global real_bootstrap_gdtr_boot
real_bootstrap_gdtr_boot:
.word gdtselector(GDT_ITEMS)
.long KA2PA(gdt)-BOOT_OFFSET
 
.global protected_bootstrap_gdtr
protected_bootstrap_gdtr:
.word gdtselector(GDT_ITEMS)
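
The (now commented-out) real-mode entry path in boot.S probes CPUID extended leaf 0x80000001 and tests EDX bit 29 before attempting the switch to long mode. The same capability test can be reproduced from user space; below is a minimal sketch using GCC/Clang's <cpuid.h>, intended only as an illustration of the check, not as HelenOS code.

/* User-space sketch of the long-mode test boot.S performs with
 * `cpuid` / `bt $29, %edx` (CPUID leaf 0x80000001, EDX bit 29 = LM).
 * Build with a hosted GCC/Clang on x86, not inside the kernel. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x80000000 reports the highest supported extended leaf,
	 * mirroring the `cmp $0x80000000, %eax` / `jbe no_long_mode` check. */
	if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) || eax < 0x80000001) {
		puts("no extended CPUID leaves - no long mode");
		return 1;
	}

	__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	printf("long mode %ssupported\n", (edx & (1u << 29)) ? "" : "not ");
	return 0;
}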
/kernel/trunk/arch/amd64/src/mm/memory_init.c
31,8 → 31,9
#include <arch/mm/page.h>
#include <print.h>
 
__u8 e820counter __attribute__ ((section ("BOOT_DATA"))) = 0xff;
struct e820memmap_ e820table[MEMMAP_E820_MAX_RECORDS] __attribute__ ((section ("BOOT_DATA"))) ;
__u8 e820counter = 0xff;
struct e820memmap_ e820table[MEMMAP_E820_MAX_RECORDS];
__u32 e801memorysize;
 
size_t get_memory_size(void)
{
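
With the real-mode memmap.S code deleted, e820counter and e820table lose their BOOT_DATA section placement. The records themselves follow the BIOS E820 convention; the sketch below shows how usable RAM could be totalled from such a table. The struct name, its field names, and MEMMAP_MEMORY_AVAILABLE are assumed stand-ins for illustration only, since struct e820memmap_ and its definitions are not shown in this diff.

/* Hedged sketch of walking an E820-style memory map like e820table.
 * The record layout (base_address, size, type) is the conventional BIOS
 * E820 format and is assumed here. */
#include <stdint.h>
#include <stddef.h>

#define MEMMAP_E820_MAX_RECORDS 32   /* assumed cap, mirrors the array bound */
#define MEMMAP_MEMORY_AVAILABLE 1    /* type 1 = usable RAM in the E820 convention */

struct e820memmap_sketch {
	uint64_t base_address;
	uint64_t size;
	uint32_t type;
};

/* Sum the usable regions reported by the boot-time map. */
static uint64_t total_available(const struct e820memmap_sketch *map, uint8_t count)
{
	uint64_t total = 0;
	for (size_t i = 0; i < count && i < MEMMAP_E820_MAX_RECORDS; i++) {
		if (map[i].type == MEMMAP_MEMORY_AVAILABLE)
			total += map[i].size;
	}
	return total;
}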