Subversion Repositories HelenOS-historic

Compare Revisions: Rev 187 → Rev 188

/SPARTAN/trunk/arch/amd64/include/pm.h
34,7 → 34,7
#include <arch/context.h>
 
#define IDT_ITEMS 64
#define GDT_ITEMS 6
#define GDT_ITEMS 7
 
#define NULL_DES 0
#define KTEXT_DES 1
52,6 → 52,7
#define AR_DATA (2<<3)
#define AR_CODE (3<<3)
#define AR_WRITABLE (1<<1)
#define AR_READABLE (1<<1)
#define AR_INTERRUPT (0xe)
#define AR_TSS (0x9)
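
The GDT grows from 6 to 7 entries to make room for the extra code descriptor added in pm.c below, and AR_READABLE shares the value of AR_WRITABLE because bit 1 of the access byte means writable for a data segment and readable for a code segment. A minimal C sketch of the selector arithmetic boot.S relies on; NULL_DES and KTEXT_DES are the constants defined above, while KDATA_DES and KTEXT16_DES are placeholder names inferred from the selector operands boot.S actually uses ($16 and $40).

/* Sketch only: selector = descriptor index * 8 (RPL = 0, table = GDT).
 * NULL_DES and KTEXT_DES come from pm.h; KDATA_DES and KTEXT16_DES are
 * placeholder names inferred from the operands used in boot.S. */
#include <stdio.h>

#define GDT_SELECTOR(des) ((des) << 3)

enum {
	NULL_DES    = 0,
	KTEXT_DES   = 1,	/* jmpl $8, $start64 */
	KDATA_DES   = 2,	/* movw $16, %ax */
	KTEXT16_DES = 5		/* jmpl $40, $now_in_prot */
};

int main(void)
{
	printf("KTEXT   selector: %d\n", GDT_SELECTOR(KTEXT_DES));	/* 8 */
	printf("KDATA   selector: %d\n", GDT_SELECTOR(KDATA_DES));	/* 16 */
	printf("KTEXT16 selector: %d\n", GDT_SELECTOR(KTEXT16_DES));	/* 40 */
	return 0;
}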
 
/SPARTAN/trunk/arch/amd64/include/mm/page.h
32,12 → 32,10
#include <mm/page.h>
#include <arch/mm/frame.h>
#include <arch/types.h>
#include <arch/mm/ptl.h>
 
#define PAGE_SIZE FRAME_SIZE
 
#define KA2PA(x) (((__address) (x)) + 0x80000000)
#define PA2KA(x) (((__address) (x)) - 0x80000000)
 
#define PTL0_INDEX_ARCH(vaddr) 0
#define PTL1_INDEX_ARCH(vaddr) 0
#define PTL2_INDEX_ARCH(vaddr) 0
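
The PTLx_INDEX_ARCH macros are still stubs at this revision, and the KA2PA/PA2KA pair works purely by 64-bit modular arithmetic. A standalone sketch, assuming __address is an unsigned 64-bit integer as elsewhere in the tree:

/* Mirror of the macros above, for illustration only. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t __address;

#define KA2PA(x) (((__address) (x)) + 0x80000000)
#define PA2KA(x) (((__address) (x)) - 0x80000000)

#define PTL0_INDEX_ARCH(vaddr) 0
#define PTL1_INDEX_ARCH(vaddr) 0
#define PTL2_INDEX_ARCH(vaddr) 0

int main(void)
{
	__address pa = 0x100000;	/* arbitrary physical address for the demo */
	__address ka = PA2KA(pa);	/* unsigned wrap lands in the 0xffffffff80000000 range */

	printf("pa = 0x%llx, ka = 0x%llx, round trip ok = %d\n",
	       (unsigned long long) pa, (unsigned long long) ka,
	       KA2PA(ka) == pa);
	printf("stub indices: %d %d %d\n",
	       PTL0_INDEX_ARCH(ka), PTL1_INDEX_ARCH(ka), PTL2_INDEX_ARCH(ka));
	return 0;
}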
/SPARTAN/trunk/arch/amd64/include/mm/ptl.h
0,0 → 1,46
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
 
#ifndef __amd64_PTL_H_
#define __amd64_PTL_H_
 
#define PTL_NO_EXEC (1<<63)
#define PTL_ACCESSED (1<<5)
#define PTL_CACHE_DISABLE (1<<4)
#define PTL_CACHE_THROUGH (1<<3)
#define PTL_USER (1<<2)
#define PTL_WRITABLE (1<<1)
#define PTL_PRESENT 1
#define PTL_2MB_PAGE (1<<7)
 
#define KA2PA(x) (((__address) (x)) + 0x80000000)
#define PA2KA(x) (((__address) (x)) - 0x80000000)
 
 
#endif
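
A quick C sketch of how these flags combine into the 2 MB entries that boot.S builds statically below. Plain C needs 64-bit constants for a 63-bit shift, so the sketch spells the flags with ULL suffixes; the bit positions are the ones defined above.

#include <stdint.h>
#include <stdio.h>

#define PTL_NO_EXEC       (1ULL << 63)
#define PTL_2MB_PAGE      (1ULL << 7)
#define PTL_ACCESSED      (1ULL << 5)
#define PTL_CACHE_DISABLE (1ULL << 4)
#define PTL_CACHE_THROUGH (1ULL << 3)
#define PTL_USER          (1ULL << 2)
#define PTL_WRITABLE      (1ULL << 1)
#define PTL_PRESENT       1ULL

/* One 2 MB mapping; the physical base must be 2 MB aligned. */
static uint64_t ptl2_entry(uint64_t phys)
{
	return phys | PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE;
}

int main(void)
{
	/* The eight entries boot.S creates: 0, 2 MB, 4 MB, ..., 14 MB. */
	for (uint64_t i = 0; i < 8; i++)
		printf("ptl_2[%llu] = 0x%llx\n", (unsigned long long) i,
		       (unsigned long long) ptl2_entry(i * 0x200000));
	return 0;
}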
/SPARTAN/trunk/arch/amd64/src/boot/boot.S
26,10 → 26,11
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
.section K_TEXT_START
.global kernel_image_start
#include <arch/mm/ptl.h>
 
.code16
#define START_STACK 0x7c00
#define START_STACK_64 $0xffffffff80007c00
#
# This is where we require any SPARTAN-kernel-compatible boot loader
# to pass control in real mode.
38,14 → 39,17
# time. So we can just load the respective table registers and
# switch to protected mode.
#
.section K_TEXT_START
.code16
.global kernel_image_start
kernel_image_start:
cli
xorw %ax,%ax
movw %ax,%ds
movw %ax,%ss # initialize stack segment register
movl $0x7c00,%esp # initialize stack pointer
movl $START_STACK,%esp # initialize stack pointer
call memmap_arch_init
# call memmap_arch_init
mov $0x80000000, %eax
cpuid
56,38 → 60,99
bt $29, %edx # Test if long mode is supported.
jnc no_long_mode
 
# Fill out GDTR.base, IDTR.base
leal gdtr, %eax
movl gdt_addr, %ebx
movl %ebx, 2(%eax)
# Load gdtr, idtr
lgdt gdtr_inst
lidt idtr_inst
movl %cr0,%eax
orl $0x1,%eax
movl %eax,%cr0 # switch to protected mode
 
movl idt_addr, %ebx
leal idtr, %eax
movl %ebx, 2(%eax)
jmpl $40, $now_in_prot
 
# Load gdtr, idtr
lgdt gdtr
lidt idtr
no_long_mode:
1:
jmp 1b
 
# Protected 16-bit. We want to reuse the code-seg descriptor;
# the Default operand size must not be 1 when entering long mode
now_in_prot:
# Set up stack & data descriptors
movw $16, %ax
movw %ax, %ds
movw %ax, %fs
movw %ax, %gs
movw %ax, %ss
 
# Enable 64-bit page translation entries - CR4.PAE = 1.
# Paging is not enabled until after long mode is enabled
movl %cr4, %eax
btsl $5, %eax
movl %eax, %cr4
 
# Set up paging tables
leal ptl_0, %eax
movl %eax, %cr3
# Enable long mode
movl $0xc0000080, %ecx # EFER MSR number
rdmsr # Read EFER
btsl $8, %eax # Set LME=1
wrmsr # Write EFER
mov $1, %eax # Enable protected mode (CR0.PE = 1)
mov %eax, %cr0
# Enable paging to activate long mode (set CR0.PG=1)
movl %cr0, %eax
btsl $31, %eax
movl %eax, %cr0
# At this point we are in compatibility mode
jmpl $8, $start64
 
jmpl $8, $now_in_prot
.code64
start64:
movq START_STACK_64, %rsp
now_in_prot:
lidt idtr_inst
 
no_long_mode:
call main_bsp # never returns
1:
jmp 1b
 
.section K_DATA_START
.align 4096
page_directory:
.space 4096, 0
.global ptl_2
ptl_2:
.quad 0x0 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x200000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x400000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x600000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x800000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0xa00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0xc00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0xe00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.align 4096
.global ptl_1
ptl_1:
.quad ptl_2 + (PTL_WRITABLE | PTL_PRESENT)
.fill 509,8,0
.quad ptl_2 + (PTL_WRITABLE | PTL_PRESENT)
.fill 1,8,0
.align 4096
.global ptl_0
ptl_0:
.quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
.fill 510,8,0
.quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
 
gdt_addr:
.quad gdt + 0x80000000
idt_addr:
.quad idt + 0x80000000
.global gdtr_inst
gdtr_inst:
.word 7*8 # GDT_ITEMS * 8
.long gdt + 0x80000000
 
.global idtr_inst
idtr_inst:
.word 0
.long idt + 0x80000000
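
The static tables above map the first 16 MB of physical memory twice: once at virtual address 0 and once at _ka_start from _link.ld. A standalone sketch of how those entries are reached, using the standard long-mode 9-bits-per-level split (the shift/mask formulas and the CR/MSR constants are architectural facts, not macros taken from this revision):

#include <stdint.h>
#include <stdio.h>

/* Architectural index split for 4-level paging with 2 MB pages at the bottom. */
#define PML4_INDEX(va) (((va) >> 39) & 0x1ff)	/* selects a ptl_0 entry */
#define PDPT_INDEX(va) (((va) >> 30) & 0x1ff)	/* selects a ptl_1 entry */
#define PD_INDEX(va)   (((va) >> 21) & 0x1ff)	/* selects a ptl_2 entry */

/* Control bits the boot code flips on the way into long mode. */
#define CR4_PAE  (1u << 5)	/* btsl $5, %eax on CR4 */
#define EFER_MSR 0xc0000080u	/* rdmsr/wrmsr target */
#define EFER_LME (1u << 8)	/* btsl $8, %eax on EFER */
#define CR0_PG   (1u << 31)	/* btsl $31, %eax on CR0 */

int main(void)
{
	uint64_t vas[] = { 0x0ull, 0xffffffff80000000ull };	/* identity map and _ka_start */

	for (int i = 0; i < 2; i++)
		printf("va 0x%016llx -> ptl_0[%llu] ptl_1[%llu] ptl_2[%llu]\n",
		       (unsigned long long) vas[i],
		       (unsigned long long) PML4_INDEX(vas[i]),
		       (unsigned long long) PDPT_INDEX(vas[i]),
		       (unsigned long long) PD_INDEX(vas[i]));

	/* Prints 0/0/0 and 511/510/0, matching the entries that follow the
	 * .fill runs in ptl_0 and ptl_1 above. */
	return 0;
}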
/SPARTAN/trunk/arch/amd64/src/pm.c
44,11 → 44,11
{ .limit_0_15 = 0xffff,
.base_0_15 = 0,
.base_16_23 = 0,
.access = AR_PRESENT | AR_CODE | DPL_KERNEL,
.access = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE ,
.limit_16_19 = 0xf,
.available = 0,
.longmode = 1,
.special = 0,
.granularity = 1,
.base_24_31 = 0 },
/* KDATA descriptor */
60,7 → 60,7
.available = 0,
.longmode = 0,
.special = 0,
.granularity = 0,
.granularity = 1,
.base_24_31 = 0 },
/* UTEXT descriptor */
{ .limit_0_15 = 0xffff,
84,6 → 84,17
.special = 1,
.granularity = 1,
.base_24_31 = 0 },
/* KTEXT 16-bit protected */
{ .limit_0_15 = 0xffff,
.base_0_15 = 0,
.base_16_23 = 0,
.access = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
.limit_16_19 = 0xf,
.available = 0,
.longmode = 0,
.special = 0,
.granularity = 1,
.base_24_31 = 0 },
/* TSS descriptor - set up will be completed later */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
92,7 → 103,5
 
static struct tss tss;
 
/* gdtr is changed by kmp before next CPU is initialized */
struct ptr_16_32 gdtr __attribute__ ((section ("K_DATA_START"))) = { .limit = sizeof(gdt) };
//struct ptr_16_32 gdtr __attribute__ ((section ("K_DATA_START"))) = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
struct ptr_16_32 idtr __attribute__ ((section ("K_DATA_START"))) = { .limit = sizeof(idt) };
/* Does not compile correctly if it does not exist */
int __attribute__ ((section ("K_DATA_START"))) __fake;
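
The granularity change matters for the effective limit: with limit_0_15 = 0xffff and limit_16_19 = 0xf the raw limit is 0xfffff, which covers only 1 MB when interpreted byte-granular but the full 4 GB once the granularity bit scales it by 4 KB pages. A small sketch of that arithmetic (illustration only, not code from pm.c):

#include <stdint.h>
#include <stdio.h>

/* Effective limit of a segment descriptor given its raw 20-bit limit. */
static uint64_t effective_limit(uint32_t raw_limit, int granularity)
{
	if (granularity)
		return ((uint64_t) raw_limit + 1) * 4096 - 1;	/* page granular */
	return raw_limit;					/* byte granular */
}

int main(void)
{
	printf("G=0: limit = 0x%llx\n", (unsigned long long) effective_limit(0xfffff, 0));	/* 0xfffff (1 MB - 1) */
	printf("G=1: limit = 0x%llx\n", (unsigned long long) effective_limit(0xfffff, 1));	/* 0xffffffff (4 GB - 1) */
	return 0;
}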
/SPARTAN/trunk/arch/amd64/_link.ld
39,6 → 39,6
 
_hardcoded_ktext_size = ktext_end - ktext_start + (unmapped_ktext_end - unmapped_ktext_start);
_hardcoded_kdata_size = kdata_end - kdata_start + (unmapped_kdata_end - unmapped_kdata_start);
_hardcoded_load_address = -0x80008000;
 
_hardcoded_load_address = 0xffffffff80008000;
_ka_start = 0xffffffff80000000;
}
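
For reference, the two load-address constants are not the same value: interpreted as a 64-bit address, -0x80008000 wraps to 0xffffffff7fff8000, which sits 0x8000 below _ka_start, while the new constant equals _ka_start + 0x8000. A two-line check (illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old_addr = (uint64_t) -0x80008000ll;	/* wraps to 0xffffffff7fff8000 */
	uint64_t new_addr = 0xffffffff80008000ull;	/* _ka_start + 0x8000 */

	printf("old: 0x%016llx\nnew: 0x%016llx\n",
	       (unsigned long long) old_addr, (unsigned long long) new_addr);
	return 0;
}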