Subversion Repositories HelenOS

Compare Revisions


Rev HEAD → Rev 967

/kernel/trunk/arch/amd64/include/mm/page.h
0,0 → 1,151
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_PAGE_H__
#define __amd64_PAGE_H__
 
#include <arch/mm/frame.h>
 
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#ifdef KERNEL
 
#ifndef __ASM__
# include <mm/page.h>
# include <arch/types.h>
#endif
 
#ifndef __ASM__
# define KA2PA(x) (((__address) (x)) - 0xffffffff80000000)
# define PA2KA(x) (((__address) (x)) + 0xffffffff80000000)
#else
# define KA2PA(x) ((x) - 0xffffffff80000000)
# define PA2KA(x) ((x) + 0xffffffff80000000)
#endif
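
/*
 * Illustrative example (editor's note, not part of the original header):
 * the kernel is linked at the -2 GiB mark, so KA2PA/PA2KA are a constant
 * offset translation. For instance:
 *
 *	KA2PA(0xffffffff80108000) == 0x0000000000108000
 *	PA2KA(0x0000000000108000) == 0xffffffff80108000
 */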
 
#define PTL0_ENTRIES_ARCH 512
#define PTL1_ENTRIES_ARCH 512
#define PTL2_ENTRIES_ARCH 512
#define PTL3_ENTRIES_ARCH 512
 
#define PTL0_INDEX_ARCH(vaddr) (((vaddr)>>39)&0x1ff)
#define PTL1_INDEX_ARCH(vaddr) (((vaddr)>>30)&0x1ff)
#define PTL2_INDEX_ARCH(vaddr) (((vaddr)>>21)&0x1ff)
#define PTL3_INDEX_ARCH(vaddr) (((vaddr)>>12)&0x1ff)
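
/*
 * Worked example (editor's illustration): the four macros above slice a
 * virtual address into 9-bit indices for the four levels of the AMD64
 * page table hierarchy. For the kernel base 0xffffffff80000000:
 *
 *	PTL0_INDEX_ARCH(va) == 511	(bits 47..39)
 *	PTL1_INDEX_ARCH(va) == 510	(bits 38..30)
 *	PTL2_INDEX_ARCH(va) == 0	(bits 29..21)
 *	PTL3_INDEX_ARCH(va) == 0	(bits 20..12)
 */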
 
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl0))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl0))[(i)].addr_32_51)<<32 )))
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl1))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl1))[(i)].addr_32_51)<<32 )))
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl2))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl2))[(i)].addr_32_51)<<32 )))
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((__address *) ((((__u64) ((pte_t *)(ptl3))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl3))[(i)].addr_32_51)<<32 )))
 
#define SET_PTL0_ADDRESS_ARCH(ptl0) (write_cr3((__address) (ptl0)))
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) set_pt_addr((pte_t *)(ptl0), (index_t)(i), a)
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a) set_pt_addr((pte_t *)(ptl1), (index_t)(i), a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a) set_pt_addr((pte_t *)(ptl2), (index_t)(i), a)
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) set_pt_addr((pte_t *)(ptl3), (index_t)(i), a)
 
#define GET_PTL1_FLAGS_ARCH(ptl0, i) get_pt_flags((pte_t *)(ptl0), (index_t)(i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) get_pt_flags((pte_t *)(ptl1), (index_t)(i))
#define GET_PTL3_FLAGS_ARCH(ptl2, i) get_pt_flags((pte_t *)(ptl2), (index_t)(i))
#define GET_FRAME_FLAGS_ARCH(ptl3, i) get_pt_flags((pte_t *)(ptl3), (index_t)(i))
 
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) set_pt_flags((pte_t *)(ptl0), (index_t)(i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x) set_pt_flags((pte_t *)(ptl1), (index_t)(i), (x))
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) set_pt_flags((pte_t *)(ptl2), (index_t)(i), (x))
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) set_pt_flags((pte_t *)(ptl3), (index_t)(i), (x))
 
#define PTE_VALID_ARCH(p) (*((__u64 *) (p)) != 0)
 
#ifndef __ASM__
 
/** Page Table Entry. */
struct page_specifier {
unsigned present : 1;
unsigned writeable : 1;
unsigned uaccessible : 1;
unsigned page_write_through : 1;
unsigned page_cache_disable : 1;
unsigned accessed : 1;
unsigned dirty : 1;
unsigned unused: 1;
unsigned global : 1;
unsigned soft_valid : 1; /**< Valid content even if present bit is cleared. */
unsigned avl : 2;
unsigned addr_12_31 : 30;
unsigned addr_32_51 : 21;
unsigned no_execute : 1;
} __attribute__ ((packed));
 
static inline int get_pt_flags(pte_t *pt, index_t i)
{
pte_t *p = &pt[i];
return (
(!p->page_cache_disable)<<PAGE_CACHEABLE_SHIFT |
(!p->present)<<PAGE_PRESENT_SHIFT |
p->uaccessible<<PAGE_USER_SHIFT |
1<<PAGE_READ_SHIFT |
p->writeable<<PAGE_WRITE_SHIFT |
(!p->no_execute)<<PAGE_EXEC_SHIFT |
p->global<<PAGE_GLOBAL_SHIFT
);
}
 
static inline void set_pt_addr(pte_t *pt, index_t i, __address a)
{
pte_t *p = &pt[i];
 
p->addr_12_31 = (a >> 12) & 0xfffff;
p->addr_32_51 = a >> 32;
}
 
static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
{
pte_t *p = &pt[i];
p->page_cache_disable = !(flags & PAGE_CACHEABLE);
p->present = !(flags & PAGE_NOT_PRESENT);
p->uaccessible = (flags & PAGE_USER) != 0;
p->writeable = (flags & PAGE_WRITE) != 0;
p->no_execute = (flags & PAGE_EXEC) == 0;
p->global = (flags & PAGE_GLOBAL) != 0;
/*
* Ensure that there is at least one bit set even if the present bit is cleared.
*/
p->soft_valid = 1;
}
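
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the generic mapping code drives the hierarchy through the *_ARCH macros
 * above. Installing a single 4K translation at the last level could look
 * roughly like this, assuming ptl3 points to a valid level-3 table and
 * using the generic PAGE_* flags from <mm/page.h>:
 */
static inline void example_map_frame(pte_t *ptl3, __address page, __address frame)
{
	index_t i = PTL3_INDEX_ARCH(page);

	SET_FRAME_ADDRESS_ARCH(ptl3, i, frame);	/* physical frame base */
	SET_FRAME_FLAGS_ARCH(ptl3, i, PAGE_PRESENT | PAGE_WRITE | PAGE_CACHEABLE);
}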
 
extern void page_arch_init(void);
 
#endif /* __ASM__ */
 
#endif /* KERNEL */
 
#endif
/kernel/trunk/arch/amd64/include/mm/as.h
0,0 → 1,45
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_AS_H__
#define __amd64_AS_H__
 
#include <arch/types.h>
 
#define KERNEL_ADDRESS_SPACE_START_ARCH (__address) 0xffffffff80000000
#define KERNEL_ADDRESS_SPACE_END_ARCH (__address) 0xffffffffffffffff
#define USER_ADDRESS_SPACE_START_ARCH (__address) 0x0000000000000000
#define USER_ADDRESS_SPACE_END_ARCH (__address) 0x00007fffffffffff
 
#define USTACK_ADDRESS_ARCH (USER_ADDRESS_SPACE_END_ARCH-(PAGE_SIZE-1))
 
#define as_install_arch(as)
 
extern void as_arch_init(void);
 
#endif
/kernel/trunk/arch/amd64/include/mm/frame.h
0,0 → 1,45
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_FRAME_H__
#define __amd64_FRAME_H__
 
#ifndef __ASM__
#include <arch/types.h>
#endif /* __ASM__ */
 
#define FRAME_WIDTH 12 /* 4K */
#define FRAME_SIZE (1<<FRAME_WIDTH)
 
 
#ifndef __ASM__
extern __address last_frame;
extern void frame_arch_init(void);
#endif /* __ASM__ */
 
#endif
/kernel/trunk/arch/amd64/include/mm/asid.h
0,0 → 1,0
link ../../../ia32/include/mm/asid.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/mm/tlb.h
0,0 → 1,35
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_TLB_H__
#define __amd64_TLB_H__
 
#define tlb_arch_init()
#define tlb_print()
 
#endif
/kernel/trunk/arch/amd64/include/mm/memory_init.h
0,0 → 1,0
link ../../../ia32/include/mm/memory_init.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/mm/ptl.h
0,0 → 1,43
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
 
#ifndef __amd64_PTL_H_
#define __amd64_PTL_H_
 
#define PTL_NO_EXEC (1<<63)
#define PTL_ACCESSED (1<<5)
#define PTL_CACHE_DISABLE (1<<4)
#define PTL_CACHE_THROUGH (1<<3)
#define PTL_USER (1<<2)
#define PTL_WRITABLE (1<<1)
#define PTL_PRESENT 1
#define PTL_2MB_PAGE (1<<7)
 
 
#endif
/kernel/trunk/arch/amd64/include/syscall.h
0,0 → 1,38
/*
* Copyright (C) 2006 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_SYSCALL_H__
#define __amd64_SYSCALL_H__
 
#include <arch/types.h>
 
extern __native syscall_handler(__native a1,__native a2, __native a3,
__native a4, __native id);
extern void syscall_setup_cpu(void);
 
#endif
/kernel/trunk/arch/amd64/include/interrupt.h
0,0 → 1,100
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia32_INTERRUPT_H__
#define __ia32_INTERRUPT_H__
 
#include <arch/types.h>
#include <arch/pm.h>
 
#define IVT_ITEMS IDT_ITEMS
 
#define EXC_COUNT 32
#define IRQ_COUNT 16
 
#define IVT_EXCBASE 0
#define IVT_IRQBASE (IVT_EXCBASE+EXC_COUNT)
#define IVT_FREEBASE (IVT_IRQBASE+IRQ_COUNT)
 
#define IRQ_CLK 0
#define IRQ_KBD 1
#define IRQ_PIC1 2
#define IRQ_PIC_SPUR 7
 
/* This one must have the four least significant bits set to ones */
#define VECTOR_APIC_SPUR (IVT_ITEMS-1)
 
#if (((VECTOR_APIC_SPUR + 1)%16) || VECTOR_APIC_SPUR >= IVT_ITEMS)
#error Wrong definition of VECTOR_APIC_SPUR
#endif
 
#define VECTOR_PIC_SPUR (IVT_IRQBASE+IRQ_PIC_SPUR)
#define VECTOR_CLK (IVT_IRQBASE+IRQ_CLK)
#define VECTOR_KBD (IVT_IRQBASE+IRQ_KBD)
 
#define VECTOR_TLB_SHOOTDOWN_IPI (IVT_FREEBASE+0)
#define VECTOR_WAKEUP_IPI (IVT_FREEBASE+1)
 
/** This is passed to interrupt handlers */
struct istate {
__u64 rax;
__u64 rbx;
__u64 rcx;
__u64 rdx;
__u64 rsi;
__u64 rdi;
__u64 r8;
__u64 r9;
__u64 r10;
__u64 r11;
__u64 r12;
__u64 r13;
__u64 r14;
__u64 r15;
/* These 2 items MUST be last parts of the structure */
__u64 rbp;
__u64 stack[0]; /* Additional data on stack */
} __attribute__ ((packed));
 
extern void (* disable_irqs_function)(__u16 irqmask);
extern void (* enable_irqs_function)(__u16 irqmask);
extern void (* eoi_function)(void);
 
extern void null_interrupt(int n, istate_t *istate);
extern void gp_fault(int n, istate_t *istate);
extern void nm_fault(int n, istate_t *istate);
extern void ss_fault(int n, istate_t *istate);
extern void page_fault(int n, istate_t *istate);
extern void syscall(int n, istate_t *istate);
extern void tlb_shootdown_ipi(int n, istate_t *istate);
 
extern void trap_virtual_enable_irqs(__u16 irqmask);
extern void trap_virtual_disable_irqs(__u16 irqmask);
extern void trap_virtual_eoi(void);
 
#endif
/kernel/trunk/arch/amd64/include/context.h
0,0 → 1,61
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_CONTEXT_H__
#define __amd64_CONTEXT_H__
 
#ifndef __amd64_TYPES_H__
# include <arch/types.h>
#endif
 
 
/* According to the ABI, the stack MUST be aligned on a
* 16-byte boundary. If it is not, code that uses va_arg will
* panic sooner or later.
*/
#define SP_DELTA 16
 
/* We include only registers that must be preserved
* during function call
*/
struct context {
__address sp;
__address pc;
__u64 rbx;
__u64 rbp;
 
__u64 r12;
__u64 r13;
__u64 r14;
__u64 r15;
 
ipl_t ipl;
} __attribute__ ((packed));
 
#endif
/kernel/trunk/arch/amd64/include/types.h
0,0 → 1,55
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_TYPES_H__
#define __amd64_TYPES_H__
 
#define NULL 0
 
typedef signed char __s8;
typedef signed short __s16;
typedef signed int __s32;
typedef signed long long __s64;
 
typedef unsigned char __u8;
typedef unsigned short __u16;
typedef unsigned int __u32;
typedef unsigned long long __u64;
 
typedef __u64 __address;
typedef __u64 pfn_t;
 
/* Flags of processor (return value of interrupts_disable()) */
typedef __u64 ipl_t;
 
typedef __u64 __native;
typedef __s64 __snative;
 
typedef struct page_specifier pte_t;
 
#endif
/kernel/trunk/arch/amd64/include/elf.h
0,0 → 1,36
/*
* Copyright (C) 2006 Sergey Bondari
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_ELF_H__
#define __amd64_ELF_H__
 
#define ELF_MACHINE EM_X86_64
#define ELF_DATA_ENCODING ELFDATA2LSB
#define ELF_CLASS ELFCLASS64
 
#endif
/kernel/trunk/arch/amd64/include/drivers/i8042.h
0,0 → 1,0
link ../../../ia32/include/drivers/i8042.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/cpuid.h
0,0 → 1,58
/*
* Copyright (C) 2001-2004 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_CPUID_H__
#define __amd64_CPUID_H__
 
#define AMD_CPUID_EXTENDED 0x80000001
#define AMD_EXT_NOEXECUTE 20
 
#define INTEL_CPUID_STANDARD 0x1
#define INTEL_SSE2 26
#define INTEL_FXSAVE 24
 
#ifndef __ASM__
 
#include <arch/types.h>
 
struct cpu_info {
__u32 cpuid_eax;
__u32 cpuid_ebx;
__u32 cpuid_ecx;
__u32 cpuid_edx;
} __attribute__ ((packed));
 
extern int has_cpuid(void);
 
extern void cpuid(__u32 cmd, cpu_info_t *info);
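
/*
 * Usage sketch (editor's illustration): testing for the no-execute feature
 * could look like this; NX support is reported in EDX bit AMD_EXT_NOEXECUTE
 * of the extended CPUID leaf:
 *
 *	cpu_info_t info;
 *
 *	cpuid(AMD_CPUID_EXTENDED, &info);
 *	if (info.cpuid_edx & (1 << AMD_EXT_NOEXECUTE)) {
 *		// NX pages are supported, the kernel may set EFER.NXE
 *	}
 */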
 
 
extern __u64 rdtsc(void);
 
#endif /* __ASM__ */
#endif
/kernel/trunk/arch/amd64/include/asm.h
0,0 → 1,256
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_ASM_H__
#define __amd64_ASM_H__
 
#include <arch/types.h>
#include <config.h>
 
extern void asm_delay_loop(__u32 t);
extern void asm_fake_loop(__u32 t);
 
/** Return base address of current stack.
*
* Return the base address of the current stack.
* The stack is assumed to be STACK_SIZE bytes long.
* The stack must start on a page boundary.
*/
static inline __address get_stack_base(void)
{
__address v;
__asm__ volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((__u64)STACK_SIZE-1)));
return v;
}
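
/*
 * Worked example (editor's note): assuming STACK_SIZE is 8K, the mask is
 * ~(0x2000 - 1) == 0xffffffffffffe000, so for %rsp == 0xffffffff80013a58
 * get_stack_base() returns 0xffffffff80012000, the bottom of the current
 * stack.
 */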
 
static inline void cpu_sleep(void) { __asm__ volatile ("hlt\n"); };
static inline void cpu_halt(void) { __asm__ volatile ("hlt\n"); };
 
 
/** Byte from port
*
* Get byte from port
*
* @param port Port to read from
* @return Value read
*/
static inline __u8 inb(__u16 port) { __u8 val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; }
 
/** Byte to port
*
* Output byte to port
*
* @param port Port to write to
* @param val Value to write
*/
static inline void outb(__u16 port, __u8 val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); }
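
/*
 * Editor's note: a classic use of outb() is acknowledging an interrupt at
 * the master i8259 PIC by writing the non-specific EOI command (0x20) to
 * its command port (0x20):
 *
 *	outb(0x20, 0x20);
 */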
 
/** Swap the hidden part of the GS register with the visible one */
static inline void swapgs(void) { __asm__ volatile("swapgs"); }
 
/** Enable interrupts.
*
* Enable interrupts and return previous
* value of EFLAGS.
*
* @return Old interrupt priority level.
*/
static inline ipl_t interrupts_enable(void) {
ipl_t v;
__asm__ volatile (
"pushfq\n"
"popq %0\n"
"sti\n"
: "=r" (v)
);
return v;
}
 
/** Disable interrupts.
*
* Disable interrupts and return previous
* value of EFLAGS.
*
* @return Old interrupt priority level.
*/
static inline ipl_t interrupts_disable(void) {
ipl_t v;
__asm__ volatile (
"pushfq\n"
"popq %0\n"
"cli\n"
: "=r" (v)
);
return v;
}
 
/** Restore interrupt priority level.
*
* Restore EFLAGS.
*
* @param ipl Saved interrupt priority level.
*/
static inline void interrupts_restore(ipl_t ipl) {
__asm__ volatile (
"pushq %0\n"
"popfq\n"
: : "r" (ipl)
);
}
 
/** Return interrupt priority level.
*
* Return EFLAGS.
*
* @return Current interrupt priority level.
*/
static inline ipl_t interrupts_read(void) {
ipl_t v;
__asm__ volatile (
"pushfq\n"
"popq %0\n"
: "=r" (v)
);
return v;
}
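
/*
 * Typical usage sketch (editor's illustration): the returned ipl_t is an
 * opaque copy of RFLAGS, so critical sections pair the calls above:
 *
 *	ipl_t ipl = interrupts_disable();
 *	// ... code that must not be interrupted ...
 *	interrupts_restore(ipl);
 *
 * interrupts_restore() re-enables interrupts only if they were enabled
 * before interrupts_disable() was called.
 */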
 
/** Read CR0
*
* Return value in CR0
*
* @return Value read.
*/
static inline __u64 read_cr0(void)
{
__u64 v;
__asm__ volatile ("movq %%cr0,%0\n" : "=r" (v));
return v;
}
 
/** Read CR2
*
* Return value in CR2
*
* @return Value read.
*/
static inline __u64 read_cr2(void)
{
__u64 v;
__asm__ volatile ("movq %%cr2,%0\n" : "=r" (v));
return v;
}
 
/** Write CR3
*
* Write value to CR3.
*
* @param v Value to be written.
*/
static inline void write_cr3(__u64 v)
{
__asm__ volatile ("movq %0,%%cr3\n" : : "r" (v));
}
 
/** Read CR3
*
* Return value in CR3
*
* @return Value read.
*/
static inline __u64 read_cr3(void)
{
__u64 v;
__asm__ volatile ("movq %%cr3,%0" : "=r" (v));
return v;
}
 
/** Write to MSR */
static inline void write_msr(__u32 msr, __u64 value)
{
__asm__ volatile (
"wrmsr;" : : "c" (msr),
"a" ((__u32)(value)),
"d" ((__u32)(value >> 32))
);
}
 
static inline __native read_msr(__u32 msr)
{
__u32 ax, dx;
 
__asm__ volatile (
"rdmsr;" : "=a"(ax), "=d"(dx) : "c" (msr)
);
return ((__u64)dx << 32) | ax;
}
 
 
/** Enable local APIC
*
* Enable local APIC in MSR.
*/
static inline void enable_l_apic_in_msr()
{
__asm__ volatile (
"movl $0x1b, %%ecx\n"
"rdmsr\n"
"orl $(1<<11),%%eax\n"
"orl $(0xfee00000),%%eax\n"
"wrmsr\n"
:
:
:"%eax","%ecx","%edx"
);
}
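
/*
 * Editor's note: the same effect could be expressed with the MSR helpers
 * above; the hardcoded 0x1b is the IA32_APIC_BASE MSR, bit 11 is the
 * global APIC enable bit and 0xfee00000 the default APIC base:
 *
 *	write_msr(0x1b, read_msr(0x1b) | (1 << 11) | 0xfee00000);
 */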
 
static inline __address * get_ip()
{
__address *ip;
 
__asm__ volatile (
"mov %%rip, %0"
: "=r" (ip)
);
return ip;
}
 
/** Invalidate TLB Entry.
*
* @param addr Address on a page whose TLB entry is to be invalidated.
*/
static inline void invlpg(__address addr)
{
__asm__ volatile ("invlpg %0\n" :: "m" (addr));
}
 
extern size_t interrupt_handler_size;
extern void interrupt_handlers(void);
 
#endif
/kernel/trunk/arch/amd64/include/cpu.h
0,0 → 1,73
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_CPU_H__
#define __amd64_CPU_H__
 
 
#define EFER_MSR_NUM 0xc0000080
#define AMD_SCE_FLAG 0
#define AMD_LME_FLAG 8
#define AMD_LMA_FLAG 10
#define AMD_FFXSR_FLAG 14
#define AMD_NXE_FLAG 11
 
/* MSR registers */
#define AMD_MSR_STAR 0xc0000081
#define AMD_MSR_LSTAR 0xc0000082
#define AMD_MSR_SFMASK 0xc0000084
#define AMD_MSR_GS 0xc0000101
 
#ifndef __ASM__
 
#include <typedefs.h>
#include <arch/pm.h>
 
struct cpu_arch {
int vendor;
int family;
int model;
int stepping;
struct tss *tss;
};
 
struct star_msr {
};
 
struct lstar_msr {
};
 
extern void set_efer_flag(int flag);
extern __u64 read_efer_flag(void);
void cpu_setup_fpu(void);
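
/*
 * Editor's sketch (the real definition lives in the architecture's cpu.c,
 * which is not part of this diff): set_efer_flag() is a read-modify-write
 * of the EFER MSR, roughly equivalent to:
 *
 *	write_msr(EFER_MSR_NUM, read_msr(EFER_MSR_NUM) | (1 << flag));
 *
 * so set_efer_flag(AMD_SCE_FLAG) sets bit 0 (SYSCALL enable) and
 * set_efer_flag(AMD_NXE_FLAG) sets bit 11 (no-execute enable).
 */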
 
#endif /* __ASM__ */
 
#endif
/kernel/trunk/arch/amd64/include/pm.h
0,0 → 1,165
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_PM_H__
#define __amd64_PM_H__
 
#ifndef __ASM__
# include <arch/types.h>
# include <typedefs.h>
# include <arch/context.h>
#endif
 
#define IDT_ITEMS 64
#define GDT_ITEMS 8
 
#define NULL_DES 0
/* Warning: Do not reorder the following items unless you also check syscall.c!!! */
#define KTEXT_DES 1
#define KDATA_DES 2
#define UDATA_DES 3
#define UTEXT_DES 4
#define KTEXT32_DES 5
/* EndOfWarning */
#define TSS_DES 6
 
#define gdtselector(des) ((des)<<3)
#define idtselector(des) ((des)<<4)
 
#define PL_KERNEL 0
#define PL_USER 3
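
/*
 * Worked example (editor's note): gdtselector() turns a descriptor index
 * into a byte offset within the GDT (each entry is 8 bytes) and the low
 * two bits of a selector carry the requested privilege level, e.g.
 * gdtselector(KTEXT_DES) == 0x08 and gdtselector(UDATA_DES) | PL_USER ==
 * 0x1b - the values later packed into AMD_MSR_STAR in syscall.c.
 */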
 
#define AR_PRESENT (1<<7)
#define AR_DATA (2<<3)
#define AR_CODE (3<<3)
#define AR_WRITABLE (1<<1)
#define AR_READABLE (1<<1)
#define AR_TSS (0x9)
#define AR_INTERRUPT (0xe)
#define AR_TRAP (0xf)
 
#define DPL_KERNEL (PL_KERNEL<<5)
#define DPL_USER (PL_USER<<5)
 
#define IO_MAP_BASE (104)
 
#ifndef __ASM__
 
struct descriptor {
unsigned limit_0_15: 16;
unsigned base_0_15: 16;
unsigned base_16_23: 8;
unsigned access: 8;
unsigned limit_16_19: 4;
unsigned available: 1;
unsigned longmode: 1;
unsigned special: 1;
unsigned granularity : 1;
unsigned base_24_31: 8;
} __attribute__ ((packed));
 
struct tss_descriptor {
unsigned limit_0_15: 16;
unsigned base_0_15: 16;
unsigned base_16_23: 8;
unsigned type: 4;
unsigned : 1;
unsigned dpl : 2;
unsigned present : 1;
unsigned limit_16_19: 4;
unsigned available: 1;
unsigned : 2;
unsigned granularity : 1;
unsigned base_24_31: 8;
unsigned base_32_63 : 32;
unsigned : 32;
} __attribute__ ((packed));
 
struct idescriptor {
unsigned offset_0_15: 16;
unsigned selector: 16;
unsigned ist:3;
unsigned unused: 5;
unsigned type: 5;
unsigned dpl: 2;
unsigned present : 1;
unsigned offset_16_31: 16;
unsigned offset_32_63: 32;
unsigned : 32;
} __attribute__ ((packed));
 
struct ptr_16_64 {
__u16 limit;
__u64 base;
} __attribute__ ((packed));
 
struct ptr_16_32 {
__u16 limit;
__u32 base;
} __attribute__ ((packed));
 
struct tss {
__u32 reserve1;
__u64 rsp0;
__u64 rsp1;
__u64 rsp2;
__u64 reserve2;
__u64 ist1;
__u64 ist2;
__u64 ist3;
__u64 ist4;
__u64 ist5;
__u64 ist6;
__u64 ist7;
__u64 reserve3;
__u16 reserve4;
__u16 iomap;
} __attribute__ ((packed));
 
extern struct tss *tss_p;
 
extern struct descriptor gdt[];
extern struct idescriptor idt[];
 
extern struct ptr_16_64 gdtr;
extern struct ptr_16_32 bootstrap_gdtr;
extern struct ptr_16_32 protected_ap_gdtr;
 
extern void pm_init(void);
 
extern void gdt_tss_setbase(struct descriptor *d, __address base);
extern void gdt_tss_setlimit(struct descriptor *d, __u32 limit);
 
extern void idt_init(void);
extern void idt_setoffset(struct idescriptor *d, __address offset);
 
extern void tss_initialize(struct tss *t);
 
#endif /* __ASM__ */
 
#endif
/kernel/trunk/arch/amd64/include/context_offset.h
0,0 → 1,27
/* This file is automatically generated by gencontext.c. */
#define OFFSET_SP 0x0
#define OFFSET_PC 0x8
#define OFFSET_RBX 0x10
#define OFFSET_RBP 0x18
#define OFFSET_R12 0x20
#define OFFSET_R13 0x28
#define OFFSET_R14 0x30
#define OFFSET_R15 0x38
#define OFFSET_IPL 0x40
 
#define IOFFSET_RAX 0x0
#define IOFFSET_RBX 0x8
#define IOFFSET_RCX 0x10
#define IOFFSET_RDX 0x18
#define IOFFSET_RSI 0x20
#define IOFFSET_RDI 0x28
#define IOFFSET_R8 0x30
#define IOFFSET_R9 0x38
#define IOFFSET_R10 0x40
#define IOFFSET_R11 0x48
#define IOFFSET_R12 0x50
#define IOFFSET_R13 0x58
#define IOFFSET_R14 0x60
#define IOFFSET_R15 0x68
#define IOFFSET_RBP 0x70
#define IREGISTER_SPACE 120
/kernel/trunk/arch/amd64/include/boot/boot.h
0,0 → 1,41
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_BOOT_H__
#define __amd64_BOOT_H__
 
#define BOOT_OFFSET 0x108000
#define AP_BOOT_OFFSET 0x8000
#define BOOT_STACK_SIZE 0x400
 
#define MULTIBOOT_HEADER_MAGIC 0x1BADB002
#define MULTIBOOT_HEADER_FLAGS 0x00010003
 
#define MULTIBOOT_LOADER_MAGIC 0x2BADB002
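
/*
 * Editor's note: per the Multiboot specification the header also carries a
 * checksum chosen so that magic + flags + checksum == 0 (mod 2^32), and a
 * compliant loader leaves MULTIBOOT_LOADER_MAGIC in EAX so the kernel can
 * verify how it was booted.
 */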
 
#endif
/kernel/trunk/arch/amd64/include/boot/memmap.h
0,0 → 1,0
link ../../../ia32/include/boot/memmap.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/atomic.h
0,0 → 1,106
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_ATOMIC_H__
#define __amd64_ATOMIC_H__
 
#include <arch/types.h>
 
typedef struct { volatile __u64 count; } atomic_t;
 
static inline void atomic_set(atomic_t *val, __u64 i)
{
val->count = i;
}
 
static inline __u64 atomic_get(atomic_t *val)
{
return val->count;
}
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
__asm__ volatile ("lock incq %0\n" : "=m" (val->count));
#else
__asm__ volatile ("incq %0\n" : "=m" (val->count));
#endif /* CONFIG_SMP */
}
 
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
__asm__ volatile ("lock decq %0\n" : "=m" (val->count));
#else
__asm__ volatile ("decq %0\n" : "=m" (val->count));
#endif /* CONFIG_SMP */
}
 
static inline count_t atomic_inc_pre(atomic_t *val)
{
count_t r;
 
__asm__ volatile (
"movq $1, %0\n"
"lock xaddq %0, %1\n"
: "=r" (r), "=m" (val->count)
);
 
return r;
}
 
static inline count_t atomic_dec_pre(atomic_t *val)
{
count_t r;
__asm__ volatile (
"movq $-1, %0\n"
"lock xaddq %0, %1\n"
: "=r" (r), "=m" (val->count)
);
return r;
}
 
#define atomic_inc_post(val) (atomic_inc_pre(val)+1)
#define atomic_dec_post(val) (atomic_dec_pre(val)-1)
 
static inline __u64 test_and_set(atomic_t *val) {
__u64 v;
__asm__ volatile (
"movq $1, %0\n"
"xchgq %0, %1\n"
: "=r" (v),"=m" (val->count)
);
return v;
}
 
 
extern void spinlock_arch(volatile int *val);
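
/*
 * Editor's illustration (not the actual spinlock_arch() implementation):
 * test_and_set() returns the previous value of the word, so a naive
 * spin-lock acquire can be built directly on top of it; release is a
 * plain store of zero.
 */
static inline void example_spin_lock(atomic_t *val)
{
	while (test_and_set(val))
		;	/* spin until the previous value was 0 (unlocked) */
}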
 
#endif
/kernel/trunk/arch/amd64/include/debug.h
0,0 → 1,0
link ../../ia32/include/debug.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/ega.h
0,0 → 1,0
link ../../ia32/include/ega.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/fpu_context.h
0,0 → 1,0
link ../../ia32/include/fpu_context.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/bios
0,0 → 1,0
link ../../ia32/include/bios
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/smp
0,0 → 1,0
link ../../ia32/include/smp
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/i8254.h
0,0 → 1,0
link ../../ia32/include/i8254.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/i8259.h
0,0 → 1,0
link ../../ia32/include/i8259.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/barrier.h
0,0 → 1,0
link ../../ia32/include/barrier.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/include/memstr.h
0,0 → 1,136
/*
* Copyright (C) 2005 Sergey Bondari
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_MEMSTR_H__
#define __amd64_MEMSTR_H__
 
/** Copy memory
*
* Copy a given number of bytes (3rd argument)
* from the memory location defined by 2nd argument
* to the memory location defined by 1st argument.
* The memory areas cannot overlap.
*
* @param dst Destination
* @param src Source
* @param cnt Number of bytes
* @return Destination
*/
static inline void * memcpy(void * dst, const void * src, size_t cnt)
{
__native d0, d1, d2;
 
__asm__ __volatile__(
"rep movsq\n\t"
"movq %4, %%rcx\n\t"
"andq $7, %%rcx\n\t"
"jz 1f\n\t"
"rep movsb\n\t"
"1:\n"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" ((__native)(cnt / 8)), "g" ((__native)cnt), "1" ((__native) dst), "2" ((__native) src)
: "memory");
 
return dst;
}
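
/*
 * Worked example (editor's note): for cnt == 20 the sequence above copies
 * 20/8 == 2 quadwords with rep movsq and then cnt & 7 == 4 remaining bytes
 * with rep movsb; the pointers advance automatically because the rep
 * string instructions update %rsi/%rdi.
 */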
 
 
/** Compare memory regions for equality
*
* Compare a given number of bytes (3rd argument)
* at memory locations defined by 1st and 2nd argument
* for equality. If the bytes are equal, the function returns 0.
*
* @param src Region 1
* @param dst Region 2
* @param cnt Number of bytes
* @return Zero if bytes are equal, non-zero otherwise
*/
static inline int memcmp(const void * src, const void * dst, size_t cnt)
{
__native d0, d1, d2;
__native ret;
__asm__ (
"repe cmpsb\n\t"
"je 1f\n\t"
"movq %3, %0\n\t"
"addq $1, %0\n\t"
"1:\n"
: "=a" (ret), "=%S" (d0), "=&D" (d1), "=&c" (d2)
: "0" (0), "1" (src), "2" (dst), "3" ((__native)cnt)
);
return ret;
}
 
/** Fill memory with words
* Fill a given number of words (2nd argument)
* at memory defined by 1st argument with the
* word value defined by 3rd argument.
*
* @param dst Destination
* @param cnt Number of words
* @param x Value to fill
*/
static inline void memsetw(__address dst, size_t cnt, __u16 x)
{
__native d0, d1;
__asm__ __volatile__ (
"rep stosw\n\t"
: "=&D" (d0), "=&c" (d1), "=a" (x)
: "0" (dst), "1" ((__native)cnt), "2" (x)
: "memory"
);
 
}
 
/** Fill memory with bytes
* Fill a given number of bytes (2nd argument)
* at memory defined by 1st argument with the
* byte value defined by 3rd argument.
*
* @param dst Destination
* @param cnt Number of bytes
* @param x Value to fill
*/
static inline void memsetb(__address dst, size_t cnt, __u8 x)
{
__native d0, d1;
__asm__ __volatile__ (
"rep stosb\n\t"
: "=&D" (d0), "=&c" (d1), "=a" (x)
: "0" (dst), "1" ((__native)cnt), "2" (x)
: "memory"
);
 
}
 
#endif
/kernel/trunk/arch/amd64/include/byteorder.h
0,0 → 1,36
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_BYTEORDER_H__
#define __amd64_BYTEORDER_H__
 
/* AMD64 is little-endian */
#define __native_le2host(n) (n)
#define __u64_le2host(n) (n)
 
#endif
/kernel/trunk/arch/amd64/include/arch.h
0,0 → 1,32
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_ARCH_H__
#define __amd64_ARCH_H__
 
#endif
/kernel/trunk/arch/amd64/include/faddr.h
0,0 → 1,36
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_FADDR_H__
#define __amd64_FADDR_H__
 
#include <arch/types.h>
 
#define FADDR(fptr) ((__address) (fptr))
 
#endif
/kernel/trunk/arch/amd64/include/thread.h
0,0 → 1,34
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_THREAD_H__
#define __amd64_THREAD_H__
 
#define ARCH_THREAD_DATA
 
#endif
/kernel/trunk/arch/amd64/include/arg.h
0,0 → 1,34
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_ARG_H__
#define __amd64_ARG_H__
 
#include <stdarg.h>
 
#endif
/kernel/trunk/arch/amd64/src/syscall.c
0,0 → 1,72
/*
* Copyright (C) 2006 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <syscall/syscall.h>
#include <arch/syscall.h>
#include <panic.h>
#include <arch/cpu.h>
#include <arch/pm.h>
#include <arch/asm.h>
 
#include <print.h>
#include <arch/cpu.h>
 
extern void syscall_entry(void);
 
/** Enable & setup support for SYSCALL/SYSRET */
void syscall_setup_cpu(void)
{
/* Enable SYSCALL/SYSRET */
set_efer_flag(AMD_SCE_FLAG);
 
/* Setup syscall entry address */
/* This is a _mess_ - the 64-bit CS is argument+16,
* the SS is argument+8. The order is:
* +0(KDATA_DES), +8(UDATA_DES), +16(UTEXT_DES)
*/
write_msr(AMD_MSR_STAR,
((__u64)(gdtselector(KDATA_DES) | PL_USER)<<48) \
| ((__u64)(gdtselector(KTEXT_DES) | PL_KERNEL)<<32));
write_msr(AMD_MSR_LSTAR, (__u64)syscall_entry);
/* Mask RFLAGS on syscall
* - disable interrupts, until we exchange the stack register
* (mask the IF bit)
*/
write_msr(AMD_MSR_SFMASK, 0x200);
}
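 
/* Worked example of the STAR layout above (a sketch; the real selector values
 * come from arch/pm.h, assuming gdtselector(i) == (i) << 3 and the GDT order
 * NULL, KTEXT, KDATA, UDATA, UTEXT used in pm.c): SYSCALL loads %cs from
 * STAR bits 32-47 (KTEXT) and %ss from that selector + 8 (KDATA), while
 * 64-bit SYSRET loads %cs from STAR bits 48-63 plus 16 (UTEXT) and %ss from
 * bits 48-63 plus 8 (UDATA). That is why the selector written to the high
 * word is gdtselector(KDATA_DES) | PL_USER, i.e. the entry sitting 8 bytes
 * below UDATA_DES and 16 bytes below UTEXT_DES.
 */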
 
/** Dispatch system call */
__native syscall_handler(__native a1, __native a2, __native a3,
__native a4, __native id)
{
if (id < SYSCALL_END)
return syscall_table[id](a1,a2,a3,a4);
else
panic("Undefined syscall %d", id);
}
/kernel/trunk/arch/amd64/src/interrupt.c
0,0 → 1,149
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/interrupt.h>
#include <print.h>
#include <debug.h>
#include <panic.h>
#include <arch/i8259.h>
#include <func.h>
#include <cpu.h>
#include <arch/asm.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <arch.h>
#include <symtab.h>
#include <arch/asm.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
 
static void print_info_errcode(int n, istate_t *istate)
{
char *symbol;
__u64 *x = &istate->stack[0];
 
if (!(symbol=get_symtab_entry(x[1])))
symbol = "";
 
printf("-----EXCEPTION(%d) OCCURED----- ( %s )\n",n,__FUNCTION__);
printf("%%rip: %Q (%s)\n",istate->stack[1],symbol);
printf("ERROR_WORD=%Q\n", istate->stack[0]);
printf("%%rcs=%Q,flags=%Q, %%cr0=%Q\n", istate->stack[2],
istate->stack[3],read_cr0());
printf("%%rax=%Q, %%rbx=%Q, %%rcx=%Q\n",istate->rax,istate->rbx,istate->rcx);
printf("%%rdx=%Q, %%rsi=%Q, %%rdi=%Q\n",istate->rdx,istate->rsi,istate->rdi);
printf("%%r8 =%Q, %%r9 =%Q, %%r10=%Q\n",istate->r8,istate->r9,istate->r10);
printf("%%r11=%Q, %%r12=%Q, %%r13=%Q\n",istate->r11,istate->r12,istate->r13);
printf("%%r14=%Q, %%r15=%Q, %%rsp=%Q\n",istate->r14,istate->r15,&istate->stack[0]);
printf("%%rbp=%Q\n",istate->rbp);
printf("stack: %Q, %Q, %Q\n", x[5], x[6], x[7]);
printf(" %Q, %Q, %Q\n", x[8], x[9], x[10]);
printf(" %Q, %Q, %Q\n", x[11], x[12], x[13]);
printf(" %Q, %Q, %Q\n", x[14], x[15], x[16]);
}
 
/*
* Interrupt and exception dispatching.
*/
 
void (* disable_irqs_function)(__u16 irqmask) = NULL;
void (* enable_irqs_function)(__u16 irqmask) = NULL;
void (* eoi_function)(void) = NULL;
 
void null_interrupt(int n, istate_t *istate)
{
printf("-----EXCEPTION(%d) OCCURED----- ( %s )\n",n,__FUNCTION__); \
printf("stack: %X, %X, %X, %X\n", istate->stack[0], istate->stack[1],
istate->stack[2], istate->stack[3]);
panic("unserviced interrupt\n");
}
 
void gp_fault(int n, istate_t *istate)
{
print_info_errcode(n, istate);
panic("general protection fault\n");
}
 
void ss_fault(int n, istate_t *istate)
{
print_info_errcode(n, istate);
panic("stack fault\n");
}
 
 
void nm_fault(int n, istate_t *istate)
{
#ifdef CONFIG_FPU_LAZY
scheduler_fpu_lazy_request();
#else
panic("fpu fault");
#endif
}
 
void page_fault(int n, istate_t *istate)
{
__address page;
page = read_cr2();
if (!as_page_fault(page)) {
print_info_errcode(n, istate);
printf("Page fault address: %Q\n", page);
panic("page fault\n");
}
}
 
void tlb_shootdown_ipi(int n, istate_t *istate)
{
trap_virtual_eoi();
tlb_shootdown_ipi_recv();
}
 
void trap_virtual_enable_irqs(__u16 irqmask)
{
if (enable_irqs_function)
enable_irqs_function(irqmask);
else
panic("no enable_irqs_function\n");
}
 
void trap_virtual_disable_irqs(__u16 irqmask)
{
if (disable_irqs_function)
disable_irqs_function(irqmask);
else
panic("no disable_irqs_function\n");
}
 
void trap_virtual_eoi(void)
{
if (eoi_function)
eoi_function();
else
panic("no eoi_function\n");
 
}
/kernel/trunk/arch/amd64/src/asm_utils.S
0,0 → 1,223
#
# Copyright (C) 2005 Ondrej Palkovsky
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
 
# Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int has no error word
# and 1 means interrupt with error word
 
#define ERROR_WORD_INTERRUPT_LIST 0x00027D00
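 
# Cross-check (a property of the x86 exception set, not of this kernel):
# 0x00027D00 has bits 8, 10, 11, 12, 13, 14 and 17 set, i.e. exactly the
# vectors that push an error word: #DF(8), #TS(10), #NP(11), #SS(12),
# #GP(13), #PF(14) and #AC(17).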
 
#include <arch/pm.h>
#include <arch/context_offset.h>
#include <arch/mm/page.h>
.text
.global interrupt_handlers
.global syscall_entry
.global panic_printf
 
panic_printf:
movq $halt, (%rsp)
jmp printf
 
.global memcpy
memcpy:
jmp _memcpy
.global cpuid
.global has_cpuid
.global rdtsc
.global read_efer_flag
.global set_efer_flag
## Determine CPUID support
#
# Return 0 in EAX if CPUID is not supported, 1 if supported.
#
has_cpuid:
pushfq # store flags
popq %rax # read flags
movq %rax,%rdx # copy flags
btcl $21,%edx # swap the ID bit
pushq %rdx
popfq # propagate the change into flags
pushfq
popq %rdx # read flags
andl $(1<<21),%eax # interested only in ID bit
andl $(1<<21),%edx
xorl %edx,%eax # 0 if not supported, 1 if supported
ret
 
cpuid:
movq %rbx, %r10 # we have to preserve rbx across function calls
 
movl %edi,%eax # load the command into %eax
 
cpuid
movl %eax,0(%rsi)
movl %ebx,4(%rsi)
movl %ecx,8(%rsi)
movl %edx,12(%rsi)
 
movq %r10, %rbx
ret
 
rdtsc:
xorq %rax,%rax
rdtsc
ret
 
set_efer_flag:
movq $0xc0000080, %rcx
rdmsr
btsl %edi, %eax
wrmsr
ret
read_efer_flag:
movq $0xc0000080, %rcx
rdmsr
ret
 
# Push all general purpose registers on stack except %rsp
.macro save_all_gpr
movq %rbp, IOFFSET_RBP(%rsp)
movq %rax, IOFFSET_RAX(%rsp)
movq %rbx, IOFFSET_RBX(%rsp)
movq %rcx, IOFFSET_RCX(%rsp)
movq %rdx, IOFFSET_RDX(%rsp)
movq %rsi, IOFFSET_RSI(%rsp)
movq %rdi, IOFFSET_RDI(%rsp)
movq %r8, IOFFSET_R8(%rsp)
movq %r9, IOFFSET_R9(%rsp)
movq %r10, IOFFSET_R10(%rsp)
movq %r11, IOFFSET_R11(%rsp)
movq %r12, IOFFSET_R12(%rsp)
movq %r13, IOFFSET_R13(%rsp)
movq %r14, IOFFSET_R14(%rsp)
movq %r15, IOFFSET_R15(%rsp)
.endm
 
.macro restore_all_gpr
movq IOFFSET_RBP(%rsp), %rbp
movq IOFFSET_RAX(%rsp), %rax
movq IOFFSET_RBX(%rsp), %rbx
movq IOFFSET_RCX(%rsp), %rcx
movq IOFFSET_RDX(%rsp), %rdx
movq IOFFSET_RSI(%rsp), %rsi
movq IOFFSET_RDI(%rsp), %rdi
movq IOFFSET_R8(%rsp), %r8
movq IOFFSET_R9(%rsp), %r9
movq IOFFSET_R10(%rsp), %r10
movq IOFFSET_R11(%rsp), %r11
movq IOFFSET_R12(%rsp), %r12
movq IOFFSET_R13(%rsp), %r13
movq IOFFSET_R14(%rsp), %r14
movq IOFFSET_R15(%rsp), %r15
.endm
## Declare interrupt handlers
#
# Declare interrupt handlers for n interrupt
# vectors starting at vector i.
#
# The handlers save the general purpose registers
# and call exc_dispatch().
#
.macro handler i n
subq $IREGISTER_SPACE, %rsp
save_all_gpr
 
movq $(\i),%rdi # %rdi - first parameter
movq %rsp, %rsi # %rsi - pointer to interrupt_context
call exc_dispatch # exc_dispatch(i, stack)
 
# Test if this is interrupt with error word or not
mov $\i,%cl;
movl $1,%eax;
test $0xe0,%cl;
jnz 0f;
and $0x1f,%cl;
shl %cl,%eax;
and $ERROR_WORD_INTERRUPT_LIST,%eax;
jz 0f;
 
 
# Return with error word
restore_all_gpr
# $8 = Skip error word
addq $IREGISTER_SPACE + 0x8, %rsp
iretq
 
0:
# Return with no error word
restore_all_gpr
addq $IREGISTER_SPACE, %rsp
iretq
 
.if (\n-\i)-1
handler "(\i+1)",\n
.endif
.endm
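 
# Example walk of the error-word test above, for vector \i = 14 (#PF):
# 14 has no bits in 0xe0 set, so the first jump is not taken; 1 << 14 = 0x4000,
# which is present in ERROR_WORD_INTERRUPT_LIST, so the second jump falls
# through and the handler returns via the path that also skips the error word
# (the extra 0x8 added to %rsp).
 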
interrupt_handlers:
h_start:
handler 0 IDT_ITEMS
h_end:
 
syscall_entry:
# Switch to hidden gs
swapgs
# %gs:0 now holds the pointer to the stack page
mov %gs:0, %r10 # We have a ptr to stack page in r10
addq $PAGE_SIZE-16, %r10 # We need some space to store old %sp
movq %rsp, 0(%r10) # Save old stack pointer to stack
movq %r10, %rsp # Change to new stack
pushq %rcx # Return address
pushq %r11 # Save flags
 
# Switch back to remain consistent
swapgs
 
sti
movq %r9, %rcx # SYSCALL clobbered %rcx, so the 4th argument arrived in %r9; put it back for the C ABI
call syscall_handler
cli # We will be touching stack pointer
popq %r11
popq %rcx
movq 0(%rsp), %rsp
sysretq
.data
.global interrupt_handler_size
 
interrupt_handler_size: .quad (h_end-h_start)/IDT_ITEMS
/kernel/trunk/arch/amd64/src/userspace.c
0,0 → 1,68
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <userspace.h>
#include <arch/pm.h>
#include <arch/types.h>
#include <arch.h>
#include <proc/thread.h>
#include <mm/as.h>
 
 
/** Enter userspace
*
* Change CPU protection level to 3, enter userspace.
*
*/
void userspace(__address entry)
{
ipl_t ipl;
ipl = interrupts_disable();
 
__asm__ volatile (""
"movq %0, %%rax;"
"movq %1, %%rbx;"
"movq %2, %%rcx;"
"movq %3, %%rdx;"
"movq %4, %%rsi;"
"pushq %%rax;"
"pushq %%rbx;"
"pushq %%rcx;"
"pushq %%rdx;"
"pushq %%rsi;"
"iretq;"
: : "i" (gdtselector(UDATA_DES) | PL_USER),
"i" (USTACK_ADDRESS+THREAD_STACK_SIZE),
"r" (ipl),
"i" (gdtselector(UTEXT_DES) | PL_USER),
"r" (entry));
/* Unreachable */
for(;;);
}
/kernel/trunk/arch/amd64/src/fpu_context.c
0,0 → 1,58
/*
* Copyright (C) 2005 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
 
#include <fpu_context.h>
#include <arch.h>
#include <cpu.h>
 
/** Save FPU (mmx, sse) context using fxsave instruction */
void fpu_context_save(fpu_context_t *fctx)
{
__asm__ volatile (
"fxsave %0"
: "=m"(*fctx)
);
}
 
/** Restore FPU (mmx,sse) context using fxrstor instruction */
void fpu_context_restore(fpu_context_t *fctx)
{
__asm__ volatile (
"fxrstor %0"
: "=m"(*fctx)
);
}
 
void fpu_init()
{
/* TODO: Zero all SSE, MMX etc. registers */
__asm__ volatile (
"fninit;"
);
}
/kernel/trunk/arch/amd64/src/proc/scheduler.c
0,0 → 1,49
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <proc/scheduler.h>
#include <cpu.h>
#include <proc/thread.h>
#include <arch.h>
#include <arch/context.h> /* SP_DELTA */
#include <arch/asm.h>
 
void before_thread_runs_arch(void)
{
CPU->arch.tss->rsp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
 
/* Syscall support - write thread address to hidden part of gs */
swapgs();
write_msr(AMD_MSR_GS,
(__u64)&THREAD->kstack);
swapgs();
}
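 
/* Note on the MSR write above (a sketch of the data flow, assuming
 * THREAD->kstack is a pointer to the thread's kernel stack page, as the
 * comments in asm_utils.S suggest): syscall_entry does swapgs and then reads
 * %gs:0, i.e. the first quadword at the GS base written here, which is
 * exactly THREAD->kstack - the kernel stack it switches onto.
 */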
 
void after_thread_ran_arch(void)
{
}
/kernel/trunk/arch/amd64/src/amd64.c
0,0 → 1,158
/*
* Copyright (C) 2005 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch.h>
 
#include <arch/types.h>
 
#include <config.h>
 
#include <arch/ega.h>
#include <genarch/i8042/i8042.h>
#include <arch/i8254.h>
#include <arch/i8259.h>
 
#include <arch/bios/bios.h>
#include <arch/mm/memory_init.h>
#include <arch/cpu.h>
#include <print.h>
#include <arch/cpuid.h>
#include <genarch/acpi/acpi.h>
#include <panic.h>
#include <interrupt.h>
#include <arch/syscall.h>
 
/** Disable I/O on non-privileged levels
*
* Clean IOPL(12,13) and NT(14) flags in EFLAGS register
*/
static void clean_IOPL_NT_flags(void)
{
asm
(
"pushfq;"
"pop %%rax;"
"and $~(0x7000),%%rax;"
"pushq %%rax;"
"popfq;"
:
:
:"%rax"
);
}
 
/** Disable alignment check
*
* Clean AM(18) flag in CR0 register
*/
static void clean_AM_flag(void)
{
asm
(
"mov %%cr0,%%rax;"
"and $~(0x40000),%%rax;"
"mov %%rax,%%cr0;"
:
:
:"%rax"
);
}
 
void arch_pre_mm_init(void)
{
struct cpu_info cpuid_s;
 
cpuid(AMD_CPUID_EXTENDED,&cpuid_s);
if (! (cpuid_s.cpuid_edx & (1<<AMD_EXT_NOEXECUTE)))
panic("Processor does not support No-execute pages.\n");
 
cpuid(INTEL_CPUID_STANDARD,&cpuid_s);
if (! (cpuid_s.cpuid_edx & (1<<INTEL_FXSAVE)))
panic("Processor does not support FXSAVE/FXRESTORE.\n");
if (! (cpuid_s.cpuid_edx & (1<<INTEL_SSE2)))
panic("Processor does not support SSE2 instructions.\n");
 
/* Enable No-execute pages */
set_efer_flag(AMD_NXE_FLAG);
/* Enable FPU */
cpu_setup_fpu();
 
/* Initialize segmentation */
pm_init();
 
/* Disable I/O on nonprivileged levels
* clear the NT (nested task) flag
*/
clean_IOPL_NT_flags();
/* Disable alignment check */
clean_AM_flag();
 
if (config.cpu_active == 1) {
bios_init();
i8259_init(); /* PIC */
i8254_init(); /* hard clock */
 
#ifdef CONFIG_SMP
exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown",
tlb_shootdown_ipi);
#endif /* CONFIG_SMP */
}
}
 
void arch_post_mm_init(void)
{
if (config.cpu_active == 1) {
ega_init(); /* video */
}
/* Setup fast SYSCALL/SYSRET */
syscall_setup_cpu();
 
}
 
void arch_pre_smp_init(void)
{
if (config.cpu_active == 1) {
memory_print_map();
#ifdef CONFIG_SMP
acpi_init();
#endif /* CONFIG_SMP */
}
}
 
void arch_post_smp_init(void)
{
i8042_init(); /* keyboard controller */
}
 
void calibrate_delay_loop(void)
{
i8254_calibrate_delay_loop();
i8254_normal_operation();
}
/kernel/trunk/arch/amd64/src/mm/page.c
0,0 → 1,64
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <config.h>
#include <memstr.h>
#include <interrupt.h>
 
void page_arch_init(void)
{
__address cur;
int flags;
 
if (config.cpu_active == 1) {
page_mapping_operations = &pt_mapping_operations;
/*
* PA2KA(identity) mapping for all frames.
*/
for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
flags = PAGE_CACHEABLE | PAGE_EXEC;
if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
flags |= PAGE_GLOBAL;
page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
}
exc_register(14, "page_fault", (iroutine)page_fault);
write_cr3((__address) AS_KERNEL->page_table);
}
else {
write_cr3((__address) AS_KERNEL->page_table);
}
}
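 
/* Rough size example for the identity-mapping loop above (assuming 4 KiB
 * frames): with last_frame at 64 MB the loop inserts 16384 page-level
 * mappings, i.e. 32 fully populated PTL3 tables, all placed at PA2KA(0)
 * and above in the kernel address space.
 */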
/kernel/trunk/arch/amd64/src/mm/as.c
0,0 → 1,0
link ../../../ia32/src/mm/as.c
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/src/mm/memory_init.c
0,0 → 1,71
/*
* Copyright (C) 2005 Josef Cejka
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/boot/memmap.h>
#include <arch/mm/memory_init.h>
#include <arch/mm/page.h>
#include <print.h>
 
__u8 e820counter = 0xff;
struct e820memmap_ e820table[MEMMAP_E820_MAX_RECORDS];
__u32 e801memorysize;
 
size_t get_memory_size(void)
{
return e801memorysize*1024;
}
 
void memory_print_map(void)
{
__u8 i;
for (i=0;i<e820counter;i++) {
printf("E820 base: %Q size: %Q type: ", e820table[i].base_address, e820table[i].size);
switch (e820table[i].type) {
case MEMMAP_MEMORY_AVAILABLE:
printf("available memory\n");
break;
case MEMMAP_MEMORY_RESERVED:
printf("reserved memory\n");
break;
case MEMMAP_MEMORY_ACPI:
printf("ACPI table\n");
break;
case MEMMAP_MEMORY_NVS:
printf("NVS\n");
break;
case MEMMAP_MEMORY_UNUSABLE:
printf("unusable memory\n");
break;
default:
printf("undefined memory type\n");
}
}
 
}
 
/kernel/trunk/arch/amd64/src/mm/tlb.c
0,0 → 1,0
link ../../../ia32/src/mm/tlb.c
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/src/mm/frame.c
0,0 → 1,0
link ../../../ia32/src/mm/frame.c
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/src/pm.c
0,0 → 1,223
/*
* Copyright (C) 2001-2004 Jakub Jermar
* Copyright (C) 2005-2006 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/pm.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <interrupt.h>
 
#include <config.h>
 
#include <memstr.h>
#include <mm/slab.h>
#include <debug.h>
 
/*
* There is no segmentation in long mode so we set up flat mode. In this
* mode, we use, for each privilege level, two segments spanning the
* whole memory. One is for code and one is for data.
*/
 
struct descriptor gdt[GDT_ITEMS] = {
/* NULL descriptor */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
/* KTEXT descriptor */
{ .limit_0_15 = 0xffff,
.base_0_15 = 0,
.base_16_23 = 0,
.access = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE ,
.limit_16_19 = 0xf,
.available = 0,
.longmode = 1,
.special = 0,
.granularity = 1,
.base_24_31 = 0 },
/* KDATA descriptor */
{ .limit_0_15 = 0xffff,
.base_0_15 = 0,
.base_16_23 = 0,
.access = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL,
.limit_16_19 = 0xf,
.available = 0,
.longmode = 0,
.special = 0,
.granularity = 1,
.base_24_31 = 0 },
/* UDATA descriptor */
{ .limit_0_15 = 0xffff,
.base_0_15 = 0,
.base_16_23 = 0,
.access = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER,
.limit_16_19 = 0xf,
.available = 0,
.longmode = 0,
.special = 1,
.granularity = 1,
.base_24_31 = 0 },
/* UTEXT descriptor */
{ .limit_0_15 = 0xffff,
.base_0_15 = 0,
.base_16_23 = 0,
.access = AR_PRESENT | AR_CODE | DPL_USER,
.limit_16_19 = 0xf,
.available = 0,
.longmode = 1,
.special = 0,
.granularity = 1,
.base_24_31 = 0 },
/* KTEXT 32-bit protected, for protected mode before long mode */
{ .limit_0_15 = 0xffff,
.base_0_15 = 0,
.base_16_23 = 0,
.access = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
.limit_16_19 = 0xf,
.available = 0,
.longmode = 0,
.special = 1,
.granularity = 1,
.base_24_31 = 0 },
/* TSS descriptor - setup will be completed later;
* on AMD64 it is 64-bit and takes up 2 entries in the table */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
 
struct idescriptor idt[IDT_ITEMS];
 
struct ptr_16_64 gdtr = {.limit = sizeof(gdt), .base= (__u64) gdt };
struct ptr_16_64 idtr = {.limit = sizeof(idt), .base= (__u64) idt };
 
static struct tss tss;
struct tss *tss_p = NULL;
 
void gdt_tss_setbase(struct descriptor *d, __address base)
{
struct tss_descriptor *td = (struct tss_descriptor *) d;
 
td->base_0_15 = base & 0xffff;
td->base_16_23 = ((base) >> 16) & 0xff;
td->base_24_31 = ((base) >> 24) & 0xff;
td->base_32_63 = ((base) >> 32);
}
 
void gdt_tss_setlimit(struct descriptor *d, __u32 limit)
{
struct tss_descriptor *td = (struct tss_descriptor *) d;
 
td->limit_0_15 = limit & 0xffff;
td->limit_16_19 = (limit >> 16) & 0xf;
}
 
void idt_setoffset(struct idescriptor *d, __address offset)
{
/*
* Offset is a linear address.
*/
d->offset_0_15 = offset & 0xffff;
d->offset_16_31 = offset >> 16 & 0xffff;
d->offset_32_63 = offset >> 32;
}
 
void tss_initialize(struct tss *t)
{
memsetb((__address) t, sizeof(struct tss), 0);
}
 
/*
* This function takes care of proper setup of IDT and IDTR.
*/
void idt_init(void)
{
struct idescriptor *d;
int i;
 
for (i = 0; i < IDT_ITEMS; i++) {
d = &idt[i];
 
d->unused = 0;
d->selector = gdtselector(KTEXT_DES);
 
d->present = 1;
d->type = AR_INTERRUPT; /* masking interrupt */
 
idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size);
exc_register(i, "undef", (iroutine)null_interrupt);
}
exc_register(13, "gp_fault", gp_fault);
exc_register( 7, "nm_fault", nm_fault);
exc_register(12, "ss_fault", ss_fault);
}
 
/** Initialize segmentation - code/data/idt tables
*
*/
void pm_init(void)
{
struct descriptor *gdt_p = (struct descriptor *) gdtr.base;
struct tss_descriptor *tss_desc;
 
/*
* Each CPU has its private GDT and TSS.
* All CPUs share one IDT.
*/
 
if (config.cpu_active == 1) {
idt_init();
/*
* NOTE: the bootstrap CPU has a statically allocated TSS, because
* the heap has not been initialized yet.
*/
tss_p = &tss;
}
else {
tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC);
if (!tss_p)
panic("could not allocate TSS\n");
}
 
tss_initialize(tss_p);
 
tss_desc = (struct tss_descriptor *) (&gdt_p[TSS_DES]);
tss_desc->present = 1;
tss_desc->type = AR_TSS;
tss_desc->dpl = PL_KERNEL;
gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p);
gdt_tss_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1);
 
__asm__("lgdt %0" : : "m"(gdtr));
__asm__("lidt %0" : : "m"(idtr));
/*
* As of this moment, the current CPU has its own GDT pointing
* to its own TSS. We just need to load the TR register.
*/
__asm__("ltr %0" : : "r" ((__u16) gdtselector(TSS_DES)));
}
/kernel/trunk/arch/amd64/src/boot/boot.S
0,0 → 1,304
#
# Copyright (C) 2005 Ondrej Palkovsky
# Copyright (C) 2006 Martin Decky
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
#include <arch/boot/boot.h>
#include <arch/boot/memmap.h>
#include <arch/mm/page.h>
#include <arch/mm/ptl.h>
#include <arch/pm.h>
#include <arch/cpu.h>
#include <arch/cpuid.h>
 
#define START_STACK (BOOT_OFFSET - BOOT_STACK_SIZE)
.section K_TEXT_START, "ax"
 
.code32
.align 4
.global multiboot_image_start
multiboot_header:
.long MULTIBOOT_HEADER_MAGIC
.long MULTIBOOT_HEADER_FLAGS
.long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS) # checksum
.long multiboot_header
.long unmapped_ktext_start
.long 0
.long 0
.long multiboot_image_start
 
multiboot_image_start:
movl $START_STACK, %esp # initialize stack pointer
lgdt bootstrap_gdtr # initialize Global Descriptor Table register
 
movw $gdtselector(KDATA_DES), %cx
movw %cx, %es
movw %cx, %ds # kernel data + stack
movw %cx, %ss
# Simics seems to remove hidden part of GS on entering user mode
# when _visible_ part of GS does not point to user-mode segment
movw $gdtselector(UDATA_DES), %cx
movw %cx, %fs
movw %cx, %gs
jmpl $gdtselector(KTEXT32_DES), $multiboot_meeting_point
multiboot_meeting_point:
movl %eax, grub_eax # save parameters from GRUB
movl %ebx, grub_ebx
# Protected 32-bit mode. We want to reuse the code segment descriptor;
# its default operand size must not be 1 when entering long mode
movl $0x80000000, %eax
cpuid
cmp $0x80000000, %eax # any function > 80000000h?
jbe long_mode_unsupported
movl $(AMD_CPUID_EXTENDED), %eax # Extended function code 80000001
cpuid
bt $29, %edx # Test if long mode is supported.
jc long_mode_supported
 
long_mode_unsupported:
cli
hlt
long_mode_supported:
# Enable 64-bit page translation entries - CR4.PAE = 1.
# Paging is not enabled until after long mode is enabled
movl %cr4, %eax
btsl $5, %eax
movl %eax, %cr4
 
# Set up paging tables
leal ptl_0, %eax
movl %eax, %cr3
# Enable long mode
movl $EFER_MSR_NUM, %ecx # EFER MSR number
rdmsr # Read EFER
btsl $AMD_LME_FLAG, %eax # Set LME=1
wrmsr # Write EFER
# Enable paging to activate long mode (set CR0.PG=1)
movl %cr0, %eax
btsl $31, %eax
movl %eax, %cr0
# At this point we are in compatibility mode
jmpl $gdtselector(KTEXT_DES), $start64
 
.code64
start64:
movq $(PA2KA(START_STACK)), %rsp
movl grub_eax, %eax
movl grub_ebx, %ebx
cmpl $MULTIBOOT_LOADER_MAGIC, %eax # compare GRUB signature
je valid_boot
xorl %ecx, %ecx # no memory size or map available
movl %ecx, e801memorysize
movl %ecx, e820counter
jmp invalid_boot
valid_boot:
movl (%ebx), %eax # ebx = physical address of struct multiboot_info
bt $0, %eax # mbi->flags[0] (mem_lower, mem_upper valid)
jc mem_valid
xorl %ecx, %ecx
jmp mem_invalid
mem_valid:
movl 4(%ebx), %ecx # mbi->mem_lower
addl 8(%ebx), %ecx # mbi->mem_upper
mem_invalid:
movl %ecx, e801memorysize
bt $3, %eax # mbi->flags[3] (mods_count, mods_addr valid)
jc mods_valid
xorl %ecx, %ecx
xorl %edx, %edx
jmp mods_invalid
mods_valid:
movl 20(%ebx), %ecx # mbi->mods_count
cmpl $0, %ecx
je mods_invalid
xorq %rdx, %rdx
movq %rdx, %rcx
movl 24(%ebx), %esi # mbi->mods_addr
movl 0(%esi), %edx # mods->mod_start
movl 4(%esi), %ecx # mods->mod_end
subl %edx, %ecx
addq $0xffffffff80000000, %rdx
mods_invalid:
movq %rcx, init_size
movq %rdx, init_addr
bt $6, %eax # mbi->flags[6] (mmap_length, mmap_addr valid)
jc mmap_valid
xorl %edx, %edx
jmp mmap_invalid
mmap_valid:
movl 44(%ebx), %ecx # mbi->mmap_length
movl 48(%ebx), %esi # mbi->mmap_addr
movq $e820table, %rdi
xorl %edx, %edx
mmap_loop:
cmpl $0, %ecx
jle mmap_end
movl 4(%esi), %eax # mmap->base_addr_low
movl %eax, (%rdi)
movl 8(%esi), %eax # mmap->base_addr_high
movl %eax, 4(%rdi)
movl 12(%esi), %eax # mmap->length_low
movl %eax, 8(%rdi)
movl 16(%esi), %eax # mmap->length_high
movl %eax, 12(%rdi)
movl 20(%esi), %eax # mmap->type
movl %eax, 16(%rdi)
movl (%esi), %eax # mmap->size
addl $0x4, %eax
addl %eax, %esi
subl %eax, %ecx
addq $MEMMAP_E820_RECORD_SIZE, %rdi
incl %edx
jmp mmap_loop
mmap_end:
mmap_invalid:
movl %edx, e820counter
invalid_boot:
#ifdef CONFIG_SMP
# copy AP bootstrap routines below 1 MB
movq $BOOT_OFFSET, %rsi
movq $AP_BOOT_OFFSET, %rdi
movq $_hardcoded_unmapped_size, %rcx
cld
rep movsb
#endif
call main_bsp # never returns
cli
hlt
.section K_DATA_START, "aw", @progbits
.align 4096
 
# Identity mapping of the first 64MB, and the same mapping again at -2GB -> 0
.global ptl_2
ptl_2:
.quad 0x0 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x200000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x400000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x600000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x800000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0xa00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0xc00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0xe00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x1000000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x1200000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x1400000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x1600000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x1800000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x1a00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x1c00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x1e00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x2000000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x2200000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x2400000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x2600000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x2800000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x2a00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x2c00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x2e00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x3000000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x3200000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x3400000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x3600000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x3800000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x3a00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x3c00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.quad 0x3e00000 | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
.align 4096
.global ptl_1
ptl_1:
.quad ptl_2 + (PTL_WRITABLE | PTL_PRESENT)
.fill 509,8,0
.quad ptl_2 + (PTL_WRITABLE | PTL_PRESENT)
.fill 1,8,0
.align 4096
.global ptl_0
ptl_0:
.quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
.fill 510,8,0
.quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
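 
# Index check for the -2GB alias (PA2KA(0) == 0xffffffff80000000): bits 39-47
# of that address are all ones (PTL0 index 511) and bits 30-38 give PTL1 index
# 510, so pointing ptl_0[0] and ptl_0[511] at ptl_1, and ptl_1[0] and
# ptl_1[510] at ptl_2, maps the same first 64MB both at virtual 0 and at the
# kernel base.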
 
.global bootstrap_gdtr
bootstrap_gdtr:
.word gdtselector(GDT_ITEMS)
.long KA2PA(gdt)
 
grub_eax:
.long 0
 
grub_ebx:
.long 0
/kernel/trunk/arch/amd64/src/smp/ap.S
0,0 → 1,115
#
# Copyright (C) 2001-2004 Jakub Jermar
# Copyright (C) 2005-2006 Martin Decky
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
#
# Init code for application processors.
#
 
#include <arch/boot/boot.h>
#include <arch/boot/memmap.h>
#include <arch/mm/page.h>
#include <arch/pm.h>
#include <arch/cpu.h>
#include <arch/cpuid.h>
 
.section K_TEXT_START, "ax"
 
#ifdef CONFIG_SMP
 
.global unmapped_ap_boot
 
# This piece of code is real-mode and is meant to be aligned at a 4K boundary.
# The requirement for such an alignment comes from MP Specification's STARTUP IPI
# requirements.
 
.align 4096
unmapped_ap_boot:
.code16
cli
xorw %ax, %ax
movw %ax, %ds
 
lgdt ap_gdtr # initialize Global Descriptor Table register
movl %cr0, %eax
orl $1, %eax
movl %eax, %cr0 # switch to protected mode
jmpl $gdtselector(KTEXT32_DES), $jump_to_kernel - BOOT_OFFSET + AP_BOOT_OFFSET
jump_to_kernel:
.code32
movw $gdtselector(KDATA_DES), %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
movw $gdtselector(UDATA_DES), %ax
movw %ax, %gs
# Enable 64-bit page translation entries - CR4.PAE = 1.
# Paging is not enabled until after long mode is enabled
movl %cr4, %eax
btsl $5, %eax
movl %eax, %cr4
 
leal ptl_0, %eax
movl %eax, %cr3
# Enable long mode
movl $EFER_MSR_NUM, %ecx # EFER MSR number
rdmsr # Read EFER
btsl $AMD_LME_FLAG, %eax # Set LME=1
wrmsr # Write EFER
# Enable paging to activate long mode (set CR0.PG=1)
movl %cr0, %eax
btsl $31, %eax
movl %eax, %cr0
# At this point we are in compatibility mode
jmpl $gdtselector(KTEXT_DES), $start64 - BOOT_OFFSET + AP_BOOT_OFFSET
 
.code64
start64:
movq (ctx), %rsp
call main_ap - AP_BOOT_OFFSET + BOOT_OFFSET # never returns
 
#endif /* CONFIG_SMP */
 
.section K_DATA_START, "ax"
 
#ifdef CONFIG_SMP
 
.global unmapped_ap_gdtr
 
unmapped_ap_gdtr:
.word 0
.long 0
 
#endif /* CONFIG_SMP */
/kernel/trunk/arch/amd64/src/smp/mps.c
0,0 → 1,0
link ../../../ia32/src/smp/mps.c
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/src/smp/smp.c
0,0 → 1,0
link ../../../ia32/src/smp/smp.c
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/src/smp/ipi.c
0,0 → 1,0
link ../../../ia32/src/smp/ipi.c
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/src/smp/apic.c
0,0 → 1,0
link ../../../ia32/src/smp/apic.c
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/src/cpu/cpu.c
0,0 → 1,159
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/cpu.h>
#include <arch/cpuid.h>
#include <arch/pm.h>
 
#include <arch.h>
#include <arch/types.h>
#include <print.h>
#include <typedefs.h>
#include <fpu_context.h>
 
/*
* Identification of CPUs.
* Contains only SMP code that is not specific to the MP Specification.
*/
#define AMD_CPUID_EBX 0x68747541
#define AMD_CPUID_ECX 0x444d4163
#define AMD_CPUID_EDX 0x69746e65
 
#define INTEL_CPUID_EBX 0x756e6547
#define INTEL_CPUID_ECX 0x6c65746e
#define INTEL_CPUID_EDX 0x49656e69
 
 
enum vendor {
VendorUnknown=0,
VendorAMD,
VendorIntel
};
 
static char *vendor_str[] = {
"Unknown Vendor",
"AuthenticAMD",
"GenuineIntel"
};
 
 
/** Setup flags on processor so that we can use the FPU
*
* cr4.osfxsr = 1 -> we do support fxsave/fxrstor
* cr0.em = 0 -> we do not emulate coprocessor
* cr0.mp = 1 -> we do want lazy context switch
*/
void cpu_setup_fpu(void)
{
__asm__ volatile (
"movq %%cr0, %%rax;"
"btsq $1, %%rax;" /* cr0.mp */
"btrq $2, %%rax;" /* cr0.em */
"movq %%rax, %%cr0;"
 
"movq %%cr4, %%rax;"
"bts $9, %%rax;" /* cr4.osfxsr */
"movq %%rax, %%cr4;"
:
:
:"%rax"
);
}
 
/** Set the TS flag to 1.
*
* If a thread accesses the coprocessor, an exception is raised
* and a lazy FPU context switch is performed.
*
*/
void fpu_disable(void)
{
__asm__ volatile (
"mov %%cr0,%%rax;"
"bts $3,%%rax;"
"mov %%rax,%%cr0;"
:
:
:"%rax"
);
}
 
void fpu_enable(void)
{
__asm__ volatile (
"mov %%cr0,%%rax;"
"btr $3,%%rax;"
"mov %%rax,%%cr0;"
:
:
:"%rax"
);
}
 
void cpu_arch_init(void)
{
CPU->arch.tss = tss_p;
CPU->fpu_owner=NULL;
}
 
 
void cpu_identify(void)
{
cpu_info_t info;
 
CPU->arch.vendor = VendorUnknown;
if (has_cpuid()) {
cpuid(0, &info);
 
/*
* Check for AMD processor.
*/
if (info.cpuid_ebx==AMD_CPUID_EBX && info.cpuid_ecx==AMD_CPUID_ECX && info.cpuid_edx==AMD_CPUID_EDX) {
CPU->arch.vendor = VendorAMD;
}
 
/*
* Check for Intel processor.
*/
if (info.cpuid_ebx==INTEL_CPUID_EBX && info.cpuid_ecx==INTEL_CPUID_ECX && info.cpuid_edx==INTEL_CPUID_EDX) {
CPU->arch.vendor = VendorIntel;
}
cpuid(1, &info);
CPU->arch.family = (info.cpuid_eax>>8)&0xf;
CPU->arch.model = (info.cpuid_eax>>4)&0xf;
CPU->arch.stepping = (info.cpuid_eax>>0)&0xf;
}
}
 
void cpu_print_report(cpu_t* m)
{
printf("cpu%d: (%s family=%d model=%d stepping=%d) %dMHz\n",
m->id, vendor_str[m->arch.vendor], m->arch.family, m->arch.model, m->arch.stepping,
m->frequency_mhz);
}
/kernel/trunk/arch/amd64/src/bios
0,0 → 1,0
link ../../ia32/src/bios
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/src/drivers
0,0 → 1,0
link ../../ia32/src/drivers
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/amd64/src/context.S
0,0 → 1,78
#
# Copyright (C) 2001-2004 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
.text
 
.global context_save_arch
.global context_restore_arch
 
#include <arch/context_offset.h>
 
## Save current CPU context
#
# Save CPU context to the context_t variable
# pointed to by the 1st argument. Returns 1 in RAX.
#
context_save_arch:
movq (%rsp), %rdx # the caller's return %rip
# The 1st argument is passed in %rdi
movq %rdx, OFFSET_PC(%rdi)
movq %rsp, OFFSET_SP(%rdi)
movq %rbx, OFFSET_RBX(%rdi)
movq %rbp, OFFSET_RBP(%rdi)
movq %r12, OFFSET_R12(%rdi)
movq %r13, OFFSET_R13(%rdi)
movq %r14, OFFSET_R14(%rdi)
movq %r15, OFFSET_R15(%rdi)
xorq %rax,%rax # context_save returns 1
incq %rax
ret
 
 
## Restore current CPU context
#
# Restore CPU context from the context_t variable
# pointed to by the 1st argument. Returns 0 in RAX.
#
context_restore_arch:
movq OFFSET_R15(%rdi), %r15
movq OFFSET_R14(%rdi), %r14
movq OFFSET_R13(%rdi), %r13
movq OFFSET_R12(%rdi), %r12
movq OFFSET_RBP(%rdi), %rbp
movq OFFSET_RBX(%rdi), %rbx
movq OFFSET_SP(%rdi), %rsp # ctx->sp -> %rsp
movq OFFSET_PC(%rdi), %rdx
movq %rdx,(%rsp)
 
xorq %rax,%rax # context_restore returns 0
ret
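 
## Usage sketch (hedged; the generic context_save()/context_restore() wrapper
# names are assumed from the rest of the kernel): the pair behaves much like
# setjmp/longjmp -
#
#   if (context_save(&ctx)) {
#           /* first pass, context_save_arch returned 1 */
#   }
#   ...
#   context_restore(&ctx); /* resumes after the save, which now yields 0 */
#
# because context_restore_arch rewrites %rsp and the saved return %rip.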
/kernel/trunk/arch/amd64/src/dummy.s
0,0 → 1,29
#
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
.text
/kernel/trunk/arch/amd64/src/delay.S
0,0 → 1,46
#
# Copyright (C) 2001-2004 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
#
# Micro second delay loop functions.
#
 
.text
 
.global asm_delay_loop
.global asm_fake_loop
 
asm_delay_loop:
0: dec %rdi
jnz 0b
ret
 
asm_fake_loop:
0: dec %rdi
jz 0b
ret
/kernel/trunk/arch/amd64/_link.ld.in
0,0 → 1,64
/** AMD64 linker script
*
* unmapped section:
* kernel text
* kernel data
* mapped section:
* kernel text
* kernel data
*/
 
#define __ASM__
#include <arch/boot/boot.h>
#include <arch/mm/page.h>
 
SECTIONS {
.unmapped BOOT_OFFSET: AT (0) {
unmapped_ktext_start = .;
*(K_TEXT_START);
unmapped_ktext_end = .;
 
unmapped_kdata_start = .;
*(K_DATA_START);
unmapped_kdata_end = .;
}
 
.mapped (PA2KA(BOOT_OFFSET)+SIZEOF(.unmapped)) : AT (SIZEOF(.unmapped)) {
ktext_start = .;
*(.text);
ktext_end = .;
 
kdata_start = .;
*(.data); /* initialized data */
*(.rodata*); /* string literals */
hardcoded_load_address = .;
QUAD(PA2KA(BOOT_OFFSET));
hardcoded_ktext_size = .;
QUAD(ktext_end - ktext_start + (unmapped_ktext_end - unmapped_ktext_start));
hardcoded_kdata_size = .;
QUAD(kdata_end - kdata_start + (unmapped_kdata_end - unmapped_kdata_start));
hardcoded_unmapped_ktext_size = .;
QUAD(unmapped_ktext_end - unmapped_ktext_start);
hardcoded_unmapped_kdata_size = .;
QUAD(unmapped_kdata_end - unmapped_kdata_start);
*(COMMON); /* global variables */
 
*(.eh_frame);
 
symbol_table = .;
*(symtab.*); /* Symbol table, must be LAST symbol!*/
 
*(.bss); /* uninitialized static variables */
 
kdata_end = .;
}
#ifdef CONFIG_SMP
_hardcoded_unmapped_size = (unmapped_ktext_end - unmapped_ktext_start) + (unmapped_kdata_end - unmapped_kdata_start);
ap_boot = unmapped_ap_boot - BOOT_OFFSET + AP_BOOT_OFFSET;
ap_gdtr = unmapped_ap_gdtr - BOOT_OFFSET + AP_BOOT_OFFSET;
protected_ap_gdtr = PA2KA(ap_gdtr);
 
#endif /* CONFIG_SMP */
 
}
/kernel/trunk/arch/amd64/Makefile.inc
0,0 → 1,116
#
# Copyright (C) 2005 Martin Decky
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
## Toolchain configuration
#
 
BFD_NAME = elf64-x86-64
BFD_ARCH = i386:x86-64
BFD = binary
TARGET = amd64-linux-gnu
TOOLCHAIN_DIR = /usr/local/amd64/bin
 
## Make some default assumptions
#
 
ifndef CPU
CPU = opteron
endif
 
CFLAGS += -fno-unwind-tables -m64 -mcmodel=kernel -mno-red-zone
DEFS += -D_CPU=${CPU} -D__64_BITS__
 
## Accepted CPUs
#
 
ifeq ($(CPU),opteron)
CFLAGS += -march=opteron
DEFS += -DFENCES=p4
endif
 
## Own configuration directives
#
 
CONFIG_ACPI = y
 
## Compile with hierarchical page tables support.
#
 
CONFIG_PAGE_PT = y
 
## Compile with i8042 support.
#
 
CONFIG_I8042 = y
 
## Accepted configuration directives
#
 
ifeq ($(CONFIG_SMP),y)
DEFS += -DCONFIG_SMP
endif
ifeq ($(CONFIG_HT),y)
DEFS += -DCONFIG_HT
endif
 
ifeq ($(CONFIG_SIMICS_FIX),y)
DEFS += -DCONFIG_SIMICS_FIX
endif
 
ARCH_SOURCES = \
arch/$(ARCH)/src/dummy.s \
arch/$(ARCH)/src/fpu_context.c \
arch/$(ARCH)/src/boot/boot.S \
arch/$(ARCH)/src/pm.c \
arch/$(ARCH)/src/context.S \
arch/$(ARCH)/src/drivers/ega.c \
arch/$(ARCH)/src/drivers/i8254.c \
arch/$(ARCH)/src/drivers/i8259.c \
arch/$(ARCH)/src/delay.S \
arch/$(ARCH)/src/amd64.c \
arch/$(ARCH)/src/bios/bios.c \
arch/$(ARCH)/src/interrupt.c \
arch/$(ARCH)/src/mm/as.c \
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/mm/tlb.c \
arch/$(ARCH)/src/asm_utils.S \
arch/$(ARCH)/src/mm/memory_init.c \
arch/$(ARCH)/src/cpu/cpu.c \
arch/$(ARCH)/src/proc/scheduler.c \
arch/$(ARCH)/src/userspace.c \
arch/$(ARCH)/src/syscall.c
 
ifeq ($(CONFIG_SMP),y)
ARCH_SOURCES += \
arch/$(ARCH)/src/smp/ap.S \
arch/$(ARCH)/src/smp/apic.c \
arch/$(ARCH)/src/smp/ipi.c \
arch/$(ARCH)/src/smp/mps.c \
arch/$(ARCH)/src/smp/smp.c
endif