Subversion Repositories HelenOS

Compare Revisions

Rev 1820 → Rev 1821

/trunk/kernel/arch/xen32/include/boot/boot.h
44,19 → 44,19
#include <arch/types.h>
 
typedef struct {
- char magic[32]; /**< "xen-<version>-<platform>" */
- unsigned long nr_pages; /**< Total pages allocated to this domain */
- void *shared_info; /**< Machine address of shared info struct */
+ int8_t magic[32]; /**< "xen-<version>-<platform>" */
+ uint32_t frames; /**< Available frames */
+ void *shared_info; /**< Shared info structure (machine address) */
uint32_t flags; /**< SIF_xxx flags */
- void *store_mfn; /**< Machine page number of shared page */
+ pfn_t store_mfn; /**< Shared page (machine page) */
uint32_t store_evtchn; /**< Event channel for store communication */
- void *console_mfn; /**< Machine address of console page */
+ void *console_mfn; /**< Console page (machine address) */
uint32_t console_evtchn; /**< Event channel for console messages */
- unsigned long *pt_base; /**< Virtual address of page directory */
- unsigned long nr_pt_frames; /**< Number of bootstrap p.t. frames */
- unsigned long *mfn_list; /**< Virtual address of page-frame list */
- void *mod_start; /**< Virtual address of pre-loaded module */
- unsigned long mod_len; /**< Size (bytes) of pre-loaded module */
+ pte_t *ptl0; /**< Boot PTL0 (kernel address) */
+ uint32_t pt_frames; /**< Number of bootstrap page table frames */
+ pfn_t *pm_map; /**< Physical->machine frame map (kernel address) */
+ void *mod_start; /**< Modules start (kernel address) */
+ uint32_t mod_len; /**< Modules size (bytes) */
int8_t cmd_line[GUEST_CMDLINE];
} start_info_t;
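 
The reworked start_info_t replaces Xen's raw field names with typed equivalents; in particular pm_map exposes the pseudo-physical to machine frame map that the PA2MA macro below builds on. A minimal usage sketch, assuming the global start_info instance that boot.S copies out:

	/* total memory, as get_memory_size() computes it below */
	size_t total = start_info.frames * PAGE_SIZE;
	/* machine frame backing pseudo-physical frame 0 */
	pfn_t mfn0 = start_info.pm_map[0];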
 
/trunk/kernel/arch/xen32/include/asm.h
42,8 → 42,6
 
extern uint32_t interrupt_handler_size;
 
- extern void paging_on(void);
 
extern void interrupt_handlers(void);
 
extern void enable_l_apic_in_msr(void);
74,8 → 72,6
 
GEN_READ_REG(cr0);
GEN_READ_REG(cr2);
- GEN_READ_REG(cr3);
- GEN_WRITE_REG(cr3);
 
GEN_READ_REG(dr0);
GEN_READ_REG(dr1);
/trunk/kernel/arch/xen32/include/mm/frame.h
36,7 → 36,7
#define __xen32_FRAME_H__
 
#define FRAME_WIDTH 12 /* 4K */
- #define FRAME_SIZE (1<<FRAME_WIDTH)
+ #define FRAME_SIZE (1 << FRAME_WIDTH)
 
 
#ifdef KERNEL
43,7 → 43,10
#ifndef __ASM__
 
#include <arch/types.h>
+ #include <arch/boot/boot.h>
 
+ #define PA2MA(x) ((start_info.pm_map[((uintptr_t) (x)) >> 12] << 12) + (((uintptr_t) (x)) & 0xfff))
 
extern uintptr_t last_frame;
 
extern void frame_arch_init(void);
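 
PA2MA translates a kernel pseudo-physical address into the machine address Xen expects: the frame number (bits 12 and up) is looked up in start_info.pm_map and the in-page offset (low 12 bits) is carried over unchanged. Spelled out, under the same assumptions:

	uintptr_t pa = (uintptr_t) x;
	uintptr_t ma = (start_info.pm_map[pa >> 12] << 12)  /* machine frame */
	             + (pa & 0xfff);                        /* page offset */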
/trunk/kernel/arch/xen32/include/mm/page.h
43,6 → 43,7
#ifdef KERNEL
 
#ifndef __ASM__
+ # include <arch/hypercall.h>
# define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
# define PA2KA(x) (((uintptr_t) (x)) + 0x80000000)
#else
59,21 → 60,36
#define PTL2_ENTRIES_ARCH 0
#define PTL3_ENTRIES_ARCH 1024
 
- #define PTL0_INDEX_ARCH(vaddr) (((vaddr)>>22)&0x3ff)
+ #define PTL0_INDEX_ARCH(vaddr) (((vaddr) >> 22) & 0x3ff)
#define PTL1_INDEX_ARCH(vaddr) 0
#define PTL2_INDEX_ARCH(vaddr) 0
- #define PTL3_INDEX_ARCH(vaddr) (((vaddr)>>12)&0x3ff)
+ #define PTL3_INDEX_ARCH(vaddr) (((vaddr) >> 12) & 0x3ff)
 
- #define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *)((((pte_t *)(ptl0))[(i)].frame_address)<<12))
+ #define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *)((((pte_t *)(ptl0))[(i)].frame_address) << 12))
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) (ptl1)
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) (ptl2)
- #define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((uintptr_t)((((pte_t *)(ptl3))[(i)].frame_address)<<12))
+ #define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((uintptr_t)((((pte_t *)(ptl3))[(i)].frame_address) << 12))
 
- #define SET_PTL0_ADDRESS_ARCH(ptl0) (write_cr3((uintptr_t) (ptl0)))
- #define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) (((pte_t *)(ptl0))[(i)].frame_address = (a)>>12)
+ #define SET_PTL0_ADDRESS_ARCH(ptl0) { \
+ mmuext_op_t mmu_ext; \
+ mmu_ext.cmd = MMUEXT_NEW_BASEPTR; \
+ mmu_ext.arg1.mfn = ADDR2PFN(PA2MA(ptl0)); \
+ xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF); \
+ }
+ #define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) { \
+ mmu_update_t update; \
+ update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl0))[(i)])); \
+ update.val = PA2MA(a); \
+ xen_mmu_update(&update, 1, NULL, DOMID_SELF); \
+ }
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
- #define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) (((pte_t *)(ptl3))[(i)].frame_address = (a)>>12)
+ #define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) { \
+ mmu_update_t update; \
+ update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl3))[(i)])); \
+ update.val = PA2MA(a); \
+ xen_mmu_update(&update, 1, NULL, DOMID_SELF); \
+ }
 
#define GET_PTL1_FLAGS_ARCH(ptl0, i) get_pt_flags((pte_t *)(ptl0), (index_t)(i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) PAGE_PRESENT
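 
Since this revision drops the PT_MODE_WRITABLE loader flag (see boot.S below), the guest's page tables are read-only and every page-table write must go through a hypercall: mmu_update for PTE values, mmuext_op for control operations such as loading a new page-table base. Several updates can be batched into one hypercall to amortize the trap cost, which is exactly what frame_arch_init() below does with an array of mmu_update_t entries. A minimal sketch, using the names from the macros above and the protection bits from frame.c:

	mmu_update_t batch[2];
	batch[0].ptr = PA2MA(KA2PA(&ptl3[i]));       /* machine address of the PTE itself */
	batch[0].val = PA2MA(PFN2ADDR(phys)) | L3_PROT; /* new value: machine address + flags */
	/* ... fill batch[1] ... */
	if (xen_mmu_update(batch, 2, NULL, DOMID_SELF) < 0)
		panic("Unable to update PTE");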
87,7 → 103,7
 
#define PTE_VALID_ARCH(p) (*((uint32_t *) (p)) != 0)
#define PTE_PRESENT_ARCH(p) ((p)->present != 0)
- #define PTE_GET_FRAME_ARCH(p) ((p)->frame_address<<FRAME_WIDTH)
+ #define PTE_GET_FRAME_ARCH(p) ((p)->frame_address << FRAME_WIDTH)
#define PTE_WRITABLE_ARCH(p) ((p)->writeable != 0)
#define PTE_EXECUTABLE_ARCH(p) 1
 
101,16 → 117,16
/* Page fault error codes. */
 
/** When the bit at this position is 0, the page fault was caused by a not-present page. */
- #define PFERR_CODE_P (1<<0)
+ #define PFERR_CODE_P (1 << 0)
 
/** When the bit at this position is 1, the page fault was caused by a write. */
- #define PFERR_CODE_RW (1<<1)
+ #define PFERR_CODE_RW (1 << 1)
 
/** When the bit at this position is 1, the page fault occurred in user mode. */
- #define PFERR_CODE_US (1<<2)
+ #define PFERR_CODE_US (1 << 2)
 
/** When the bit at this position is 1, a reserved bit was set in the page directory. */
- #define PFERR_CODE_RSVD (1<<3)
+ #define PFERR_CODE_RSVD (1 << 3)
 
/** Page Table Entry. */
struct page_specifier {
/trunk/kernel/arch/xen32/src/asm.S
1,0 → 0,0
- link ../../ia32/src/asm.S
#
# Copyright (C) 2001-2004 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
## very low and hardware-level functions
 
# Mask for interrupts 0 - 31 (bits 0 - 31), where a 0 bit means the interrupt pushes
# no error word and a 1 bit means it pushes an error word
#define ERROR_WORD_INTERRUPT_LIST 0x00027D00
 
.text
 
.global enable_l_apic_in_msr
.global interrupt_handlers
.global memcpy
.global memcpy_from_uspace
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace
.global memcpy_to_uspace_failover_address
 
 
#define MEMCPY_DST 4
#define MEMCPY_SRC 8
#define MEMCPY_SIZE 12
 
/** Copy memory to/from userspace.
*
* This is almost a conventional memcpy().
* The difference is the failover address to which
* control is returned from the page fault handler
* when a page fault occurs during copy_from_uspace()
* or copy_to_uspace().
*
* @param MEMCPY_DST(%esp) Destination address.
* @param MEMCPY_SRC(%esp) Source address.
* @param MEMCPY_SIZE(%esp) Size.
*
* @return MEMCPY_SRC(%esp) on success and 0 on failure.
*/
memcpy:
memcpy_from_uspace:
memcpy_to_uspace:
movl %edi, %edx /* save %edi */
movl %esi, %eax /* save %esi */
movl MEMCPY_SIZE(%esp), %ecx
shrl $2, %ecx /* size / 4 */
movl MEMCPY_DST(%esp), %edi
movl MEMCPY_SRC(%esp), %esi
rep movsl /* copy as much as possible word by word */
 
movl MEMCPY_SIZE(%esp), %ecx
andl $3, %ecx /* size % 4 */
jz 0f
rep movsb /* copy the rest byte by byte */
 
0:
movl %edx, %edi
movl %eax, %esi
movl MEMCPY_SRC(%esp), %eax /* MEMCPY_SRC(%esp), success */
ret
/*
* We got here from as_page_fault() after the memory operations
* above had caused a page fault.
*/
memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
movl %edx, %edi
movl %eax, %esi
xorl %eax, %eax /* return 0, failure */
ret
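 
# In other words, if the rep movs instructions above fault on a userspace
# address, the page fault handler resumes execution at the failover label,
# which restores the saved registers and makes the routine return 0 instead
# of killing the thread. A usage sketch from the C side (wrapper name as in
# the generic kernel; exact signature assumed):
#
#	char buf[64];
#	if (!copy_from_uspace(buf, uspace_src, sizeof(buf))) {
#		/* the copy faulted: reject the userspace pointer */
#	}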
 
 
## Enable local APIC
#
# Enable the local APIC by setting bit 11 (APIC global enable)
# and the default base address 0xfee00000 in the IA32_APIC_BASE MSR (0x1b).
#
enable_l_apic_in_msr:
push %eax
 
movl $0x1b, %ecx
rdmsr
orl $(1<<11),%eax
orl $(0xfee00000),%eax
wrmsr
 
pop %eax
ret
 
# Clear the NT (nested task) flag in EFLAGS (bit 14, hence the 0xffffbfff mask)
# overwrites %ecx
.macro CLEAR_NT_FLAG
pushfl
pop %ecx
and $0xffffbfff,%ecx
push %ecx
popfl
.endm
 
## Declare interrupt handlers
#
# Declare interrupt handlers for n interrupt
# vectors starting at vector i.
#
# The handlers set up the data segment registers
# and call exc_dispatch().
#
#define INTERRUPT_ALIGN 64
.macro handler i n
 
.ifeq \i-0x30 # Syscall handler
push %ds
push %es
push %fs
push %gs
 
# Push arguments on stack
push %edi
push %esi
push %edx
push %ecx
push %eax
# we must fill the data segment registers
movw $16,%ax
movw %ax,%ds
movw %ax,%es
sti
call syscall_handler # syscall_handler(ax,cx,dx,si,di)
cli
addl $20, %esp # clean-up of parameters
pop %gs
pop %fs
pop %es
pop %ds
CLEAR_NT_FLAG
iret
.else
/*
* This macro distinguishes between two versions of ia32 exceptions.
* One version has error word and the other does not have it.
* The latter version fakes the error word on the stack so that the
* handlers and istate_t can be the same for both types.
*/
.iflt \i-32
.if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
/*
* With error word, do nothing
*/
.else
/*
* Version without error word;
* fake the error word on the stack.
*/
subl $4, %esp
.endif
.else
/*
* Version without error word;
* fake the error word on the stack.
*/
subl $4, %esp
.endif
push %ds
push %es
push %fs
push %gs
 
#ifdef CONFIG_DEBUG_ALLREGS
push %ebx
push %ebp
push %edi
push %esi
#else
sub $16, %esp
#endif
push %edx
push %ecx
push %eax
# we must fill the data segment registers
movw $16,%ax
movw %ax,%ds
movw %ax,%es
 
pushl %esp # *istate
pushl $(\i) # intnum
call exc_dispatch # exc_dispatch(intnum, *istate)
addl $8,%esp # Clear arguments from stack
 
CLEAR_NT_FLAG # Modifies %ecx
pop %eax
pop %ecx
pop %edx
#ifdef CONFIG_DEBUG_ALLREGS
pop %esi
pop %edi
pop %ebp
pop %ebx
#else
add $16, %esp
#endif
pop %gs
pop %fs
pop %es
pop %ds
 
addl $4,%esp # Skip error word, no matter whether real or fake.
iret
.endif
 
.align INTERRUPT_ALIGN
.if (\n-\i)-1
handler "(\i+1)",\n
.endif
.endm
 
# keep in sync with pm.h !!!
IDT_ITEMS=64
.align INTERRUPT_ALIGN
interrupt_handlers:
h_start:
handler 0 IDT_ITEMS
h_end:
 
.data
.global interrupt_handler_size
 
interrupt_handler_size: .long (h_end-h_start)/IDT_ITEMS
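 
# Because every generated handler is padded to INTERRUPT_ALIGN bytes, the
# handlers form a fixed-stride table and interrupt_handler_size is that
# stride. The IDT setup code can therefore locate the entry point for any
# vector arithmetically; a sketch, assuming an ia32-style idt_init():
#
#	/* entry point of the handler for interrupt vector n */
#	uintptr_t h = (uintptr_t) interrupt_handlers + n * interrupt_handler_size;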
Property changes:
Deleted: svn:special
-*
\ No newline at end of property
/trunk/kernel/arch/xen32/src/boot/boot.S
35,8 → 35,7
.ascii "GUEST_OS=HelenOS,"
.ascii "XEN_VER=xen-3.0,"
.ascii "HYPERCALL_PAGE=0x0000,"
.ascii "LOADER=generic,"
.ascii "PT_MODE_WRITABLE"
.ascii "LOADER=generic"
.byte 0
 
.text
46,8 → 45,6
.global kernel_image_start
kernel_image_start:
cld
# copy start_info (esi initialized by Xen)
movl $start_info, %edi
/trunk/kernel/arch/xen32/src/mm/tlb.c
37,11 → 37,15
#include <arch/mm/asid.h>
#include <arch/asm.h>
#include <arch/types.h>
+ #include <arch/hypercall.h>
 
/** Invalidate all entries in TLB. */
void tlb_invalidate_all(void)
{
- write_cr3(read_cr3());
+ mmuext_op_t mmu_ext;
+ mmu_ext.cmd = MMUEXT_TLB_FLUSH_LOCAL;
+ xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF);
}
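 
The cr3-reload trick is not available to a paravirtualized guest, so the flush is requested explicitly through a hypercall. The same interface covers finer-grained flushes; a hedged sketch of a single-page invalidation, assuming MMUEXT_INVLPG_LOCAL and the arg1.linear_addr union member are available in this tree's <arch/hypercall.h> as in Xen's public headers:

	mmuext_op_t mmu_ext;
	mmu_ext.cmd = MMUEXT_INVLPG_LOCAL;
	mmu_ext.arg1.linear_addr = page; /* virtual address whose TLB entry to drop */
	xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF);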
 
/** Invalidate all entries in TLB that belong to specified address space.
/trunk/kernel/arch/xen32/src/mm/frame.c
49,22 → 49,20
 
uintptr_t last_frame = 0;
 
- #define L1_PT_SHIFT 10
- #define L2_PT_SHIFT 0
+ #define L0_PT_SHIFT 10
+ #define L3_PT_SHIFT 0
 
- #define L1_PT_ENTRIES 1024
- #define L2_PT_ENTRIES 1024
+ #define L0_PT_ENTRIES 1024
+ #define L3_PT_ENTRIES 1024
 
- #define L1_OFFSET_MASK (L1_PT_ENTRIES - 1)
- #define L2_OFFSET_MASK (L2_PT_ENTRIES - 1)
+ #define L0_INDEX_MASK (L0_PT_ENTRIES - 1)
+ #define L3_INDEX_MASK (L3_PT_ENTRIES - 1)
 
- #define PFN2PTL1_OFFSET(pfn) ((pfn >> L1_PT_SHIFT) & L1_OFFSET_MASK)
- #define PFN2PTL2_OFFSET(pfn) ((pfn >> L2_PT_SHIFT) & L2_OFFSET_MASK)
+ #define PFN2PTL0_INDEX(pfn) ((pfn >> L0_PT_SHIFT) & L0_INDEX_MASK)
+ #define PFN2PTL3_INDEX(pfn) ((pfn >> L3_PT_SHIFT) & L3_INDEX_MASK)
 
- #define PAGE_MASK (~(PAGE_SIZE - 1))
 
- #define PTE2ADDR(pte) (pte & PAGE_MASK)
 
#define _PAGE_PRESENT 0x001UL
#define _PAGE_RW 0x002UL
#define _PAGE_USER 0x004UL
76,29 → 74,29
#define _PAGE_PSE 0x080UL
#define _PAGE_GLOBAL 0x100UL
 
- #define L1_PROT (_PAGE_PRESENT | _PAGE_ACCESSED)
- #define L2_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)
+ #define L0_PROT (_PAGE_PRESENT | _PAGE_ACCESSED)
+ #define L3_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)
 
void frame_arch_init(void)
{
if (config.cpu_active == 1) {
/* The only memory zone starts just after the page table */
- pfn_t start = ADDR2PFN(ALIGN_UP(KA2PA(start_info.pt_base), PAGE_SIZE)) + start_info.nr_pt_frames;
- size_t size = start_info.nr_pages - start;
+ pfn_t start = ADDR2PFN(ALIGN_UP(KA2PA(start_info.ptl0), PAGE_SIZE)) + start_info.pt_frames;
+ size_t size = start_info.frames - start;
/* Create identity mapping */
pfn_t phys;
count_t count = 0;
for (phys = start; phys < start + size; phys++) {
- mmu_update_t updates[L2_PT_ENTRIES];
+ mmu_update_t updates[L3_PT_ENTRIES];
pfn_t virt = ADDR2PFN(PA2KA(PFN2ADDR(phys)));
- size_t ptl1_offset = PFN2PTL1_OFFSET(virt);
- size_t ptl2_offset = PFN2PTL2_OFFSET(virt);
+ size_t ptl0_index = PFN2PTL0_INDEX(virt);
+ size_t ptl3_index = PFN2PTL3_INDEX(virt);
- unsigned long *ptl2_base = (unsigned long *) PTE2ADDR(start_info.pt_base[ptl1_offset]);
+ pte_t *ptl3 = (pte_t *) PFN2ADDR(start_info.ptl0[ptl0_index].frame_address);
- if (ptl2_base == 0) {
+ if (ptl3 == 0) {
mmuext_op_t mmu_ext;
pfn_t virt2 = ADDR2PFN(PA2KA(PFN2ADDR(start)));
106,42 → 104,42
/* New L1 page table entry needed */
memsetb(PFN2ADDR(virt2), PAGE_SIZE, 0);
- size_t ptl1_offset2 = PFN2PTL1_OFFSET(virt2);
- size_t ptl2_offset2 = PFN2PTL2_OFFSET(virt2);
- unsigned long *ptl2_base2 = (unsigned long *) PTE2ADDR(start_info.pt_base[ptl1_offset2]);
+ size_t ptl0_index2 = PFN2PTL0_INDEX(virt2);
+ size_t ptl3_index2 = PFN2PTL3_INDEX(virt2);
+ pte_t *ptl3_2 = (pte_t *) PFN2ADDR(start_info.ptl0[ptl0_index2].frame_address);
- if (ptl2_base2 == 0)
+ if (ptl3_2 == 0)
panic("Unable to find page table reference");
- updates[count].ptr = (uintptr_t) &ptl2_base2[ptl2_offset2];
- updates[count].val = PFN2ADDR(start_info.mfn_list[start]) | L1_PROT;
+ updates[count].ptr = (uintptr_t) &ptl3_2[ptl3_index2];
+ updates[count].val = PA2MA(PFN2ADDR(start)) | L0_PROT;
if (xen_mmu_update(updates, count + 1, NULL, DOMID_SELF) < 0)
panic("Unable to map new page table");
count = 0;
mmu_ext.cmd = MMUEXT_PIN_L1_TABLE;
- mmu_ext.arg1.mfn = start_info.mfn_list[start];
+ mmu_ext.arg1.mfn = ADDR2PFN(PA2MA(PFN2ADDR(start)));
if (xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) < 0)
panic("Error pinning new page table");
- unsigned long *ptl0 = (unsigned long *) PFN2ADDR(start_info.mfn_list[ADDR2PFN(KA2PA(start_info.pt_base))]);
+ pte_t *ptl0 = (pte_t *) PA2MA(KA2PA(start_info.ptl0));
- updates[count].ptr = (uintptr_t) &ptl0[ptl1_offset];
- updates[count].val = PFN2ADDR(start_info.mfn_list[start]) | L2_PROT;
+ updates[count].ptr = (uintptr_t) &ptl0[ptl0_index];
+ updates[count].val = PA2MA(PFN2ADDR(start)) | L3_PROT;
if (xen_mmu_update(updates, count + 1, NULL, DOMID_SELF) < 0)
panic("Unable to update PTE for page table");
count = 0;
- ptl2_base = (unsigned long *) PTE2ADDR(start_info.pt_base[ptl1_offset]);
+ ptl3 = (pte_t *) PFN2ADDR(start_info.ptl0[ptl0_index].frame_address);
start++;
size--;
}
- updates[count].ptr = (uintptr_t) &ptl2_base[ptl2_offset];
- updates[count].val = PFN2ADDR(start_info.mfn_list[phys]) | L2_PROT;
+ updates[count].ptr = (uintptr_t) &ptl3[ptl3_index];
+ updates[count].val = PA2MA(PFN2ADDR(phys)) | L3_PROT;
count++;
- if ((count == L2_PT_ENTRIES) || (phys + 1 == start + size)) {
+ if ((count == L3_PT_ENTRIES) || (phys + 1 == start + size)) {
if (xen_mmu_update(updates, count, NULL, DOMID_SELF) < 0)
panic("Unable to update PTE");
count = 0;
/trunk/kernel/arch/xen32/src/mm/memory_init.c
39,7 → 39,7
 
size_t get_memory_size(void)
{
- return start_info.nr_pages * PAGE_SIZE;
+ return start_info.frames * PAGE_SIZE;
}
 
void memory_print_map(void)
/trunk/kernel/arch/xen32/src/mm/page.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
- /** @addtogroup xen32mm 
+ /** @addtogroup xen32mm
* @{
*/
/** @file
51,30 → 51,11
 
void page_arch_init(void)
{
- uintptr_t cur;
- int flags;
 
if (config.cpu_active == 1) {
page_mapping_operations = &pt_mapping_operations;
- /*
- * PA2KA(identity) mapping for all frames until last_frame.
- */
- for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
- flags = PAGE_CACHEABLE;
- if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
- flags |= PAGE_GLOBAL;
- page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
- }
 
exc_register(14, "page_fault", (iroutine) page_fault);
- // write_cr3((uintptr_t) AS_KERNEL->page_table);
- }
- else {
- // write_cr3((uintptr_t) AS_KERNEL->page_table);
- }
 
- // paging_on();
+ AS_KERNEL->page_table = (pte_t *) KA2PA(start_info.ptl0);
+ } else
+ SET_PTL0_ADDRESS_ARCH(AS_KERNEL->page_table);
}
 
void page_fault(int n, istate_t *istate)