Subversion Repositories HelenOS

Compare Revisions

Diff of Rev 1820 → Rev 1821 (whitespace changes ignored)

/trunk/kernel/arch/xen32/src/mm/tlb.c
37,11 → 37,15
#include <arch/mm/asid.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <arch/hypercall.h>
 
/** Invalidate all entries in TLB.
 *
 * A paravirtualized Xen guest must not reload CR3 directly to flush
 * the TLB; instead it asks the hypervisor for a local TLB flush via
 * the MMUEXT_TLB_FLUSH_LOCAL extended MMU operation.
 */
void tlb_invalidate_all(void)
{
	mmuext_op_t mmu_ext;

	mmu_ext.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF);
}
 
/** Invalidate all entries in TLB that belong to specified address space.
/trunk/kernel/arch/xen32/src/mm/frame.c
49,22 → 49,20
 
uintptr_t last_frame = 0;
 
#define L1_PT_SHIFT 10
#define L2_PT_SHIFT 0
#define L0_PT_SHIFT 10
#define L3_PT_SHIFT 0
 
#define L1_PT_ENTRIES 1024
#define L2_PT_ENTRIES 1024
#define L0_PT_ENTRIES 1024
#define L3_PT_ENTRIES 1024
 
#define L1_OFFSET_MASK (L1_PT_ENTRIES - 1)
#define L2_OFFSET_MASK (L2_PT_ENTRIES - 1)
#define L0_INDEX_MASK (L0_PT_ENTRIES - 1)
#define L3_INDEX_MASK (L3_PT_ENTRIES - 1)
 
#define PFN2PTL1_OFFSET(pfn) ((pfn >> L1_PT_SHIFT) & L1_OFFSET_MASK)
#define PFN2PTL2_OFFSET(pfn) ((pfn >> L2_PT_SHIFT) & L2_OFFSET_MASK)
#define PFN2PTL0_INDEX(pfn) ((pfn >> L0_PT_SHIFT) & L0_INDEX_MASK)
#define PFN2PTL3_INDEX(pfn) ((pfn >> L3_PT_SHIFT) & L3_INDEX_MASK)
 
#define PAGE_MASK (~(PAGE_SIZE - 1))
 
#define PTE2ADDR(pte) (pte & PAGE_MASK)
 
#define _PAGE_PRESENT 0x001UL
#define _PAGE_RW 0x002UL
#define _PAGE_USER 0x004UL
76,29 → 74,29
#define _PAGE_PSE 0x080UL
#define _PAGE_GLOBAL 0x100UL
 
#define L1_PROT (_PAGE_PRESENT | _PAGE_ACCESSED)
#define L2_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)
#define L0_PROT (_PAGE_PRESENT | _PAGE_ACCESSED)
#define L3_PROT (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)
 
void frame_arch_init(void)
{
if (config.cpu_active == 1) {
/* The only memory zone starts just after page table */
pfn_t start = ADDR2PFN(ALIGN_UP(KA2PA(start_info.pt_base), PAGE_SIZE)) + start_info.nr_pt_frames;
size_t size = start_info.nr_pages - start;
pfn_t start = ADDR2PFN(ALIGN_UP(KA2PA(start_info.ptl0), PAGE_SIZE)) + start_info.pt_frames;
size_t size = start_info.frames - start;
/* Create identity mapping */
pfn_t phys;
count_t count = 0;
for (phys = start; phys < start + size; phys++) {
mmu_update_t updates[L2_PT_ENTRIES];
mmu_update_t updates[L3_PT_ENTRIES];
pfn_t virt = ADDR2PFN(PA2KA(PFN2ADDR(phys)));
size_t ptl1_offset = PFN2PTL1_OFFSET(virt);
size_t ptl2_offset = PFN2PTL2_OFFSET(virt);
size_t ptl0_index = PFN2PTL0_INDEX(virt);
size_t ptl3_index = PFN2PTL3_INDEX(virt);
unsigned long *ptl2_base = (unsigned long *) PTE2ADDR(start_info.pt_base[ptl1_offset]);
pte_t *ptl3 = (pte_t *) PFN2ADDR(start_info.ptl0[ptl0_index].frame_address);
if (ptl2_base == 0) {
if (ptl3 == 0) {
mmuext_op_t mmu_ext;
pfn_t virt2 = ADDR2PFN(PA2KA(PFN2ADDR(start)));
106,42 → 104,42
/* New L1 page table entry needed */
memsetb(PFN2ADDR(virt2), PAGE_SIZE, 0);
size_t ptl1_offset2 = PFN2PTL1_OFFSET(virt2);
size_t ptl2_offset2 = PFN2PTL2_OFFSET(virt2);
unsigned long *ptl2_base2 = (unsigned long *) PTE2ADDR(start_info.pt_base[ptl1_offset2]);
size_t ptl0_index2 = PFN2PTL0_INDEX(virt2);
size_t ptl3_index2 = PFN2PTL3_INDEX(virt2);
pte_t *ptl3_2 = (pte_t *) PFN2ADDR(start_info.ptl0[ptl0_index2].frame_address);
if (ptl2_base2 == 0)
if (ptl3_2 == 0)
panic("Unable to find page table reference");
updates[count].ptr = (uintptr_t) &ptl2_base2[ptl2_offset2];
updates[count].val = PFN2ADDR(start_info.mfn_list[start]) | L1_PROT;
updates[count].ptr = (uintptr_t) &ptl3_2[ptl3_index2];
updates[count].val = PA2MA(PFN2ADDR(start)) | L0_PROT;
if (xen_mmu_update(updates, count + 1, NULL, DOMID_SELF) < 0)
panic("Unable to map new page table");
count = 0;
mmu_ext.cmd = MMUEXT_PIN_L1_TABLE;
mmu_ext.arg1.mfn = start_info.mfn_list[start];
mmu_ext.arg1.mfn = ADDR2PFN(PA2MA(PFN2ADDR(start)));
if (xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) < 0)
panic("Error pinning new page table");
unsigned long *ptl0 = (unsigned long *) PFN2ADDR(start_info.mfn_list[ADDR2PFN(KA2PA(start_info.pt_base))]);
pte_t *ptl0 = (pte_t *) PA2MA(KA2PA(start_info.ptl0));
updates[count].ptr = (uintptr_t) &ptl0[ptl1_offset];
updates[count].val = PFN2ADDR(start_info.mfn_list[start]) | L2_PROT;
updates[count].ptr = (uintptr_t) &ptl0[ptl0_index];
updates[count].val = PA2MA(PFN2ADDR(start)) | L3_PROT;
if (xen_mmu_update(updates, count + 1, NULL, DOMID_SELF) < 0)
panic("Unable to update PTE for page table");
count = 0;
ptl2_base = (unsigned long *) PTE2ADDR(start_info.pt_base[ptl1_offset]);
ptl3 = (pte_t *) PFN2ADDR(start_info.ptl0[ptl0_index].frame_address);
start++;
size--;
}
updates[count].ptr = (uintptr_t) &ptl2_base[ptl2_offset];
updates[count].val = PFN2ADDR(start_info.mfn_list[phys]) | L2_PROT;
updates[count].ptr = (uintptr_t) &ptl3[ptl3_index];
updates[count].val = PA2MA(PFN2ADDR(phys)) | L3_PROT;
count++;
if ((count == L2_PT_ENTRIES) || (phys + 1 == start + size)) {
if ((count == L3_PT_ENTRIES) || (phys + 1 == start + size)) {
if (xen_mmu_update(updates, count, NULL, DOMID_SELF) < 0)
panic("Unable to update PTE");
count = 0;
/trunk/kernel/arch/xen32/src/mm/memory_init.c
39,7 → 39,7
 
/** Return the amount of physical memory available to this domain.
 *
 * The hypervisor reports the number of page frames granted to the
 * domain in start_info.frames; convert that count to bytes.
 *
 * @return Memory size in bytes.
 */
size_t get_memory_size(void)
{
	return start_info.frames * PAGE_SIZE;
}
 
void memory_print_map(void)
/trunk/kernel/arch/xen32/src/mm/page.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup xen32mm
* @{
*/
/** @file
51,30 → 51,11
 
/** Initialize architecture-specific paging.
 *
 * On the bootstrap CPU, install the generic page-table mapping
 * operations and adopt the initial page table set up by the Xen
 * hypervisor (start_info.ptl0) as the kernel address-space root —
 * the PA2KA identity-mapping loop and manual page-fault handler
 * registration of the pre-Xen code are no longer performed here.
 * Secondary CPUs merely load the already-configured kernel PTL0.
 *
 * NOTE(review): reconstructed from an interleaved old/new revision
 * diff — verify against revision 1821 of this file.
 */
void page_arch_init(void)
{
	if (config.cpu_active == 1) {
		page_mapping_operations = &pt_mapping_operations;

		AS_KERNEL->page_table = (pte_t *) KA2PA(start_info.ptl0);
	} else
		SET_PTL0_ADDRESS_ARCH(AS_KERNEL->page_table);
}
 
void page_fault(int n, istate_t *istate)