Subversion Repositories HelenOS

Compare Revisions

Rev 3765 → Rev 3766

/trunk/kernel/arch/ia64/src/mm/tlb.c
168,13 → 168,8
va&=~((1<<ps)-1);
break;
}
for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) {
asm volatile (
"ptc.l %0,%1;;"
:
: "r" (va), "r" (ps<<2)
);
}
for(; va < (page + cnt * PAGE_SIZE); va += (1 << ps))
asm volatile ("ptc.l %0, %1;;" :: "r" (va), "r" (ps << 2));
srlz_d();
srlz_i();
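For reference, the purge loop above first rounds va down to a (1 << ps)-byte boundary and then issues one ptc.l per page-sized step. A stand-alone sketch of the same masking and stepping arithmetic, using a made-up page-size exponent and address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned ps = 14;                       /* hypothetical page-size exponent: 1 << 14 = 16 KiB */
	uint64_t va = 0x123456789abcULL;        /* arbitrary example address */
	uint64_t end = va + 4 * (1ULL << ps);   /* upper bound of the range to purge */

	va &= ~((1ULL << ps) - 1);              /* align down to a page boundary, as above */
	for (; va < end; va += (1ULL << ps))
		printf("would purge page at %#llx\n", (unsigned long long) va);
	return 0;
}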
189,7 → 184,8
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param entry The rest of TLB entry as required by TLB insertion
* format.
*/
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
200,7 → 196,8
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param entry The rest of TLB entry as required by TLB insertion
* format.
*/
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
211,8 → 208,10
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
* @param entry The rest of TLB entry as required by TLB insertion
* format.
* @param dtc If true, insert into data translation cache, use
* instruction translation cache otherwise.
*/
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
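As a usage illustration only, a caller prepares a tlb_entry_t and selects the cache with the last argument; dtc_mapping_insert() and itc_mapping_insert() above are exactly such calls with the flag fixed. The helper name and the elided field setup below are assumptions, not part of this revision:

/* Hypothetical sketch: insert a mapping into the data translation cache. */
static void example_dtc_insert(uintptr_t va, asid_t asid)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;
	/* ... fill in the remaining fields as required by the insertion format ... */
	tc_mapping_insert(va, asid, entry, true);	/* true selects the DTC, false the ITC */
}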
247,7 → 246,8
"mov psr.l=r8;;\n"
"srlz.d;;\n"
:
: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
"r" (entry.word[0]), "r" (dtc)
: "p6", "p7", "r8"
);
262,10 → 262,12
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param entry The rest of TLB entry as required by TLB insertion
* format.
* @param tr Translation register.
*/
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
void
itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
tr_mapping_insert(va, asid, entry, false, tr);
}
274,10 → 276,12
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param entry The rest of TLB entry as required by TLB insertion
* format.
* @param tr Translation register.
*/
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
void
dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
tr_mapping_insert(va, asid, entry, true, tr);
}
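Both wrappers only fix the dtr flag of tr_mapping_insert(), mirroring the dtc/itc pair above. A hedged sketch of pinning a mapping into a data translation register; the register index is an illustrative assumption:

/* Hypothetical sketch: pin an already prepared entry into data translation
 * register 0 so the mapping cannot be evicted from the translation cache. */
static void example_pin_mapping(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	dtr_mapping_insert(va, asid, entry, 0);		/* same as tr_mapping_insert(..., true, 0) */
}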
286,11 → 290,15
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param dtr If true, insert into data translation register, use instruction translation register otherwise.
* @param entry The rest of TLB entry as required by TLB insertion
* format.
* @param dtr If true, insert into data translation register, use
* instruction translation register otherwise.
* @param tr Translation register.
*/
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
void
tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
index_t tr)
{
region_register rr;
bool restore_rr = false;
323,7 → 331,8
"mov psr.l=r8;;\n"
"srlz.d;;\n"
:
: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
"r" (entry.word[0]), "r" (tr), "r" (dtr)
: "p6", "p7", "r8"
);
338,10 → 347,13
*
* @param page Virtual page address including VRN bits.
* @param frame Physical frame address.
* @param dtr If true, insert into data translation register, use data translation cache otherwise.
* @param dtr If true, insert into data translation register, use data
* translation cache otherwise.
* @param tr Translation register if dtr is true, ignored otherwise.
*/
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
void
dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
index_t tr)
{
tlb_entry_t entry;
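For context, this routine installs a kernel mapping (page including VRN bits onto a physical frame) directly into the data TLB. A hedged usage sketch follows; PA2KA and the frame address are illustrative assumptions, not taken from this revision:

/* Hypothetical sketch: map one kernel page onto a device frame through the
 * data translation cache (dtr = false, so the tr argument is ignored). */
static void example_map_device_frame(void)
{
	uintptr_t frame = 0x1f0000000ULL;	/* assumed physical frame of a device */

	dtlb_kernel_mapping_insert(PA2KA(frame), frame, false, 0);
}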
461,44 → 473,46
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
fault_if_from_uspace(istate,"Page fault at %p",va);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
istate->cr_iip);
}
}
}
 
 
 
static int is_io_page_accessible(int page)
{
if(TASK->arch.iomap) return bitmap_get(TASK->arch.iomap,page);
else return 0;
if (TASK->arch.iomap)
return bitmap_get(TASK->arch.iomap,page);
else
return 0;
}
 
#define IO_FRAME_BASE 0xFFFFC000000
 
/** There is special handling of memmaped lagacy io, because
* of 4KB sized access
* only for userspace
/**
* There is special handling of memory mapped legacy io, because of 4KB sized
* access for userspace.
*
* @param va virtual address of page fault
* @param va Virtual address of page fault.
* @param istate Structure with saved interruption state.
*
*
* @return 1 on success, 0 on fail
* @return One on success, zero on failure.
*/
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
if((va >= IO_OFFSET ) && (va < IO_OFFSET + (1<<IO_PAGE_WIDTH)))
if ((va >= IO_OFFSET ) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
if(TASK){
uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
USPACE_IO_PAGE_WIDTH;
uint64_t io_page=(va & ((1<<IO_PAGE_WIDTH)-1)) >> (USPACE_IO_PAGE_WIDTH);
if(is_io_page_accessible(io_page)){
uint64_t page,frame;
 
page = IO_OFFSET + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
frame = IO_FRAME_BASE + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
page = IO_OFFSET +
(1 << USPACE_IO_PAGE_WIDTH) * io_page;
frame = IO_FRAME_BASE +
(1 << USPACE_IO_PAGE_WIDTH) * io_page;
 
 
tlb_entry_t entry;
entry.word[0] = 0;
516,21 → 530,15
dtc_mapping_insert(page, TASK->as->asid, entry);
return 1;
}else {
fault_if_from_uspace(istate,"IO access fault at %p",va);
return 0;
fault_if_from_uspace(istate,
"IO access fault at %p", va);
}
} else
return 0;
else
return 0;
}
}
return 0;
 
}
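To make the address arithmetic above easier to trace, here is a stand-alone sketch of the io_page / page / frame computation. IO_OFFSET, IO_PAGE_WIDTH and USPACE_IO_PAGE_WIDTH are given assumed values for illustration only; IO_FRAME_BASE is the define above:

#include <stdint.h>
#include <stdio.h>

#define IO_FRAME_BASE		0xFFFFC000000ULL
#define IO_OFFSET		0x0001000000000000ULL	/* assumed */
#define IO_PAGE_WIDTH		26			/* assumed: 64 MiB legacy I/O window */
#define USPACE_IO_PAGE_WIDTH	12			/* assumed: 4 KiB user-visible pages */

int main(void)
{
	uint64_t va = IO_OFFSET + 0x3f8;	/* hypothetical faulting address */

	/* Same computation as in try_memmap_io_insertion(). */
	uint64_t io_page = (va & ((1ULL << IO_PAGE_WIDTH) - 1)) >> USPACE_IO_PAGE_WIDTH;
	uint64_t page = IO_OFFSET + (1ULL << USPACE_IO_PAGE_WIDTH) * io_page;
	uint64_t frame = IO_FRAME_BASE + (1ULL << USPACE_IO_PAGE_WIDTH) * io_page;

	printf("io_page=%llu page=%#llx frame=%#llx\n",
	    (unsigned long long) io_page, (unsigned long long) page,
	    (unsigned long long) frame);
	return 0;
}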
 
 
 
 
/** Data TLB fault handler for faults with VHPT turned off.
*
* @param vector Interruption vector.
568,13 → 576,16
page_table_unlock(AS, true);
} else {
page_table_unlock(AS, true);
if (try_memmap_io_insertion(va,istate)) return;
if (try_memmap_io_insertion(va, istate))
return;
/*
* Forward the page fault to the address space page fault handler.
* Forward the page fault to the address space page fault
* handler.
*/
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
fault_if_from_uspace(istate,"Page fault at %p",va);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
istate->cr_iip);
}
}
}
620,9 → 631,8
} else {
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
fault_if_from_uspace(istate,"Page fault at %p",va);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
t->d = true;
dtc_pte_copy(t);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
istate->cr_iip);
}
}
page_table_unlock(AS, true);
657,9 → 667,8
} else {
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
fault_if_from_uspace(istate,"Page fault at %p",va);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
t->a = true;
itc_pte_copy(t);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
istate->cr_iip);
}
}
page_table_unlock(AS, true);
694,9 → 703,8
} else {
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
fault_if_from_uspace(istate,"Page fault at %p",va);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
t->a = true;
itc_pte_copy(t);
panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
istate->cr_iip);
}
}
page_table_unlock(AS, true);