Subversion Repositories HelenOS-historic

Compare Revisions: Rev 901 → Rev 902

/kernel/trunk/genarch/include/mm/page_ht.h
51,7 → 51,11
as_t *as; /**< Address space. */
__address page; /**< Virtual memory page. */
__address frame; /**< Physical memory frame. */
int flags;
unsigned g : 1; /**< Global page. */
unsigned x : 1; /**< Execute. */
unsigned w : 1; /**< Writable. */
unsigned k : 1; /**< Kernel privileges required. */
unsigned c : 1; /**< Cacheable. */
unsigned a : 1; /**< Accessed. */
unsigned d : 1; /**< Dirty. */
unsigned p : 1; /**< Present. */
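
The eight new bitfields track the generic PAGE_* flags one bit apiece. Purely as an illustration (this helper is not part of the revision), the inverse mapping from the bitfields back to a generic flags word could look like the sketch below; the PAGE_* names come from the generic mm layer, and the per-bit semantics follow the page_ht.c hunk that comes next:

/* Hypothetical sketch, not in this revision: rebuild the generic
 * flags word from the pte_t bitfields; the inverse of the
 * assignments added to page_ht.c below. */
static int pte_flags_get(pte_t *t)
{
	int flags = 0;

	if (t->g)
		flags |= PAGE_GLOBAL;
	if (t->x)
		flags |= PAGE_EXEC;
	if (t->w)
		flags |= PAGE_WRITE;
	if (!t->k)
		flags |= PAGE_USER;		/* k set means kernel privileges required */
	if (t->c)
		flags |= PAGE_CACHEABLE;
	if (!t->p)
		flags |= PAGE_NOT_PRESENT;	/* p is the Present bit */

	return flags;
}
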
/kernel/trunk/genarch/src/mm/page_ht.c
171,7 → 171,18
if (!hash_table_find(&page_ht, key)) {
t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);
ASSERT(t != NULL);
 
t->g = (flags & PAGE_GLOBAL) != 0;
t->x = (flags & PAGE_EXEC) != 0;
t->w = (flags & PAGE_WRITE) != 0;
t->k = !(flags & PAGE_USER);
t->c = (flags & PAGE_CACHEABLE) != 0;
t->p = !(flags & PAGE_NOT_PRESENT);
 
t->as = as;
t->page = page;
t->frame = frame;
 
hash_table_insert(&page_ht, key, &t->link);
}
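
For context, here is a sketch of the lookup side (an assumption-based illustration, not code from this revision): the same two-word key retrieves the link_t embedded in the entry, and a get_instance-style conversion recovers the owning pte_t. The key layout (address space pointer first, page address second) and the hash_table_get_instance() helper are assumptions modeled on the kernel's generic ADT conventions:

/* Sketch only: how a later lookup plausibly recovers the pte_t
 * inserted above. The key layout and the get_instance conversion
 * are assumptions, not this revision's code. */
pte_t *pte_lookup(as_t *as, __address page)
{
	link_t *hlp;
	__native key[2] = {(__native) as, (__native) page};

	hlp = hash_table_find(&page_ht, key);
	if (hlp)
		return hash_table_get_instance(hlp, pte_t, link);
	return NULL;
}
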
/kernel/trunk/generic/src/adt/hash_table.c
47,6 → 47,8
*/
void hash_table_create(hash_table_t *h, count_t m, count_t max_keys, hash_table_operations_t *op)
{
int i;
 
ASSERT(h);
ASSERT(op && op->hash && op->compare);
ASSERT(max_keys > 0);
57,6 → 59,9
}
memsetb((__address) h->entry, m * sizeof(link_t *), 0);
for (i = 0; i < m; i++)
list_initialize(&h->entry[i]);
h->entries = m;
h->max_keys = max_keys;
h->op = op;
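
To make the create path concrete, here is a minimal usage sketch. Only hash_table_create() appears in this hunk; the demo_t type, the callback signatures, and the operations structure layout are assumptions modeled on the kernel's generic ADT conventions:

/* Illustrative only: a table with 16 buckets and single-word keys. */
typedef struct {
	link_t link;	/* hash table linkage */
	__native id;	/* the key */
} demo_t;

static hash_index_t demo_hash(__native key[])
{
	return key[0] % 16;
}

static bool demo_compare(__native key[], count_t keys, link_t *item)
{
	demo_t *d = hash_table_get_instance(item, demo_t, link);
	return d->id == key[0];
}

static hash_table_operations_t demo_ops = {
	.hash = demo_hash,
	.compare = demo_compare,
	.remove_callback = NULL	/* acceptable only if hash_table_remove() is never used */
};

static hash_table_t demo_ht;

void demo_init(void)
{
	hash_table_create(&demo_ht, 16, 1, &demo_ops);
}
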
/kernel/trunk/arch/ia64/include/mm/tlb.h
77,8 → 77,11
extern void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
extern void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
 
extern void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr);
extern void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr);
 
extern void dtc_pte_copy(pte_t *t);
extern void itc_pte_copy(pte_t *t);
 
extern void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate);
extern void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate);
extern void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate);
/kernel/trunk/arch/ia64/src/proc/scheduler.c
46,7 → 46,7
* First, make sure it is not mapped already.
* If not, fill the respective translation register.
*/
dtlb_mapping_insert((__address) THREAD->kstack, KA2PA(THREAD->kstack), true, DTR_KSTACK);
dtlb_kernel_mapping_insert((__address) THREAD->kstack, KA2PA(THREAD->kstack), true, DTR_KSTACK);
}
}
 
/kernel/trunk/arch/ia64/src/mm/tlb.c
32,6 → 32,8
 
#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
38,7 → 40,7
#include <arch/interrupt.h>
#include <typedefs.h>
#include <panic.h>
#include <print.h>
#include <arch.h>
 
/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
87,9 → 89,6
region_register rr;
bool restore_rr = false;
 
if (!(entry.p))
return;
 
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
166,9 → 165,6
region_register rr;
bool restore_rr = false;
 
if (!(entry.p))
return;
 
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
216,7 → 212,7
* @param dtr If true, insert into data translation register, use data translation cache otherwise.
* @param tr Translation register if dtr is true, ignored otherwise.
*/
void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
tlb_entry_t entry;
238,12 → 234,83
dtc_mapping_insert(page, ASID_KERNEL, entry);
}
 
/** Copy content of PTE into data translation cache.
*
* @param t PTE.
*/
void dtc_pte_copy(pte_t *t)
{
tlb_entry_t entry;
 
entry.word[0] = 0;
entry.word[1] = 0;
entry.p = t->p;
entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
entry.a = t->a;
entry.d = t->d;
entry.pl = t->k ? PL_KERNEL : PL_USER;
entry.ar = t->w ? AR_WRITE : AR_READ;
entry.ppn = t->frame >> PPN_SHIFT;
entry.ps = PAGE_WIDTH;
dtc_mapping_insert(t->page, t->as->asid, entry);
}
 
/** Copy content of PTE into instruction translation cache.
*
* @param t PTE.
*/
void itc_pte_copy(pte_t *t)
{
tlb_entry_t entry;
 
entry.word[0] = 0;
entry.word[1] = 0;
ASSERT(t->x);
entry.p = t->p;
entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
entry.a = t->a;
entry.pl = t->k ? PL_KERNEL : PL_USER;
entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
entry.ppn = t->frame >> PPN_SHIFT;
entry.ps = PAGE_WIDTH;
itc_mapping_insert(t->page, t->as->asid, entry);
}
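
For readers unfamiliar with the ia64 insert format, the tlb_entry_t fields set by the two copy routines correspond to the architectural translation fields roughly as follows (a summary, not a definition from this revision):

/* Rough meaning of the tlb_entry_t fields set above:
 *  p   - Present bit              ma  - memory attribute (writeback/uncacheable)
 *  a,d - Accessed and Dirty bits  pl  - privilege level required for access
 *  ar  - access rights            ppn - physical page number (frame >> PPN_SHIFT)
 *  ps  - page size, as log2 of the size in bytes (here PAGE_WIDTH)
 */
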
 
/** Instruction TLB fault handler for faults with VHPT turned off.
*
* @param vector Interruption vector.
* @param pstate Structure with saved interruption state.
*/
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
panic("%s\n", __FUNCTION__);
region_register rr;
__address va;
pte_t *t;
va = pstate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va)); /* fetch the rid reported by panic() below */
t = page_mapping_find(AS, va);
if (t) {
/*
* The mapping was found in software page hash table.
* Insert it into instruction translation cache.
*/
itc_pte_copy(t);
} else {
/*
* Forward the page fault to address space page fault handler.
*/
if (!as_page_fault(va)) {
panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
}
}
}
 
/** Data TLB fault with VHPT turned off.
/** Data TLB fault handler for faults with VHPT turned off.
*
* @param vector Interruption vector.
* @param pstate Structure with saved interruption state.
253,6 → 320,7
region_register rr;
rid_t rid;
__address va;
pte_t *t;
va = pstate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
263,34 → 331,130
* Provide KA2PA(identity) mapping for faulting piece of
* kernel address space.
*/
dtlb_mapping_insert(va, KA2PA(va), false, 0);
dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
return;
}
}
panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
t = page_mapping_find(AS, va);
if (t) {
/*
* The mapping was found in software page hash table.
* Insert it into data translation cache.
*/
dtc_pte_copy(t);
} else {
/*
* Forward the page fault to address space page fault handler.
*/
if (!as_page_fault(va)) {
panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
}
}
}
 
/** Data nested TLB fault handler.
*
* This fault should not occur.
*
* @param vector Interruption vector.
* @param pstate Structure with saved interruption state.
*/
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
panic("%s\n", __FUNCTION__);
}
 
/** Data Dirty bit fault handler.
*
* @param vector Interruption vector.
* @param pstate Structure with saved interruption state.
*/
void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
panic("%s\n", __FUNCTION__);
pte_t *t;
 
t = page_mapping_find(AS, pstate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) { /* re-check: ASSERT() is compiled out in non-debug builds */
/*
* Update the Dirty bit in page tables and reinsert
* the mapping into DTC.
*/
t->d = true;
dtc_pte_copy(t);
}
}
 
/** Instruction access bit fault handler.
*
* @param vector Interruption vector.
* @param pstate Structure with saved interruption state.
*/
void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
panic("%s\n", __FUNCTION__);
pte_t *t;
 
t = page_mapping_find(AS, pstate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
/*
* Update the Accessed bit in page tables and reinsert
* the mapping into ITC.
*/
t->a = true;
itc_pte_copy(t);
}
}
 
/** Data access bit fault handler.
*
* @param vector Interruption vector.
* @param pstate Structure with saved interruption state.
*/
void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
panic("%s\n", __FUNCTION__);
pte_t *t;
 
t = page_mapping_find(AS, pstate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
/*
* Update the Accessed bit in page tables and reinsert
* the mapping into DTC.
*/
t->a = true;
dtc_pte_copy(t);
}
}
 
/** Page not present fault handler.
*
* @param vector Interruption vector.
* @param pstate Structure with saved interruption state.
*/
void page_not_present(__u64 vector, struct exception_regdump *pstate)
{
panic("%s\n", __FUNCTION__);
region_register rr;
__address va;
pte_t *t;
va = pstate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va)); /* fetch the rid reported by panic() below */
t = page_mapping_find(AS, va);
ASSERT(t);
if (t->p) {
/*
* If the Present bit is set in page hash table, just copy it
* and update ITC/DTC.
*/
if (t->x)
itc_pte_copy(t);
else
dtc_pte_copy(t);
} else {
if (!as_page_fault(va)) {
panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
}
}
}
/kernel/trunk/arch/ia64/src/mm/page.c
72,7 → 72,7
srlz_d();
 
/*
* And invalidate the rest of region register.
* And set up the rest of the region registers.
*/
for(i = 0; i < REGION_REGISTERS; i++) {
/* skip kernel rr */
81,7 → 81,8
rr.word = rr_read(i);
rr.map.ve = 0; /* disable VHPT walker */
rr.map.rid = RID_INVALID;
rr.map.rid = RID_KERNEL;
rr.map.ps = PAGE_WIDTH;
rr_write(i, rr.word);
srlz_i();
srlz_d();
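
As background for the rr.map accesses above, a region register packs the VHPT walker enable bit, the preferred page size, and the region ID into one 64-bit word. A sketch of the layout follows (per the Itanium architecture manuals; the actual HelenOS definition may differ in naming):

/* Assumed ia64 region register layout (architectural, little-endian
 * bitfield order; names are illustrative, not this revision's code). */
typedef union {
	__u64 word;
	struct {
		unsigned ve : 1;	/* VHPT walker enable; cleared above */
		unsigned : 1;		/* reserved */
		unsigned ps : 6;	/* preferred page size (log2 bytes) */
		unsigned rid : 24;	/* region ID matched by TLB entries */
		unsigned : 32;		/* reserved */
	} map;
} region_register_sketch;
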