HelenOS Subversion Repository
Compare Revisions: Rev 393 → Rev 394

/SPARTAN/trunk/arch/mips32/include/types.h
49,6 → 49,6
 
typedef __u32 __native;
 
-typedef struct entry_lo pte_t;
+typedef struct pte pte_t;
 
#endif
/SPARTAN/trunk/arch/mips32/include/mm/page.h
46,7 → 46,8
* Page table layout:
* - 32-bit virtual addresses
* - Offset is 14 bits => pages are 16K long
-* - PTE's use the same format as CP0 EntryLo[01] registers => PTE is therefore 4 bytes long
+* - PTE's use similar format as CP0 EntryLo[01] registers => PTE is therefore 4 bytes long
+* - PTE's make use of CP0 EntryLo's two-bit reserved field for bit W (writable) and bit A (accessed)
* - PTL0 has 64 entries (6 bits)
* - PTL1 is not used
* - PTL2 is not used
56,7 → 57,7
#define PTL0_INDEX_ARCH(vaddr) ((vaddr)>>26)
#define PTL1_INDEX_ARCH(vaddr) 0
#define PTL2_INDEX_ARCH(vaddr) 0
-#define PTL3_INDEX_ARCH(vaddr) (((vaddr)>>12)&0xfff)
+#define PTL3_INDEX_ARCH(vaddr) (((vaddr)>>14)&0x3fff)
 
#define GET_PTL0_ADDRESS_ARCH() (PTL0)
#define SET_PTL0_ADDRESS_ARCH(ptl0) (PTL0 = (pte_t *)(ptl0))
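As an aside, the macros above fully determine how a 32-bit virtual address is split under the new layout. The following standalone sketch is not part of the commit (names prefixed SKETCH_ are local copies of the committed macros); it just prints the PTL0 index, PTL3 index and page offset for a sample address:

	#include <stdint.h>
	#include <stdio.h>

	/* Local copies of the committed macros, renamed for the sketch. */
	#define SKETCH_PTL0_INDEX(vaddr)  ((vaddr) >> 26)
	#define SKETCH_PTL3_INDEX(vaddr)  (((vaddr) >> 14) & 0x3fff)
	#define SKETCH_PAGE_OFFSET(vaddr) ((vaddr) & 0x3fff)  /* 14-bit offset => 16K pages */

	int main(void)
	{
		uint32_t vaddr = 0x80123456;

		printf("ptl0=%u ptl3=%u offset=%u\n",
		    (unsigned) SKETCH_PTL0_INDEX(vaddr),
		    (unsigned) SKETCH_PTL3_INDEX(vaddr),
		    (unsigned) SKETCH_PAGE_OFFSET(vaddr));
		return 0;
	}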
97,7 → 98,7
((!p->v)<<PAGE_PRESENT_SHIFT) |
(1<<PAGE_USER_SHIFT) |
(1<<PAGE_READ_SHIFT) |
-((p->d)<<PAGE_WRITE_SHIFT) |
+((p->w)<<PAGE_WRITE_SHIFT) |
(1<<PAGE_EXEC_SHIFT)
);
109,7 → 110,7
p->c = (flags & PAGE_CACHEABLE) != 0 ? PAGE_CACHEABLE_EXC_WRITE : PAGE_UNCACHED;
p->v = !(flags & PAGE_NOT_PRESENT);
-p->d = (flags & PAGE_WRITE) != 0;
+p->w = (flags & PAGE_WRITE) != 0;
}
 
extern void page_arch_init(void);
/SPARTAN/trunk/arch/mips32/include/mm/tlb.h
47,9 → 47,19
unsigned d : 1; /* dirty/write-protect bit */
unsigned c : 3; /* cache coherency attribute */
unsigned pfn : 24; /* frame number */
-unsigned : 2;
+unsigned zero: 2; /* zero */
} __attribute__ ((packed));
 
struct pte {
unsigned g : 1; /* global bit */
unsigned v : 1; /* valid bit */
unsigned d : 1; /* dirty/write-protect bit */
unsigned c : 3; /* cache coherency attribute */
unsigned pfn : 24; /* frame number */
unsigned w : 1; /* writable */
unsigned a : 1; /* accessed */
} __attribute__ ((packed));
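Since struct pte reuses the EntryLo layout bit for bit, with only the top two (hardware-zero) bits reinterpreted as W and A, a PTE can be turned into an EntryLo image without any shifting. A standalone sketch of that correspondence (struct bodies copied from this header, renamed to avoid clashing with it):

	#include <stdio.h>

	struct entry_lo_sketch {
		unsigned g : 1;      /* global */
		unsigned v : 1;      /* valid */
		unsigned d : 1;      /* dirty/write-protect */
		unsigned c : 3;      /* cache coherency attribute */
		unsigned pfn : 24;   /* frame number */
		unsigned zero : 2;   /* hardware zero */
	} __attribute__ ((packed));

	struct pte_sketch {
		unsigned g : 1;
		unsigned v : 1;
		unsigned d : 1;
		unsigned c : 3;
		unsigned pfn : 24;
		unsigned w : 1;      /* software: writable */
		unsigned a : 1;      /* software: accessed */
	} __attribute__ ((packed));

	int main(void)
	{
		/* Both layouts occupy a single 32-bit word; they differ only
		 * in how the top two bits are interpreted. */
		printf("%zu %zu\n", sizeof(struct entry_lo_sketch),
		    sizeof(struct pte_sketch));
		return 0;
	}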
 
struct entry_hi {
unsigned asid : 8;
unsigned : 5;
62,14 → 72,22
unsigned : 7;
} __attribute__ ((packed));
 
-struct tlb_entry {
-struct entry_lo lo0;
-struct entry_lo lo1;
-struct entry_hi hi;
-struct page_mask mask;
+struct index {
+unsigned index : 4;
+unsigned : 27;
+unsigned p : 1;
} __attribute__ ((packed));
 
/** Probe TLB for Matching Entry
*
* Probe TLB for Matching Entry.
*/
static inline void tlbp(void)
{
__asm__ volatile ("tlbp\n\t");
}
 
 
/** Read Indexed TLB Entry
*
* Read Indexed TLB Entry.
/SPARTAN/trunk/arch/mips32/src/mm/tlb.c
42,6 → 42,9
static void tlb_invalid_fail(struct exception_regdump *pstate);
static void tlb_modified_fail(struct exception_regdump *pstate);
 
static pte_t *find_mapping_and_check(__address badvaddr);
static void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, bool c, __address pfn);
 
/** Initialize TLB
*
* Initialize TLB.
79,46 → 82,34
*/
void tlb_refill(struct exception_regdump *pstate)
{
-struct entry_hi hi;
+struct entry_lo lo;
__address badvaddr;
pte_t *pte;

-*((__u32 *) &hi) = cp0_entry_hi_read();
badvaddr = cp0_badvaddr_read();

spinlock_lock(&VM->lock);
-
-/*
- * Refill cannot succeed if the ASIDs don't match.
- */
-if (hi.asid != VM->asid)
-goto fail;
-
-/*
- * Refill cannot succeed if badvaddr is not
- * associated with any mapping.
- */
-pte = find_mapping(badvaddr, 0);
+pte = find_mapping_and_check(badvaddr);
if (!pte)
goto fail;

-/*
- * Refill cannot succeed if the mapping is marked as invalid.
- */
-if (!pte->v)
-goto fail;
+/*
+ * Record access to PTE.
+ */
+pte->a = 1;
 
+prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);
 
/*
* New entry is to be inserted into TLB
*/
cp0_pagemask_write(TLB_PAGE_MASK_16K);
if ((badvaddr/PAGE_SIZE) % 2 == 0) {
-cp0_entry_lo0_write(*((__u32 *) pte));
+cp0_entry_lo0_write(*((__u32 *) &lo));
cp0_entry_lo1_write(0);
}
else {
cp0_entry_lo0_write(0);
-cp0_entry_lo1_write(*((__u32 *) pte));
+cp0_entry_lo1_write(*((__u32 *) &lo));
}
tlbwr();
 
130,13 → 121,135
tlb_refill_fail(pstate);
}
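Both here and in the handlers below, the parity of the virtual page number decides whether EntryLo0 or EntryLo1 is written, because each MIPS R4000-style TLB entry maps a pair of adjacent virtual pages. A minimal sketch of the selection (SKETCH_PAGE_SIZE is an assumption matching the 16K pages declared in page.h above):

	#include <stdint.h>

	#define SKETCH_PAGE_SIZE 0x4000  /* 16K, per the page.h layout above */

	/* Returns nonzero when badvaddr falls in the even page of its pair,
	 * i.e. when the handler should fill EntryLo0 rather than EntryLo1. */
	static int fills_entry_lo0(uint32_t badvaddr)
	{
		return ((badvaddr / SKETCH_PAGE_SIZE) % 2) == 0;
	}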
 
/** Process TLB Invalid Exception
*
* Process TLB Invalid Exception.
*
* @param pstate Interrupted register context.
*/
void tlb_invalid(struct exception_regdump *pstate)
{
struct index index;
__address badvaddr;
struct entry_lo lo;
pte_t *pte;
 
badvaddr = cp0_badvaddr_read();
 
/*
* Locate the faulting entry in TLB.
*/
tlbp();
*((__u32 *) &index) = cp0_index_read();
spinlock_lock(&VM->lock);
/*
* Fail if the entry is not in TLB.
*/
if (index.p)
goto fail;
 
pte = find_mapping_and_check(badvaddr);
if (!pte)
goto fail;
 
/*
* Read the faulting TLB entry.
*/
tlbr();
 
/*
* Record access to PTE.
*/
pte->a = 1;
 
prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);
 
/*
* The entry is to be updated in TLB.
*/
if ((badvaddr/PAGE_SIZE) % 2 == 0)
cp0_entry_lo0_write(*((__u32 *) &lo));
else
cp0_entry_lo1_write(*((__u32 *) &lo));
tlbwi();
 
spinlock_unlock(&VM->lock);
return;
fail:
spinlock_unlock(&VM->lock);
tlb_invalid_fail(pstate);
}
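tlb_invalid() (and tlb_modified() below) relies on the probe semantics of TLBP: the P bit of the CP0 Index register is set when no TLB entry matches the current EntryHi. The fragment below is a sketch of that check, mirroring the handlers in this file; struct index is the layout added to tlb.h above, and tlbp()/cp0_index_read() are this kernel's accessors:

	struct index index;

	tlbp();                                  /* probe TLB for current EntryHi */
	*((__u32 *) &index) = cp0_index_read();  /* reinterpret the raw register */
	if (index.p) {
		/* P bit set: no matching entry, so the fault cannot be
		 * resolved here and is escalated to the failure path. */
	}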
 
/** Process TLB Modified Exception
*
* Process TLB Modified Exception.
*
* @param pstate Interrupted register context.
*/
 
void tlb_modified(struct exception_regdump *pstate)
{
struct index index;
__address badvaddr;
struct entry_lo lo;
pte_t *pte;
 
badvaddr = cp0_badvaddr_read();
 
/*
* Locate the faulting entry in TLB.
*/
tlbp();
*((__u32 *) &index) = cp0_index_read();
spinlock_lock(&VM->lock);
/*
* Fail if the entry is not in TLB.
*/
if (index.p)
goto fail;
 
pte = find_mapping_and_check(badvaddr);
if (!pte)
goto fail;
 
/*
* Fail if the page is not writable.
*/
if (!pte->w)
goto fail;
 
/*
* Read the faulting TLB entry.
*/
tlbr();
 
/*
* Record access and write to PTE.
*/
pte->a = 1;
pte->d = 1;
 
prepare_entry_lo(&lo, pte->g, pte->v, pte->w, pte->c, pte->pfn);
 
/*
* The entry is to be updated in TLB.
*/
if ((badvaddr/PAGE_SIZE) % 2 == 0)
cp0_entry_lo0_write(*((__u32 *) &lo));
else
cp0_entry_lo1_write(*((__u32 *) &lo));
tlbwi();
 
spinlock_unlock(&VM->lock);
return;
fail:
spinlock_unlock(&VM->lock);
tlb_modified_fail(pstate);
}
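The division of labour between the two write-related bits is worth spelling out: W is the software permission ("this mapping may be written"), while D is the hardware EntryLo bit that actually enables stores through the TLB and thus doubles as a dirty marker. Keeping D clear until the first store is what routes that store into tlb_modified(), as this fragment (mirroring the handler above) shows:

	if (!pte->w) {
		/* Write to a read-only mapping: a genuine protection fault. */
		goto fail;
	}
	pte->a = 1;  /* record the access in the software PTE */
	pte->d = 1;  /* first store: mark dirty and enable writes in the TLB */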
 
162,8 → 275,7
char *s = get_symtab_entry(pstate->epc);
if (s)
symbol = s;
panic("%X: TLB Invalid Exception at %X(%s)\n", cp0_badvaddr_read(),
pstate->epc, symbol);
panic("%X: TLB Invalid Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol);
}
 
void tlb_modified_fail(struct exception_regdump *pstate)
173,8 → 285,7
char *s = get_symtab_entry(pstate->epc);
if (s)
symbol = s;
panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(),
pstate->epc, symbol);
panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol);
}
 
 
188,3 → 299,51
cpu_priority_restore(pri);
}
 
/** Try to find PTE for faulting address
*
* Try to find PTE for faulting address.
* The VM->lock must be held on entry to this function.
*
* @param badvaddr Faulting virtual address.
*
* @return PTE on success, NULL otherwise.
*/
pte_t *find_mapping_and_check(__address badvaddr)
{
struct entry_hi hi;
pte_t *pte;
 
*((__u32 *) &hi) = cp0_entry_hi_read();
 
/*
* Handler cannot succeed if the ASIDs don't match.
*/
if (hi.asid != VM->asid)
return NULL;
/*
* Handler cannot succeed if badvaddr has no mapping.
*/
pte = find_mapping(badvaddr, 0);
if (!pte)
return NULL;
 
/*
* Handler cannot succeed if the mapping is marked as invalid.
*/
if (!pte->v)
return NULL;
 
return pte;
}
 
void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, bool c, __address pfn)
{
lo->g = g;
lo->v = v;
lo->d = d;
lo->c = c;
lo->pfn = pfn;
lo->zero = 0;
}
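For reference, the calling pattern shared by all three handlers above: build an EntryLo image from the software PTE with prepare_entry_lo(), then reinterpret it as a raw word for the CP0 write (fragment copied from tlb_refill(); the even-page case shown):

	struct entry_lo lo;

	prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);
	cp0_entry_lo0_write(*((__u32 *) &lo));  /* even page of the pair */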