Subversion Repositories HelenOS-historic

Compare Revisions

Rev 901 → Rev 900

/kernel/trunk/arch/ia64/include/mm/tlb.h
38,47 → 38,14
#include <arch/types.h>
#include <typedefs.h>
 
/** Data and instruction Translation Register indices. */
#define DTR_KERNEL 0
#define ITR_KERNEL 0
#define DTR_KSTACK 1
extern void tc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry, bool dtc);
extern void dtc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry);
extern void itc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry);
 
/** Portion of TLB insertion format data structure. */
union tlb_entry {
	__u64 word[2];
	struct {
		/* Word 0 */
		unsigned p : 1; /**< Present. */
		unsigned : 1;
		unsigned ma : 3; /**< Memory attribute. */
		unsigned a : 1; /**< Accessed. */
		unsigned d : 1; /**< Dirty. */
		unsigned pl : 2; /**< Privilege level. */
		unsigned ar : 3; /**< Access rights. */
		unsigned long long ppn : 38; /**< Physical Page Number, a.k.a. PFN. */
		unsigned : 2;
		unsigned ed : 1;
		unsigned ig1 : 11;

		/* Word 1 */
		unsigned : 2;
		unsigned ps : 6; /**< Page size will be 2^ps. */
		unsigned key : 24; /**< Protection key, unused. */
		unsigned : 32;
	} __attribute__ ((packed));
} __attribute__ ((packed));
typedef union tlb_entry tlb_entry_t;
 
extern void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc);
extern void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry);
extern void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry);
 
extern void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr);
extern void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
extern void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
 
extern void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr);
 
extern void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate);
extern void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate);
extern void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate);
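
The insertion-format union above packs its bit-fields into the two 64-bit words that the TLB insertion code in tlb.c below hands to the hardware via word[0] and word[1]. A minimal sketch of that mapping, assuming the usual GCC little-endian bit-field layout and anonymous struct members that the union itself already relies on; the field values are made up for illustration:

	tlb_entry_t entry;

	entry.word[0] = entry.word[1] = 0;
	entry.p = 1;		/* bit 0 of word 0: present */
	entry.ppn = 0x1234;	/* bits 12..49 of word 0: physical frame number */
	entry.ps = 14;		/* bits 2..7 of word 1: page size 2^14 = 16K */
	/* now entry.word[0] == (0x1234ULL << 12) | 1 and entry.word[1] == 14 << 2 */
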
/kernel/trunk/arch/ia64/include/mm/page.h
30,12 → 30,25
#ifndef __ia64_PAGE_H__
#define __ia64_PAGE_H__
 
#ifndef __ASM__
 
 
#include <arch/mm/frame.h>
#include <arch/barrier.h>
#include <genarch/mm/page_ht.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <debug.h>
 
#endif
 
#define PAGE_SIZE FRAME_SIZE
#define PAGE_WIDTH FRAME_WIDTH
#define KERNEL_PAGE_WIDTH 28
 
/** Bit width of the TLB-locked portion of kernel address space. */
#define KERNEL_PAGE_WIDTH 28 /* 256M */
 
 
#define SET_PTL0_ADDRESS_ARCH(x) /**< To be removed as situation permits. */
 
#define PPN_SHIFT 12
42,7 → 55,6
 
#define VRN_SHIFT 61
#define VRN_MASK (7LL << VRN_SHIFT)
#define VA2VRN(va) ((va)>>VRN_SHIFT)
 
#ifdef __ASM__
#define VRN_KERNEL 7
55,6 → 67,7
#define KA2PA(x) ((__address) (x-(VRN_KERNEL<<VRN_SHIFT)))
#define PA2KA(x) ((__address) (x+(VRN_KERNEL<<VRN_SHIFT)))
 
 
#define VHPT_WIDTH 20 /* 1M */
#define VHPT_SIZE (1 << VHPT_WIDTH)
#define VHPT_BASE 0 /* Must be aligned to VHPT_SIZE */
74,16 → 87,13
#define AR_EXECUTE 0x1
#define AR_WRITE 0x2
 
 
#define VA_REGION_INDEX 61
 
#define VA_REGION(va) (va>>VA_REGION_INDEX)
 
#ifndef __ASM__
 
#include <arch/mm/frame.h>
#include <arch/barrier.h>
#include <genarch/mm/page_ht.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <debug.h>
 
struct vhpt_tag_info {
unsigned long long tag : 63;
unsigned ti : 1;
145,6 → 155,8
__u64 word[4];
} vhpt_entry_t;
 
typedef vhpt_entry_t tlb_entry_t;
 
struct region_register_map {
unsigned ve : 1;
unsigned : 1;
218,10 → 230,13
{
	__u64 ret;
	ASSERT(i < REGION_REGISTERS);
	__asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT));
	i=i<<VRN_SHIFT;
	__asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i));
	return ret;
}
 
 
/** Write Region Register.
*
* @param i Region register index.
230,11 → 245,11
static inline void rr_write(index_t i, __u64 v)
{
	ASSERT(i < REGION_REGISTERS);
	i=i<<VRN_SHIFT;
	__asm__ volatile (
		"mov rr[%0] = %1\n"
		:
		: "r" (i << VRN_SHIFT), "r" (v)
	);
		"mov rr[%0] = %1;;\n"
		:
		: "r" (i), "r" (v));
}
/** Read Page Table Register.
265,6 → 280,10
extern bool vhpt_compare(__address page, asid_t asid, vhpt_entry_t *v);
extern void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags);
 
 
 
#endif
 
#endif
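
KA2PA() and PA2KA() above rely on the kernel claiming virtual region VRN_KERNEL == 7, i.e. the three topmost address bits selected by VRN_SHIFT/VRN_MASK: they simply strip or add that fixed prefix. A small worked sketch, using the VA2VRN() spelling of the region extractor and an arbitrary physical address chosen only for illustration:

	__address pa = 0x4500000;		/* arbitrary physical address */
	__address ka = PA2KA(pa);		/* == 0xe000000004500000 */

	ASSERT(VA2VRN(ka) == VRN_KERNEL);	/* ka >> 61 == 7 */
	ASSERT(KA2PA(ka) == pa);		/* kernel region prefix stripped again */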
 
 
/kernel/trunk/arch/ia64/include/mm/asid.h
34,24 → 34,27
#include <arch/types.h>
 
typedef __u16 asid_t;
typedef __u32 rid_t;
 
#endif /* __ASM__ */
 
/**
* Number of ia64 RIDs (Region Identifiers) per kernel ASID.
* Note that some architectures may support more bits,
* but those extra bits are not used by the kernel.
*/
#endif
#define RIDS_PER_ASID 7
 
#define RID_MAX 262143 /* 2^18 - 1 */
#define RID_KERNEL 0
#define RID_INVALID 1
 
#define ASID2RID(asid, vrn) (((asid)>RIDS_PER_ASID)?(((asid)*RIDS_PER_ASID)+(vrn)):(asid))
#define ASID2RID(asid, vrn) (((asid)*RIDS_PER_ASID)+(vrn))
#define RID2ASID(rid) ((rid)/RIDS_PER_ASID)
 
#ifndef __ASM__
 
 
typedef __u32 rid_t;
 
#endif
 
#define ASID_MAX_ARCH (RID_MAX/RIDS_PER_ASID)
 
#endif
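
ASID2RID() maps an (asid, vrn) pair onto a region identifier by simple arithmetic and RID2ASID() inverts it with integer division; ASID_MAX_ARCH follows from the 2^18 - 1 limit on RIDs. A short worked sketch with made-up numbers, choosing an ASID large enough that both ASID2RID() variants shown above agree; ASSERT is only used here to state the expected values:

	asid_t asid = 10;

	ASSERT(ASID2RID(asid, 3) == 10 * RIDS_PER_ASID + 3);	/* == 73 */
	ASSERT(RID2ASID(73) == asid);				/* 73 / 7 == 10 */
	ASSERT(ASID_MAX_ARCH == RID_MAX / RIDS_PER_ASID);	/* 262143 / 7 == 37449 */
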
/kernel/trunk/arch/ia64/src/proc/scheduler.c
File deleted
/kernel/trunk/arch/ia64/src/mm/tlb.c
31,14 → 31,11
*/
 
#include <mm/tlb.h>
#include <mm/asid.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <typedefs.h>
#include <panic.h>
#include <print.h>
 
/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
87,11 → 84,11
region_register rr;
bool restore_rr = false;
 
if (!(entry.p))
if (!(entry.not_present.p))
return;
 
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
rr.word = rr_read(VA_REGION(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA_REGION(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
99,8 → 96,8
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
rr0.map.rid = ASID2RID(asid, VA_REGION(va));
rr_write(VA_REGION(va), rr0.word);
srlz_d();
srlz_i();
}
123,7 → 120,7
);
if (restore_rr) {
rr_write(VA2VRN(va), rr.word);
rr_write(VA_REGION(va),rr.word);
srlz_d();
srlz_i();
}
166,11 → 163,11
region_register rr;
bool restore_rr = false;
 
if (!(entry.p))
if (!(entry.not_present.p))
return;
 
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
rr.word = rr_read(VA_REGION(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA_REGION(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
178,8 → 175,8
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
rr0.map.rid = ASID2RID(asid, VA_REGION(va));
rr_write(VA_REGION(va), rr0.word);
srlz_d();
srlz_i();
}
202,72 → 199,20
);
if (restore_rr) {
rr_write(VA2VRN(va), rr.word);
rr_write(VA_REGION(va),rr.word);
srlz_d();
srlz_i();
}
}
 
/** Insert data into DTLB.
*
* @param page Virtual page address.
* @param frame Physical frame address.
* @param dtr If true, insert into data translation register, use data translation cache otherwise.
* @param tr Translation register if dtr is true, ignored otherwise.
*/
void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
	tlb_entry_t entry;
	entry.word[0] = 0;
	entry.word[1] = 0;
	entry.p = true; /* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true; /* already accessed */
	entry.d = true; /* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;
	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}

void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	panic("%s\n", __FUNCTION__);
}
 
/** Data TLB fault with VHPT turned off.
*
* @param vector Interruption vector.
* @param pstate Structure with saved interruption state.
*/
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	rid_t rid;
	__address va;
	va = pstate->cr_ifa; /* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA(identity) mapping for faulting piece of
			 * kernel address space.
			 */
			dtlb_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}
	panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
	panic("%s: %P\n", __FUNCTION__, pstate->cr_ifa);
}
 
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
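
The fault handler above only ever calls dtlb_mapping_insert() with dtr == false, i.e. it inserts into the data translation cache. As a hypothetical illustration of the dtr == true path, not present in either revision, a caller could pin a kernel page into a data translation register so that no DTLB miss is ever taken on it; DTR_KSTACK is the index from tlb.h above and the address is made up:

	__address kstack = PA2KA(0x4600000);	/* illustrative kernel stack page */

	dtlb_mapping_insert(kstack, KA2PA(kstack), true, DTR_KSTACK);
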
/kernel/trunk/arch/ia64/src/mm/page.c
55,6 → 55,7
/** Initialize VHPT and region registers. */
void set_environment(void)
{
 
region_register rr;
pta_register pta;
int i;
61,16 → 62,17
 
/*
* First set up kernel region register.
* This is redundant (see start.S) but we keep it here just to be sure.
* This action is redundant (see start.S) but I would like to keep it to make sure that
* no unexpected changes will be made.
*/
rr.word = rr_read(VRN_KERNEL);
rr.map.ve = 0; /* disable VHPT walker */
rr.map.ps = PAGE_WIDTH;
rr.map.rid = ASID2RID(ASID_KERNEL, VRN_KERNEL);
rr.map.rid = ASID2RID(ASID_KERNEL,VRN_KERNEL);
rr_write(VRN_KERNEL, rr.word);
srlz_i();
srlz_d();
 
/*
* And invalidate the rest of the region registers.
*/
81,7 → 83,7
rr.word = rr_read(i);
rr.map.ve = 0; /* disable VHPT walker */
rr.map.rid = RID_INVALID;
rr.map.rid = ASID2RID(ASID_INVALID,i);
rr_write(i, rr.word);
srlz_i();
srlz_d();
98,6 → 100,10
pta_write(pta.word);
srlz_i();
srlz_d();
 
return ;
}
 
/** Calculate address of collision chain from VPN and ASID.
/kernel/trunk/arch/ia64/src/dummy.s
31,6 → 31,8
.global calibrate_delay_loop
.global asm_delay_loop
.global userspace
.global before_thread_runs_arch
.global after_thread_ran_arch
.global cpu_sleep
.global dummy
.global fpu_enable
37,6 → 39,8
.global fpu_disable
.global fpu_init
 
before_thread_runs_arch:
after_thread_ran_arch:
userspace:
calibrate_delay_loop:
asm_delay_loop:
/kernel/trunk/arch/ia64/src/start.S
52,7 → 52,7
mov r9=rr[r8]
movl r10=(RR_MASK)
and r9=r10,r9
movl r10=((RID_KERNEL<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
movl r10=((ASID2RID(ASID_KERNEL,VRN_KERNEL)<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
or r9=r10,r9
mov rr[r8]=r9
 
/kernel/trunk/arch/ia64/Makefile.inc
74,5 → 74,4
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/mm/tlb.c \
arch/$(ARCH)/src/proc/scheduler.c \
arch/$(ARCH)/src/drivers/it.c
/kernel/trunk/arch/sparc64/src/proc/scheduler.c
30,7 → 30,6
#include <proc/thread.h>
#include <arch.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <config.h>
#include <align.h>
 
39,9 → 38,9
{
	__address base;
	base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
	base = ALIGN_DOWN(config.base, 4*1024*1024);

	if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
	if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + 4*1024*1024) {
		/*
		 * Kernel stack of this thread is not locked in DTLB.
		 * First, make sure it is not mapped already.
48,7 → 47,7
		 * If not, create a locked mapping for it.
		 */
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (__address) THREAD->kstack);
		dtlb_insert_mapping((__address) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
		dtlb_insert_mapping((__address) THREAD->kstack, (__address) THREAD->kstack, PAGESIZE_8K, true, true);
	}
}
 
57,9 → 56,9
{
	__address base;

	base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
	base = ALIGN_DOWN(config.base, 4*1024*1024);

	if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
		/*
		 * Kernel stack of this thread is locked in DTLB.
		 * Destroy the mapping.
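
Both forms of the test above describe the same window: with KERNEL_PAGE_WIDTH defined as 22 in the sparc64 tlb.h below, 1<<KERNEL_PAGE_WIDTH equals the literal 4*1024*1024, so the named constant only documents what the magic number meant. As a one-line sanity sketch:

	ASSERT((1 << KERNEL_PAGE_WIDTH) == 4*1024*1024);	/* 2^22 == 4M */
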
/kernel/trunk/arch/sparc64/include/mm/tlb.h
46,9 → 46,6
#define PAGESIZE_512K 2
#define PAGESIZE_4M 3
 
/** Bit width of the TLB-locked portion of kernel address space. */
#define KERNEL_PAGE_WIDTH 22 /* 4M */
 
union tlb_context_reg {
__u64 v;
struct {