Subversion Repositories HelenOS-historic

Compare Revisions

Rev 900 → Rev 901

/kernel/trunk/arch/sparc64/include/mm/tlb.h
46,6 → 46,9
#define PAGESIZE_512K 2
#define PAGESIZE_4M 3
 
/** Bit width of the TLB-locked portion of kernel address space. */
#define KERNEL_PAGE_WIDTH 22 /* 4M */
 
union tlb_context_reg {
__u64 v;
struct {
/kernel/trunk/arch/sparc64/src/proc/scheduler.c
30,6 → 30,7
#include <proc/thread.h>
#include <arch.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <config.h>
#include <align.h>
 
38,9 → 39,9
{
__address base;
base = ALIGN_DOWN(config.base, 4*1024*1024);
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
 
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + 4*1024*1024) {
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
/*
* Kernel stack of this thread is not locked in DTLB.
* First, make sure it is not mapped already.
47,7 → 48,7
* If not, create a locked mapping for it.
*/
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (__address) THREAD->kstack);
dtlb_insert_mapping((__address) THREAD->kstack, (__address) THREAD->kstack, PAGESIZE_8K, true, true);
dtlb_insert_mapping((__address) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
}
}
 
56,9 → 57,9
{
__address base;
 
base = ALIGN_DOWN(config.base, 4*1024*1024);
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
 
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + 4*1024*1024) {
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
/*
* Kernel stack of this thread is locked in DTLB.
* Destroy the mapping.
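The two hunks above replace the hard-coded 4*1024*1024 with (1<<KERNEL_PAGE_WIDTH), tying the range check to the new define (22, i.e. 4M) from tlb.h. A minimal standalone C sketch of the same predicate (illustrative only, not repository code):

/* Sketch: mirrors the check in before_thread_runs_arch()/after_thread_ran_arch().
 * With KERNEL_PAGE_WIDTH == 22, 1UL << KERNEL_PAGE_WIDTH == 4M, so the
 * behavior matches the literal 4*1024*1024 it replaces. */
#define KERNEL_PAGE_WIDTH 22

static int kstack_outside_locked_region(unsigned long kstack, unsigned long config_base)
{
	unsigned long base = config_base & ~((1UL << KERNEL_PAGE_WIDTH) - 1);	/* ALIGN_DOWN */
	return kstack < base || kstack > base + (1UL << KERNEL_PAGE_WIDTH);
}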
/kernel/trunk/arch/ia64/include/mm/page.h
30,25 → 30,12
#ifndef __ia64_PAGE_H__
#define __ia64_PAGE_H__
 
#ifndef __ASM__
 
 
#include <arch/mm/frame.h>
#include <arch/barrier.h>
#include <genarch/mm/page_ht.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <debug.h>
 
#endif
 
#define PAGE_SIZE FRAME_SIZE
#define PAGE_WIDTH FRAME_WIDTH
#define KERNEL_PAGE_WIDTH 28
 
/** Bit width of the TLB-locked portion of kernel address space. */
#define KERNEL_PAGE_WIDTH 28 /* 256M */
 
 
#define SET_PTL0_ADDRESS_ARCH(x) /**< To be removed as situation permits. */
 
#define PPN_SHIFT 12
55,6 → 42,7
 
#define VRN_SHIFT 61
#define VRN_MASK (7LL << VRN_SHIFT)
#define VA2VRN(va) ((va)>>VRN_SHIFT)
 
#ifdef __ASM__
#define VRN_KERNEL 7
67,7 → 55,6
#define KA2PA(x) ((__address) (x-(VRN_KERNEL<<VRN_SHIFT)))
#define PA2KA(x) ((__address) (x+(VRN_KERNEL<<VRN_SHIFT)))
 
 
#define VHPT_WIDTH 20 /* 1M */
#define VHPT_SIZE (1 << VHPT_WIDTH)
#define VHPT_BASE 0 /* Must be aligned to VHPT_SIZE */
87,13 → 74,16
#define AR_EXECUTE 0x1
#define AR_WRITE 0x2
 
#ifndef __ASM__
 
#define VA_REGION_INDEX 61
#include <arch/mm/frame.h>
#include <arch/barrier.h>
#include <genarch/mm/page_ht.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <debug.h>
 
#define VA_REGION(va) (va>>VA_REGION_INDEX)
 
#ifndef __ASM__
 
struct vhpt_tag_info {
unsigned long long tag : 63;
unsigned ti : 1;
155,8 → 145,6
__u64 word[4];
} vhpt_entry_t;
 
typedef vhpt_entry_t tlb_entry_t;
 
struct region_register_map {
unsigned ve : 1;
unsigned : 1;
230,13 → 218,10
{
__u64 ret;
ASSERT(i < REGION_REGISTERS);
i=i<<VRN_SHIFT;
__asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i));
__asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT));
return ret;
}
 
 
/** Write Region Register.
*
* @param i Region register index.
245,11 → 230,11
static inline void rr_write(index_t i, __u64 v)
{
ASSERT(i < REGION_REGISTERS);
i=i<<VRN_SHIFT;
__asm__ volatile (
"mov rr[%0] = %1;;\n"
:
: "r" (i), "r" (v));
"mov rr[%0] = %1\n"
:
: "r" (i << VRN_SHIFT), "r" (v)
);
}
/** Read Page Table Register.
280,10 → 265,6
extern bool vhpt_compare(__address page, asid_t asid, vhpt_entry_t *v);
extern void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags);
 
 
 
#endif
 
#endif
 
 
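The reshuffled page.h above also introduces VA2VRN(va). A standalone sketch of how VA2VRN and KA2PA interact, using the constants from the hunks above (VRN_SHIFT = 61, VRN_KERNEL = 7); the address value is hypothetical:

#include <assert.h>
#include <stdint.h>

#define VRN_SHIFT   61
#define VRN_KERNEL  7ULL
#define VA2VRN(va)  ((va) >> VRN_SHIFT)
#define KA2PA(x)    ((x) - (VRN_KERNEL << VRN_SHIFT))

int main(void)
{
	uint64_t ka = (VRN_KERNEL << VRN_SHIFT) + 0x4000;	/* hypothetical kernel address */
	assert(VA2VRN(ka) == VRN_KERNEL);	/* top three bits select region 7 */
	assert(KA2PA(ka) == 0x4000);		/* identity mapping modulo the region prefix */
	return 0;
}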
/kernel/trunk/arch/ia64/include/mm/asid.h
34,27 → 34,24
#include <arch/types.h>
 
typedef __u16 asid_t;
typedef __u32 rid_t;
 
#endif /* __ASM__ */
 
/**
* Number of ia64 RIDs (Region Identifiers) per kernel ASID.
* Note that some architectures may support more bits,
* but those extra bits are not used by the kernel.
*/
#endif
#define RIDS_PER_ASID 7
 
#define RID_MAX 262143 /* 2^18 - 1 */
#define RID_KERNEL 0
#define RID_INVALID 1
 
#define ASID2RID(asid, vrn) (((asid)*RIDS_PER_ASID)+(vrn))
#define ASID2RID(asid, vrn) (((asid)>RIDS_PER_ASID)?(((asid)*RIDS_PER_ASID)+(vrn)):(asid))
#define RID2ASID(rid) ((rid)/RIDS_PER_ASID)
 
#ifndef __ASM__
 
 
typedef __u32 rid_t;
 
#endif
 
#define ASID_MAX_ARCH (RID_MAX/RIDS_PER_ASID)
 
#endif
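The new ASID2RID leaves ASIDs not greater than RIDS_PER_ASID untouched, so the fixed RID_KERNEL (0) and RID_INVALID (1) survive the translation unchanged. A sketch of the arithmetic (assuming the kernel ASID is 0, which this diff does not show):

#include <assert.h>

#define RIDS_PER_ASID 7
#define ASID2RID(asid, vrn) (((asid) > RIDS_PER_ASID) ? (((asid) * RIDS_PER_ASID) + (vrn)) : (asid))
#define RID2ASID(rid) ((rid) / RIDS_PER_ASID)

int main(void)
{
	assert(ASID2RID(0, 7) == 0);	/* kernel ASID maps straight to RID_KERNEL */
	assert(ASID2RID(1, 3) == 1);	/* reserved low ASIDs keep their RID verbatim */
	assert(ASID2RID(100, 3) == 703);	/* ordinary ASIDs get RIDS_PER_ASID RIDs each */
	assert(RID2ASID(703) == 100);
	return 0;
}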
/kernel/trunk/arch/ia64/include/mm/tlb.h
38,14 → 38,47
#include <arch/types.h>
#include <typedefs.h>
 
extern void tc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry, bool dtc);
extern void dtc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry);
extern void itc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry);
/** Data and instruction Translation Register indices. */
#define DTR_KERNEL 0
#define ITR_KERNEL 0
#define DTR_KSTACK 1
 
/** Portion of TLB insertion format data structure. */
union tlb_entry {
__u64 word[2];
struct {
/* Word 0 */
unsigned p : 1; /**< Present. */
unsigned : 1;
unsigned ma : 3; /**< Memory attribute. */
unsigned a : 1; /**< Accessed. */
unsigned d : 1; /**< Dirty. */
unsigned pl : 2; /**< Privilege level. */
unsigned ar : 3; /**< Access rights. */
unsigned long long ppn : 38; /**< Physical Page Number, a.k.a. PFN. */
unsigned : 2;
unsigned ed : 1;
unsigned ig1 : 11;
 
/* Word 1 */
unsigned : 2;
unsigned ps : 6; /**< Page size will be 2^ps. */
unsigned key : 24; /**< Protection key, unused. */
unsigned : 32;
} __attribute__ ((packed));
} __attribute__ ((packed));
typedef union tlb_entry tlb_entry_t;
 
extern void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc);
extern void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry);
extern void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry);
 
extern void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr);
extern void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
extern void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
 
extern void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr);
 
extern void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate);
extern void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate);
extern void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate);
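The tlb_entry_t union above overlays named fields on the two 64-bit words of the ia64 TLB insertion format. A standalone replica for checking the overlay, assuming GCC's little-endian bit-field allocation (as on ia64); the entry_t name is illustrative:

#include <assert.h>
#include <stdint.h>

typedef union {
	uint64_t word[2];
	struct {
		/* Word 0 */
		unsigned p : 1;
		unsigned : 1;
		unsigned ma : 3;
		unsigned a : 1;
		unsigned d : 1;
		unsigned pl : 2;
		unsigned ar : 3;
		unsigned long long ppn : 38;
		unsigned : 2;
		unsigned ed : 1;
		unsigned ig1 : 11;
		/* Word 1 */
		unsigned : 2;
		unsigned ps : 6;
		unsigned key : 24;
		unsigned : 32;
	} __attribute__ ((packed));
} __attribute__ ((packed)) entry_t;

int main(void)
{
	entry_t e = { .word = { 0, 0 } };
	assert(sizeof(e) == 16);	/* two 64-bit words: the insertion format */
	e.p = 1;			/* present bit */
	assert(e.word[0] & 1);		/* p lands in bit 0 of word 0 */
	e.ps = 14;			/* 2^14 = 16K page, for example */
	assert(((e.word[1] >> 2) & 0x3f) == 14);	/* ps occupies bits 2..7 of word 1 */
	return 0;
}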
/kernel/trunk/arch/ia64/Makefile.inc
74,4 → 74,5
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/mm/tlb.c \
arch/$(ARCH)/src/proc/scheduler.c \
arch/$(ARCH)/src/drivers/it.c
/kernel/trunk/arch/ia64/src/proc/scheduler.c
0,0 → 1,55
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <arch.h>
#include <arch/mm/tlb.h>
#include <config.h>
#include <align.h>
 
/** Ensure that the thread's kernel stack is locked in the TLB. */
void before_thread_runs_arch(void)
{
__address base;
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
 
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
/*
* Kernel stack of this thread is not locked in DTLB.
* First, make sure it is not mapped already.
* If not, fill the respective translation register.
*/
dtlb_mapping_insert((__address) THREAD->kstack, KA2PA(THREAD->kstack), true, DTR_KSTACK);
}
}
 
void after_thread_ran_arch(void)
{
}
/kernel/trunk/arch/ia64/src/mm/tlb.c
31,11 → 31,14
*/
 
#include <mm/tlb.h>
#include <mm/asid.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <typedefs.h>
#include <panic.h>
#include <print.h>
 
/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
84,11 → 87,11
region_register rr;
bool restore_rr = false;
 
if (!(entry.not_present.p))
if (!(entry.p))
return;
 
rr.word = rr_read(VA_REGION(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA_REGION(va))))) {
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
96,8 → 99,8
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA_REGION(va));
rr_write(VA_REGION(va), rr0.word);
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
srlz_d();
srlz_i();
}
120,7 → 123,7
);
if (restore_rr) {
rr_write(VA_REGION(va),rr.word);
rr_write(VA2VRN(va), rr.word);
srlz_d();
srlz_i();
}
163,11 → 166,11
region_register rr;
bool restore_rr = false;
 
if (!(entry.not_present.p))
if (!(entry.p))
return;
 
rr.word = rr_read(VA_REGION(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA_REGION(va))))) {
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
175,8 → 178,8
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA_REGION(va));
rr_write(VA_REGION(va), rr0.word);
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
srlz_d();
srlz_i();
}
199,20 → 202,72
);
if (restore_rr) {
rr_write(VA_REGION(va),rr.word);
rr_write(VA2VRN(va), rr.word);
srlz_d();
srlz_i();
}
}
 
/** Insert data into DTLB.
*
* @param page Virtual page address.
* @param frame Physical frame address.
* @param dtr If true, insert into data translation register, use data translation cache otherwise.
* @param tr Translation register if dtr is true, ignored otherwise.
*/
void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
tlb_entry_t entry;
entry.word[0] = 0;
entry.word[1] = 0;
entry.p = true; /* present */
entry.ma = MA_WRITEBACK;
entry.a = true; /* already accessed */
entry.d = true; /* already dirty */
entry.pl = PL_KERNEL;
entry.ar = AR_READ | AR_WRITE;
entry.ppn = frame >> PPN_SHIFT;
entry.ps = PAGE_WIDTH;
if (dtr)
dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
else
dtc_mapping_insert(page, ASID_KERNEL, entry);
}
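For orientation, both call styles of dtlb_mapping_insert() appear in this revision: ia64/src/proc/scheduler.c pins a kernel stack into a data translation register, and the fault handler below installs a one-off translation cache entry:

dtlb_mapping_insert((__address) THREAD->kstack, KA2PA(THREAD->kstack), true, DTR_KSTACK);	/* pinned in DTR */
dtlb_mapping_insert(va, KA2PA(va), false, 0);	/* DTC entry; tr is ignored */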
 
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
panic("%s\n", __FUNCTION__);
}
 
/** Data TLB fault with VHPT turned off.
*
* @param vector Interruption vector.
* @param pstate Structure with saved interruption state.
*/
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
panic("%s: %P\n", __FUNCTION__, pstate->cr_ifa);
region_register rr;
rid_t rid;
__address va;
va = pstate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
rid = rr.map.rid;
if (RID2ASID(rid) == ASID_KERNEL) {
if (VA2VRN(va) == VRN_KERNEL) {
/*
* Provide KA2PA(identity) mapping for the faulting piece of
* kernel address space.
*/
dtlb_mapping_insert(va, KA2PA(va), false, 0);
return;
}
}
panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
}
 
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
/kernel/trunk/arch/ia64/src/mm/page.c
55,7 → 55,6
/** Initialize VHPT and region registers. */
void set_environment(void)
{
 
region_register rr;
pta_register pta;
int i;
62,17 → 61,16
 
/*
* First set up kernel region register.
* This action is redundand (see start.S) but I would to keep it to make sure that
*no unexpected changes will be made.
* This is redundant (see start.S) but we keep it here just to be sure.
*/
rr.word = rr_read(VRN_KERNEL);
rr.map.ve = 0; /* disable VHPT walker */
rr.map.ps = PAGE_WIDTH;
rr.map.rid = ASID2RID(ASID_KERNEL,VRN_KERNEL);
rr.map.rid = ASID2RID(ASID_KERNEL, VRN_KERNEL);
rr_write(VRN_KERNEL, rr.word);
srlz_i();
srlz_d();
 
/*
* And invalidate the rest of region register.
*/
83,7 → 81,7
rr.word = rr_read(i);
rr.map.ve = 0; /* disable VHPT walker */
rr.map.rid = ASID2RID(ASID_INVALID,i);
rr.map.rid = RID_INVALID;
rr_write(i, rr.word);
srlz_i();
srlz_d();
100,10 → 98,6
pta_write(pta.word);
srlz_i();
srlz_d();
 
return ;
}
 
/** Calculate address of collision chain from VPN and ASID.
/kernel/trunk/arch/ia64/src/dummy.s
31,8 → 31,6
.global calibrate_delay_loop
.global asm_delay_loop
.global userspace
.global before_thread_runs_arch
.global after_thread_ran_arch
.global cpu_sleep
.global dummy
.global fpu_enable
39,8 → 37,6
.global fpu_disable
.global fpu_init
 
before_thread_runs_arch:
after_thread_ran_arch:
userspace:
calibrate_delay_loop:
asm_delay_loop:
/kernel/trunk/arch/ia64/src/start.S
52,7 → 52,7
mov r9=rr[r8]
movl r10=(RR_MASK)
and r9=r10,r9
movl r10=((ASID2RID(ASID_KERNEL,VRN_KERNEL)<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
movl r10=((RID_KERNEL<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
or r9=r10,r9
mov rr[r8]=r9
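The movl operand now composes the kernel region register value from RID_KERNEL and KERNEL_PAGE_WIDTH directly. A C sketch of the same composition, assuming the conventional ia64 field positions RID_SHIFT == 8 and PS_SHIFT == 2 (defined elsewhere in the tree, not shown in this diff):

/* rr.rid occupies bits 8..31 and rr.ps bits 2..7 of an ia64 region register,
 * hence the assumed shifts below. RID_KERNEL = 0 and KERNEL_PAGE_WIDTH = 28
 * come from the hunks above. */
#define RID_SHIFT         8	/* assumed */
#define PS_SHIFT          2	/* assumed */
#define RID_KERNEL        0
#define KERNEL_PAGE_WIDTH 28

static unsigned long kernel_rr_bits(void)
{
	/* Same expression the movl loads into r10 above. */
	return ((unsigned long) RID_KERNEL << RID_SHIFT) | ((unsigned long) KERNEL_PAGE_WIDTH << PS_SHIFT);
}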