Subversion Repositories HelenOS-historic

Compare Revisions

Rev 900 → Rev 901

/kernel/trunk/arch/ia64/src/proc/scheduler.c
0,0 → 1,55
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <arch.h>
#include <arch/mm/tlb.h>
#include <config.h>
#include <align.h>
 
/** Ensure that the thread's kernel stack is locked in the TLB. */
void before_thread_runs_arch(void)
{
__address base;
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
 
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
/*
* The kernel stack of this thread is not locked in the DTLB.
* First, make sure it is not mapped already.
* If not, fill the respective translation register.
*/
dtlb_mapping_insert((__address) THREAD->kstack, KA2PA(THREAD->kstack), true, DTR_KSTACK);
}
}
 
void after_thread_ran_arch(void)
{
}
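
The range check in before_thread_runs_arch() depends only on ALIGN_DOWN() rounding config.base down to a 1<<KERNEL_PAGE_WIDTH boundary. A minimal stand-alone sketch of that arithmetic (the macro definition, the page width and both addresses are illustrative assumptions, not the kernel's actual values):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t __address;

/* Assumed semantics of the kernel's ALIGN_DOWN(): mask off the low bits.
 * The page width below is an illustrative value, not HelenOS's actual one. */
#define KERNEL_PAGE_WIDTH 28
#define ALIGN_DOWN(a, s) ((__address)(a) & ~((__address)(s) - 1))

int main(void)
{
    __address config_base = 0x44a0000;  /* hypothetical kernel load address */
    __address kstack = 0x58f2000;       /* hypothetical kernel stack address */
    __address base = ALIGN_DOWN(config_base, (__address)1 << KERNEL_PAGE_WIDTH);

    /* Same predicate as before_thread_runs_arch(): is the stack outside
     * the single large page pinned by the DTR_KSTACK translation register? */
    if (kstack < base || kstack > base + ((__address)1 << KERNEL_PAGE_WIDTH))
        printf("kstack outside pinned page -> dtlb_mapping_insert()\n");
    else
        printf("kstack already covered by the pinned kernel page\n");
    return 0;
}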
/kernel/trunk/arch/ia64/src/mm/tlb.c
31,11 → 31,14
*/
 
#include <mm/tlb.h>
#include <mm/asid.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <typedefs.h>
#include <panic.h>
#include <print.h>
 
/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
84,11 → 87,11
region_register rr;
bool restore_rr = false;
 
if (!(entry.not_present.p))
if (!(entry.p))
return;
 
rr.word = rr_read(VA_REGION(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA_REGION(va))))) {
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
96,8 → 99,8
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA_REGION(va));
rr_write(VA_REGION(va), rr0.word);
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
srlz_d();
srlz_i();
}
120,7 → 123,7
);
if (restore_rr) {
rr_write(VA_REGION(va),rr.word);
rr_write(VA2VRN(va), rr.word);
srlz_d();
srlz_i();
}
163,11 → 166,11
region_register rr;
bool restore_rr = false;
 
if (!(entry.not_present.p))
if (!(entry.p))
return;
 
rr.word = rr_read(VA_REGION(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA_REGION(va))))) {
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
175,8 → 178,8
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA_REGION(va));
rr_write(VA_REGION(va), rr0.word);
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
srlz_d();
srlz_i();
}
199,20 → 202,72
);
if (restore_rr) {
rr_write(VA_REGION(va),rr.word);
rr_write(VA2VRN(va), rr.word);
srlz_d();
srlz_i();
}
}
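
Both purge routines in this file share the same idiom: if the region register for va's region does not already hold the RID derived from the target asid, the required RID is swapped in, the purge is issued, and the original register value is restored (with srlz.d/srlz.i serialization around each write). A self-contained toy model of that sequence (rr_read/rr_write are stubbed with an array standing in for the eight region registers, and ASID2RID below is a placeholder, not the kernel's mapping):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint64_t rr[8];

static uint64_t rr_read(unsigned vrn) { return rr[vrn]; }
static void rr_write(unsigned vrn, uint64_t w) { rr[vrn] = w; }

#define VA2VRN(va)          ((va) >> 61)
#define ASID2RID(asid, vrn) (((asid) << 3) | (vrn))    /* toy mapping */

static void purge(uint64_t va, uint64_t asid)
{
    uint64_t old, want;
    bool restore_rr = false;

    old = rr_read(VA2VRN(va));
    want = ASID2RID(asid, VA2VRN(va));
    if ((restore_rr = (old != want))) {
        rr_write(VA2VRN(va), want);     /* borrow the register */
        /* srlz.d / srlz.i would go here */
    }

    printf("purging va=%#llx under rid=%#llx\n",
        (unsigned long long)va, (unsigned long long)rr_read(VA2VRN(va)));

    if (restore_rr) {
        rr_write(VA2VRN(va), old);      /* put the old RID back */
        /* srlz.d / srlz.i again */
    }
}

int main(void)
{
    rr[7] = ASID2RID(1, 7);             /* kernel RID installed */
    purge(0xe000000000014000ULL, 2);    /* foreign ASID: swap + restore */
    return 0;
}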
 
/** Insert mapping into DTLB.
*
* @param page Virtual page address.
* @param frame Physical frame address.
* @param dtr If true, insert into data translation register, use data translation cache otherwise.
* @param tr Translation register if dtr is true, ignored otherwise.
*/
void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
tlb_entry_t entry;
entry.word[0] = 0;
entry.word[1] = 0;
entry.p = true; /* present */
entry.ma = MA_WRITEBACK;
entry.a = true; /* already accessed */
entry.d = true; /* already dirty */
entry.pl = PL_KERNEL;
entry.ar = AR_READ | AR_WRITE;
entry.ppn = frame >> PPN_SHIFT;
entry.ps = PAGE_WIDTH;
if (dtr)
dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
else
dtc_mapping_insert(page, ASID_KERNEL, entry);
}
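
dtlb_mapping_insert() is the helper that before_thread_runs_arch() above calls with dtr = true and tr = DTR_KSTACK to pin the kernel stack, and that alternate_data_tlb_fault() below calls with dtr = false so the mapping merely lands in the data translation cache. A quick worked example of the frame-to-ppn encoding (PPN_SHIFT is assumed to be 12 here, i.e. the physical page number begins above the 4 KiB offset bits; the frame address is hypothetical):

#include <stdio.h>
#include <stdint.h>

#define PPN_SHIFT 12    /* assumed: ppn field starts at physical address bit 12 */

int main(void)
{
    uint64_t frame = 0x0000000004800000ULL; /* hypothetical physical frame */

    /* Mirrors entry.ppn = frame >> PPN_SHIFT in dtlb_mapping_insert(). */
    printf("frame=%#llx -> ppn=%#llx\n",
        (unsigned long long)frame,
        (unsigned long long)(frame >> PPN_SHIFT));
    return 0;
}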
 
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
panic("%s\n", __FUNCTION__);
}
 
/** Data TLB fault with VHPT turned off.
*
* @param vector Interruption vector.
* @param pstate Structure with saved interruption state.
*/
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
panic("%s: %P\n", __FUNCTION__, pstate->cr_ifa);
region_register rr;
rid_t rid;
__address va;
va = pstate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
rid = rr.map.rid;
if (RID2ASID(rid) == ASID_KERNEL) {
if (VA2VRN(va) == VRN_KERNEL) {
/*
* Provide a KA2PA (identity) mapping for the faulting piece
* of the kernel address space.
*/
dtlb_mapping_insert(va, KA2PA(va), false, 0);
return;
}
}
panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
}
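
The handler keys off the virtual region number. On ia64 the VRN is the top three bits of the virtual address (bits 63:61), so VA2VRN() plausibly reduces to a shift; the definitions below are illustrative reconstructions, not the kernel's actual macros:

#include <stdio.h>
#include <stdint.h>

/* Illustrative reconstructions (assumptions, not HelenOS's definitions):
 * ia64 selects one of eight region registers by va bits 63:61. */
#define VA2VRN(va)  ((uint64_t)(va) >> 61)
#define VRN_KERNEL  7ULL    /* assumed: kernel lives in region 7 */

int main(void)
{
    uint64_t va = 0xe000000000014000ULL;    /* hypothetical kernel address */

    printf("va=%#llx -> vrn=%llu (%s)\n",
        (unsigned long long)va,
        (unsigned long long)VA2VRN(va),
        VA2VRN(va) == VRN_KERNEL ? "kernel region" : "other region");
    return 0;
}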
 
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
/kernel/trunk/arch/ia64/src/mm/page.c
55,7 → 55,6
/** Initialize VHPT and region registers. */
void set_environment(void)
{
 
region_register rr;
pta_register pta;
int i;
62,17 → 61,16
 
/*
* First set up kernel region register.
* This action is redundand (see start.S) but I would to keep it to make sure that
*no unexpected changes will be made.
* This is redundant (see start.S), but we keep it here just to be sure.
*/
rr.word = rr_read(VRN_KERNEL);
rr.map.ve = 0; /* disable VHPT walker */
rr.map.ps = PAGE_WIDTH;
rr.map.rid = ASID2RID(ASID_KERNEL,VRN_KERNEL);
rr.map.rid = ASID2RID(ASID_KERNEL, VRN_KERNEL);
rr_write(VRN_KERNEL, rr.word);
srlz_i();
srlz_d();
 
/*
* And invalidate the rest of the region registers.
*/
83,7 → 81,7
rr.word = rr_read(i);
rr.map.ve = 0; /* disable VHPT walker */
rr.map.rid = ASID2RID(ASID_INVALID,i);
rr.map.rid = RID_INVALID;
rr_write(i, rr.word);
srlz_i();
srlz_d();
100,10 → 98,6
pta_write(pta.word);
srlz_i();
srlz_d();
 
return ;
}
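
set_environment() pokes the ve, ps and rid fields of each region register. For reference, the architected ia64 region register layout that the region_register union presumably mirrors puts ve in bit 0, ps in bits 7:2 and rid in bits 31:8; the union below is an illustrative reconstruction, not the kernel's header, and the field values are assumptions:

#include <stdio.h>
#include <stdint.h>

/* Illustrative reconstruction of the ia64 region register layout
 * (architected: ve bit 0, ps bits 7:2, rid bits 31:8). */
typedef union {
    uint64_t word;
    struct {
        uint64_t ve : 1;    /* VHPT walker enable */
        uint64_t    : 1;    /* reserved */
        uint64_t ps : 6;    /* preferred page size (log2) */
        uint64_t rid : 24;  /* region identifier */
        uint64_t    : 32;   /* reserved */
    } map;
} region_register;

int main(void)
{
    region_register rr = { .word = 0 };

    rr.map.ve = 0;      /* disable VHPT walker, as set_environment() does */
    rr.map.ps = 14;     /* assumed 16 KiB PAGE_WIDTH, for illustration */
    rr.map.rid = 0x100; /* hypothetical RID */

    printf("rr.word = %#llx\n", (unsigned long long)rr.word);
    return 0;
}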
 
/** Calculate address of collision chain from VPN and ASID.
/kernel/trunk/arch/ia64/src/dummy.s
31,8 → 31,6
.global calibrate_delay_loop
.global asm_delay_loop
.global userspace
.global before_thread_runs_arch
.global after_thread_ran_arch
.global cpu_sleep
.global dummy
.global fpu_enable
39,8 → 37,6
.global fpu_disable
.global fpu_init
 
before_thread_runs_arch:
after_thread_ran_arch:
userspace:
calibrate_delay_loop:
asm_delay_loop:
/kernel/trunk/arch/ia64/src/start.S
52,7 → 52,7
mov r9=rr[r8]
movl r10=(RR_MASK)
and r9=r10,r9
movl r10=((ASID2RID(ASID_KERNEL,VRN_KERNEL)<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
movl r10=((RID_KERNEL<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
or r9=r10,r9
mov rr[r8]=r9
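
The immediate moved into r10 composes the same two fields in place. With the architected shifts (RID_SHIFT = 8, PS_SHIFT = 2, matching the region register layout sketched above), a quick cross-check in C (the RID_KERNEL and KERNEL_PAGE_WIDTH values are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

/* Shifts follow the architected RR layout (rid at bit 8, ps at bit 2).
 * The field values themselves are illustrative assumptions. */
#define RID_SHIFT   8
#define PS_SHIFT    2
#define RID_KERNEL  7
#define KERNEL_PAGE_WIDTH 28

int main(void)
{
    uint64_t r10 = ((uint64_t)RID_KERNEL << RID_SHIFT) |
        ((uint64_t)KERNEL_PAGE_WIDTH << PS_SHIFT);

    /* This is the constant start.S ORs into the masked region register. */
    printf("r10 = %#llx\n", (unsigned long long)r10);
    return 0;
}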