Subversion Repositories HelenOS

Compare Revisions: Rev 1967 → Rev 1968

/tags/0.1.1/kernel/arch/ia64/src/asm.S
0,0 → 1,156
#
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
#include <arch/register.h>
 
.text
 
/** Copy memory from/to userspace.
*
* This memcpy() has been taken from the assembler output of
* the generic _memcpy() and modified to have the failover part.
*
* @param in0 Destination address.
* @param in1 Source address.
* @param in2 Number of bytes to copy.
*/
.global memcpy
.global memcpy_from_uspace
.global memcpy_to_uspace
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address
memcpy:
memcpy_from_uspace:
memcpy_to_uspace:
alloc loc0 = ar.pfs, 3, 1, 0, 0
 
shr.u r18 = in2, 3
mov r8 = in1 ;; /* prepare to return in1 on success */
cmp.ne p6, p7 = 0, r18 ;;
(p7) mov r17 = r0
(p7) br.cond.dptk 1f ;;
mov r16 = r0
mov r17 = r0 ;;
 
0:
shladd r14 = r17, 3, r0
adds r16 = 1, r16 ;;
add r15 = r14, r8
sxt4 r17 = r16
add r14 = r14, in0 ;;
ld8 r15 = [r15]
cmp.gtu p6, p7 = r18, r17 ;;
st8 [r14] = r15
(p6) br.cond.dptk 0b
 
1:
and in2 = 7, in2 ;;
cmp.eq p6, p7 = 0, in2 ;;
(p6) mov ar.pfs = loc0
(p6) br.ret.dptk.many rp
shladd r14 = r17, 3, r0
mov r16 = r0
mov r17 = r0 ;;
add in0 = in0, r14
add r18 = r8, r14 ;;
 
2:
add r14 = r16, r18
adds r17 = 1, r17
add r15 = in0, r16 ;;
ld1 r14 = [r14]
sxt4 r16 = r17 ;;
st1 [r15] = r14
cmp.gtu p6, p7 = in2, r16
(p6) br.cond.dptk 2b
 
mov ar.pfs = loc0
br.ret.sptk.many rp
memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
mov r8 = r0 /* return 0 on failure */
mov ar.pfs = loc0
br.ret.sptk.many rp
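 
/*
 * Illustrative C-side view of the contract above (a sketch, not part of
 * the original file): on success the routines return the source address
 * (in1), i.e. a non-zero value; if the userspace access faults, the
 * failover path above returns 0. A hypothetical caller:
 *
 *	if (!memcpy_from_uspace(kbuf, uspace_ptr, len)) {
 *		// the copy faulted; treat as an invalid userspace buffer
 *	}
 */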
 
.global memsetb
memsetb:
br _memsetb
 
.global cpu_halt
cpu_halt:
br cpu_halt
 
.global panic_printf
panic_printf:
{
br.call.sptk.many b0=printf
}
br halt
 
/** Switch to userspace - low level code.
*
* @param in0 Userspace entry point address.
* @param in1 Userspace stack pointer address.
* @param in2 Userspace register stack pointer address.
* @param in3 Userspace address of thread uspace_arg_t structure.
* @param in4 Value to be stored in IPSR.
* @param in5 Value to be stored in RSC.
*/
.global switch_to_userspace
switch_to_userspace:
alloc loc0 = ar.pfs, 6, 3, 0, 0
rsm (PSR_IC_MASK | PSR_I_MASK) /* disable interruption collection and interrupts */
srlz.d ;;
srlz.i ;;
mov cr.ipsr = in4
mov cr.iip = in0
mov r12 = in1
 
xor r1 = r1, r1
mov loc1 = cr.ifs
movl loc2 = PFM_MASK ;;
and loc1 = loc2, loc1 ;;
mov cr.ifs = loc1 ;; /* prevent decrementing BSP by rfi */
 
invala
mov loc1 = ar.rsc ;;
and loc1 = ~3, loc1 ;;
mov ar.rsc = loc1 ;; /* put RSE into enforced lazy mode */
 
flushrs ;;
mov ar.bspstore = in2 ;;
mov ar.rsc = in5 ;;
mov r8 = in3
rfi ;;
/tags/0.1.1/kernel/arch/ia64/src/mm/vhpt.c
0,0 → 1,88
/*
* Copyright (C) 2006 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/mm/vhpt.h>
#include <mm/frame.h>
#include <print.h>
 
 
static vhpt_entry_t* vhpt_base;
 
__address vhpt_set_up(void)
{
vhpt_base = (vhpt_entry_t *) PA2KA(PFN2ADDR(frame_alloc(VHPT_WIDTH - FRAME_WIDTH, FRAME_KA)));
if (!vhpt_base)
panic("Kernel configured with VHPT but no memory for table.");
vhpt_invalidate_all();
return (__address) vhpt_base;
}
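 
/*
 * Size arithmetic behind vhpt_set_up(), as a worked example with
 * illustrative values only: the table occupies 1 << VHPT_WIDTH bytes, so
 * frame_alloc() is asked for order VHPT_WIDTH - FRAME_WIDTH. Assuming
 * VHPT_WIDTH = 20 and FRAME_WIDTH = 14 (16 KiB frames), that is order 6,
 * i.e. 64 contiguous frames = 1 MiB for the whole table.
 */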
 
 
void vhpt_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
region_register rr_save, rr;
index_t vrn;
rid_t rid;
__u64 tag;
 
vhpt_entry_t *ventry;
 
 
vrn = va >> VRN_SHIFT;
rid = ASID2RID(asid, vrn);
rr_save.word = rr_read(vrn);
rr.word = rr_save.word;
rr.map.rid = rid;
rr_write(vrn, rr.word);
srlz_i();
ventry = (vhpt_entry_t *) thash(va);
tag = ttag(va);
rr_write(vrn, rr_save.word);
srlz_i();
srlz_d();
 
ventry->word[0] = entry.word[0];
ventry->word[1] = entry.word[1];
ventry->present.tag.tag_word = tag;
 
}
 
void vhpt_invalidate_all(void)
{
memsetb((__address) vhpt_base, 1 << VHPT_WIDTH, 0);
}
 
void vhpt_invalidate_asid(asid_t asid)
{
vhpt_invalidate_all();
}
 
 
/tags/0.1.1/kernel/arch/ia64/src/mm/tlb.c
0,0 → 1,615
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/*
* TLB management.
*/
 
#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <print.h>
#include <arch.h>
 
/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
ipl_t ipl;
__address adr;
__u32 count1, count2, stride1, stride2;
int i,j;
adr = PAL_PTCE_INFO_BASE();
count1 = PAL_PTCE_INFO_COUNT1();
count2 = PAL_PTCE_INFO_COUNT2();
stride1 = PAL_PTCE_INFO_STRIDE1();
stride2 = PAL_PTCE_INFO_STRIDE2();
ipl = interrupts_disable();
 
for(i = 0; i < count1; i++) {
for(j = 0; j < count2; j++) {
__asm__ volatile (
"ptc.e %0 ;;"
:
: "r" (adr)
);
adr += stride2;
}
adr += stride1;
}
 
interrupts_restore(ipl);
 
srlz_d();
srlz_i();
#ifdef CONFIG_VHPT
vhpt_invalidate_all();
#endif
}
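 
/*
 * The PAL firmware describes the ptc.e purge domain as a two-dimensional
 * grid: the nested loops above issue count1 * count2 ptc.e instructions,
 * advancing the address by stride2 within the inner loop and by an extra
 * stride1 between outer iterations. For example (illustrative values
 * only), count1 = 2 and count2 = 4 would purge with 8 ptc.e operations.
 */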
 
/** Invalidate entries belonging to an address space.
*
* @param asid Address space identifier.
*/
void tlb_invalidate_asid(asid_t asid)
{
tlb_invalidate_all();
}
 
 
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
region_register rr;
bool restore_rr = false;
int b = 0;
int c = cnt;
 
__address va;
va = page;
 
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
*/
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
srlz_d();
srlz_i();
}
while(c >>= 1)
b++;
b >>= 1;
__u64 ps;
switch (b) {
case 0: /*cnt 1-3*/
ps = PAGE_WIDTH;
break;
case 1: /*cnt 4-15*/
/*cnt=((cnt-1)/4)+1;*/
ps = PAGE_WIDTH+2;
va &= ~((1<<ps)-1);
break;
case 2: /*cnt 16-63*/
/*cnt=((cnt-1)/16)+1;*/
ps = PAGE_WIDTH+4;
va &= ~((1<<ps)-1);
break;
case 3: /*cnt 64-255*/
/*cnt=((cnt-1)/64)+1;*/
ps = PAGE_WIDTH+6;
va &= ~((1<<ps)-1);
break;
case 4: /*cnt 256-1023*/
/*cnt=((cnt-1)/256)+1;*/
ps = PAGE_WIDTH+8;
va &= ~((1<<ps)-1);
break;
case 5: /*cnt 1024-4095*/
/*cnt=((cnt-1)/1024)+1;*/
ps = PAGE_WIDTH+10;
va &= ~((1<<ps)-1);
break;
case 6: /*cnt 4096-16383*/
/*cnt=((cnt-1)/4096)+1;*/
ps = PAGE_WIDTH+12;
va &= ~((1<<ps)-1);
break;
case 7: /*cnt 16384-65535*/
case 8: /*cnt 65536-(256K-1)*/
/*cnt=((cnt-1)/16384)+1;*/
ps = PAGE_WIDTH+14;
va &= ~((1<<ps)-1);
break;
default:
/*cnt=((cnt-1)/(16384*16))+1;*/
ps=PAGE_WIDTH+18;
va&=~((1<<ps)-1);
break;
}
/*cnt+=(page!=va);*/
for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) {
__asm__ volatile (
"ptc.l %0,%1;;"
:
: "r" (va), "r" (ps<<2)
);
}
srlz_d();
srlz_i();
if (restore_rr) {
rr_write(VA2VRN(va), rr.word);
srlz_d();
srlz_i();
}
}
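 
/*
 * Worked example of the page-size selection above (illustrative count):
 * for cnt = 20 pages, the while loop leaves b = floor(log2(20)) = 4 and
 * b >>= 1 gives 2, i.e. the "cnt 16-63" case and ps = PAGE_WIDTH + 4.
 * The purge address is aligned down to a 1 << ps boundary and the loop
 * then steps in 1 << ps strides, trading purge precision for fewer
 * ptc.l instructions.
 */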
 
 
/** Insert data into data translation cache.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
*/
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
tc_mapping_insert(va, asid, entry, true);
}
 
/** Insert data into instruction translation cache.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
*/
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
tc_mapping_insert(va, asid, entry, false);
}
 
/** Insert data into instruction or data translation cache.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
*/
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
region_register rr;
bool restore_rr = false;
 
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
*/
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
srlz_d();
srlz_i();
}
__asm__ volatile (
"mov r8=psr;;\n"
"rsm %0;;\n" /* PSR_IC_MASK */
"srlz.d;;\n"
"srlz.i;;\n"
"mov cr.ifa=%1\n" /* va */
"mov cr.itir=%2;;\n" /* entry.word[1] */
"cmp.eq p6,p7 = %4,r0;;\n" /* decide between itc and dtc */
"(p6) itc.i %3;;\n"
"(p7) itc.d %3;;\n"
"mov psr.l=r8;;\n"
"srlz.d;;\n"
:
: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
: "p6", "p7", "r8"
);
if (restore_rr) {
rr_write(VA2VRN(va), rr.word);
srlz_d();
srlz_i();
}
}
 
/** Insert data into instruction translation register.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param tr Translation register.
*/
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
tr_mapping_insert(va, asid, entry, false, tr);
}
 
/** Insert data into data translation register.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param tr Translation register.
*/
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
tr_mapping_insert(va, asid, entry, true, tr);
}
 
/** Insert data into instruction or data translation register.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param dtr If true, insert into data translation register, use instruction translation register otherwise.
* @param tr Translation register.
*/
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
region_register rr;
bool restore_rr = false;
 
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
*/
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
srlz_d();
srlz_i();
}
 
__asm__ volatile (
"mov r8=psr;;\n"
"rsm %0;;\n" /* PSR_IC_MASK */
"srlz.d;;\n"
"srlz.i;;\n"
"mov cr.ifa=%1\n" /* va */
"mov cr.itir=%2;;\n" /* entry.word[1] */
"cmp.eq p6,p7=%5,r0;;\n" /* decide between itr and dtr */
"(p6) itr.i itr[%4]=%3;;\n"
"(p7) itr.d dtr[%4]=%3;;\n"
"mov psr.l=r8;;\n"
"srlz.d;;\n"
:
: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
: "p6", "p7", "r8"
);
if (restore_rr) {
rr_write(VA2VRN(va), rr.word);
srlz_d();
srlz_i();
}
}
 
/** Insert data into DTLB.
*
* @param page Virtual page address.
* @param frame Physical address of the frame to which the page is mapped.
* @param dtr If true, insert into data translation register, use data translation cache otherwise.
* @param tr Translation register if dtr is true, ignored otherwise.
*/
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
tlb_entry_t entry;
entry.word[0] = 0;
entry.word[1] = 0;
entry.p = true; /* present */
entry.ma = MA_WRITEBACK;
entry.a = true; /* already accessed */
entry.d = true; /* already dirty */
entry.pl = PL_KERNEL;
entry.ar = AR_READ | AR_WRITE;
entry.ppn = frame >> PPN_SHIFT;
entry.ps = PAGE_WIDTH;
if (dtr)
dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
else
dtc_mapping_insert(page, ASID_KERNEL, entry);
}
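 
/*
 * Both insertion modes are exercised elsewhere in this changeset: the
 * alternate data TLB fault handler below inserts into the DTC, while
 * before_thread_runs_arch() in proc/scheduler.c pins kernel stacks in
 * translation registers (calls abbreviated here):
 *
 *	dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
 *	dtlb_kernel_mapping_insert(page, KA2PA(page), true, DTR_KSTACK1);
 */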
 
/** Copy content of PTE into data translation cache.
*
* @param t PTE.
*/
void dtc_pte_copy(pte_t *t)
{
tlb_entry_t entry;
 
entry.word[0] = 0;
entry.word[1] = 0;
entry.p = t->p;
entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
entry.a = t->a;
entry.d = t->d;
entry.pl = t->k ? PL_KERNEL : PL_USER;
entry.ar = t->w ? AR_WRITE : AR_READ;
entry.ppn = t->frame >> PPN_SHIFT;
entry.ps = PAGE_WIDTH;
dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}
 
/** Copy content of PTE into instruction translation cache.
*
* @param t PTE.
*/
void itc_pte_copy(pte_t *t)
{
tlb_entry_t entry;
 
entry.word[0] = 0;
entry.word[1] = 0;
ASSERT(t->x);
entry.p = t->p;
entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
entry.a = t->a;
entry.pl = t->k ? PL_KERNEL : PL_USER;
entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
entry.ppn = t->frame >> PPN_SHIFT;
entry.ps = PAGE_WIDTH;
itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}
 
/** Instruction TLB fault handler for faults with VHPT turned off.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
{
region_register rr;
__address va;
pte_t *t;
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va)); /* fetch the RID for the panic message below */
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
if (t) {
/*
* The mapping was found in software page hash table.
* Insert it into instruction translation cache.
*/
itc_pte_copy(t);
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, istate->cr_ifa, rr.map.rid, istate->cr_iip);
}
}
}
 
/** Data TLB fault handler for faults with VHPT turned off.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void alternate_data_tlb_fault(__u64 vector, istate_t *istate)
{
region_register rr;
rid_t rid;
__address va;
pte_t *t;
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
rid = rr.map.rid;
if (RID2ASID(rid) == ASID_KERNEL) {
if (VA2VRN(va) == VRN_KERNEL) {
/*
* Provide KA2PA(identity) mapping for faulting piece of
* kernel address space.
*/
dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
return;
}
}
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
if (t) {
/*
* The mapping was found in software page hash table.
* Insert it into data translation cache.
*/
dtc_pte_copy(t);
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
}
}
}
 
/** Data nested TLB fault handler.
*
* This fault should not occur.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
{
panic("%s\n", __FUNCTION__);
}
 
/** Data Dirty bit fault handler.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
{
pte_t *t;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
/*
* Update the Dirty bit in page tables and reinsert
* the mapping into DTC.
*/
t->d = true;
dtc_pte_copy(t);
}
page_table_unlock(AS, true);
}
 
/** Instruction access bit fault handler.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
{
pte_t *t;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
/*
* Update the Accessed bit in page tables and reinsert
* the mapping into ITC.
*/
t->a = true;
itc_pte_copy(t);
}
page_table_unlock(AS, true);
}
 
/** Data access bit fault handler.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void data_access_bit_fault(__u64 vector, istate_t *istate)
{
pte_t *t;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
/*
* Update the Accessed bit in page tables and reinsert
* the mapping into DTC.
*/
t->a = true;
dtc_pte_copy(t);
}
page_table_unlock(AS, true);
}
 
/** Page not present fault handler.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void page_not_present(__u64 vector, istate_t *istate)
{
region_register rr;
__address va;
pte_t *t;
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va)); /* fetch the RID for the panic message below */
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
ASSERT(t);
if (t->p) {
/*
* If the Present bit is set in page hash table, just copy it
* and update ITC/DTC.
*/
if (t->x)
itc_pte_copy(t);
else
dtc_pte_copy(t);
page_table_unlock(AS, true);
} else {
page_table_unlock(AS, true);
if (as_page_fault(va, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rr.map.rid);
}
}
}
/tags/0.1.1/kernel/arch/ia64/src/mm/page.c
0,0 → 1,258
/*
* Copyright (C) 2006 Jakub Jermar
* Copyright (C) 2006 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/mm/page.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/mm/vhpt.h>
#include <arch/types.h>
#include <typedefs.h>
#include <print.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <config.h>
#include <panic.h>
#include <arch/asm.h>
#include <arch/barrier.h>
#include <memstr.h>
 
static void set_environment(void);
 
/** Initialize ia64 virtual address translation subsystem. */
void page_arch_init(void)
{
page_mapping_operations = &ht_mapping_operations;
pk_disable();
set_environment();
}
 
/** Initialize VHPT and region registers. */
void set_environment(void)
{
region_register rr;
pta_register pta;
int i;
#ifdef CONFIG_VHPT
__address vhpt_base;
#endif
 
/*
* First set up kernel region register.
* This is redundant (see start.S) but we keep it here just to be sure.
*/
rr.word = rr_read(VRN_KERNEL);
rr.map.ve = 0; /* disable VHPT walker */
rr.map.ps = PAGE_WIDTH;
rr.map.rid = ASID2RID(ASID_KERNEL, VRN_KERNEL);
rr_write(VRN_KERNEL, rr.word);
srlz_i();
srlz_d();
 
/*
* And set up the rest of the region registers.
*/
for(i = 0; i < REGION_REGISTERS; i++) {
/* skip kernel rr */
if (i == VRN_KERNEL)
continue;
rr.word = rr_read(i);
rr.map.ve = 0; /* disable VHPT walker */
rr.map.rid = RID_KERNEL;
rr.map.ps = PAGE_WIDTH;
rr_write(i, rr.word);
srlz_i();
srlz_d();
}
 
#ifdef CONFIG_VHPT
vhpt_base = vhpt_set_up();
#endif
/*
* Set up PTA register.
*/
pta.word = pta_read();
#ifndef CONFIG_VHPT
pta.map.ve = 0; /* disable VHPT walker */
pta.map.base = 0 >> PTA_BASE_SHIFT;
#else
pta.map.ve = 1; /* enable VHPT walker */
pta.map.base = vhpt_base >> PTA_BASE_SHIFT;
#endif
pta.map.vf = 1; /* large entry format */
pta.map.size = VHPT_WIDTH;
pta_write(pta.word);
srlz_i();
srlz_d();
}
 
/** Calculate address of collision chain from VPN and ASID.
*
* Interrupts must be disabled.
*
* @param page Address of virtual page including VRN bits.
* @param asid Address space identifier.
*
* @return VHPT entry address.
*/
vhpt_entry_t *vhpt_hash(__address page, asid_t asid)
{
region_register rr_save, rr;
index_t vrn;
rid_t rid;
vhpt_entry_t *v;
 
vrn = page >> VRN_SHIFT;
rid = ASID2RID(asid, vrn);
rr_save.word = rr_read(vrn);
if (rr_save.map.rid == rid) {
/*
* The RID is already in place, compute thash and return.
*/
v = (vhpt_entry_t *) thash(page);
return v;
}
/*
* The RID must be written to some region register.
* To speed things up, register indexed by vrn is used.
*/
rr.word = rr_save.word;
rr.map.rid = rid;
rr_write(vrn, rr.word);
srlz_i();
v = (vhpt_entry_t *) thash(page);
rr_write(vrn, rr_save.word);
srlz_i();
srlz_d();
 
return v;
}
 
/** Compare ASID and VPN against PTE.
*
* Interrupts must be disabled.
*
* @param page Address of virtual page including VRN bits.
* @param asid Address space identifier.
* @param v VHPT entry to compare against.
*
* @return True if page and asid match the page and asid of v, false otherwise.
*/
bool vhpt_compare(__address page, asid_t asid, vhpt_entry_t *v)
{
region_register rr_save, rr;
index_t vrn;
rid_t rid;
bool match;
 
ASSERT(v);
 
vrn = page >> VRN_SHIFT;
rid = ASID2RID(asid, vrn);
rr_save.word = rr_read(vrn);
if (rr_save.map.rid == rid) {
/*
* The RID is already in place, compare ttag with t and return.
*/
return ttag(page) == v->present.tag.tag_word;
}
/*
* The RID must be written to some region register.
* To speed things up, register indexed by vrn is used.
*/
rr.word = rr_save.word;
rr.map.rid = rid;
rr_write(vrn, rr.word);
srlz_i();
match = (ttag(page) == v->present.tag.tag_word);
rr_write(vrn, rr_save.word);
srlz_i();
srlz_d();
 
return match;
}
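 
/*
 * A minimal sketch of how the two helpers above combine into a VHPT
 * lookup (hypothetical caller; interrupts must be disabled as the
 * docblocks require):
 *
 *	vhpt_entry_t *v = vhpt_hash(page, asid);
 *	if (vhpt_compare(page, asid, v)) {
 *		// v is the collision chain head describing (page, asid)
 *	}
 */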
 
/** Set up one VHPT entry.
*
* @param v VHPT entry to be set up.
* @param page Virtual address of the page mapped by the entry.
* @param asid Address space identifier of the address space to which page belongs.
* @param frame Physical address of the frame to which the page is mapped.
* @param flags Different flags for the mapping.
*/
void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags)
{
region_register rr_save, rr;
index_t vrn;
rid_t rid;
__u64 tag;
 
ASSERT(v);
 
vrn = page >> VRN_SHIFT;
rid = ASID2RID(asid, vrn);
/*
* Compute ttag.
*/
rr_save.word = rr_read(vrn);
rr.word = rr_save.word;
rr.map.rid = rid;
rr_write(vrn, rr.word);
srlz_i();
tag = ttag(page);
rr_write(vrn, rr_save.word);
srlz_i();
srlz_d();
/*
* Clear the entry.
*/
v->word[0] = 0;
v->word[1] = 0;
v->word[2] = 0;
v->word[3] = 0;
v->present.p = true;
v->present.ma = (flags & PAGE_CACHEABLE) ? MA_WRITEBACK : MA_UNCACHEABLE;
v->present.a = false; /* not accessed */
v->present.d = false; /* not dirty */
v->present.pl = (flags & PAGE_USER) ? PL_USER : PL_KERNEL;
v->present.ar = (flags & PAGE_WRITE) ? AR_WRITE : AR_READ;
v->present.ar |= (flags & PAGE_EXEC) ? AR_EXECUTE : 0;
v->present.ppn = frame >> PPN_SHIFT;
v->present.ed = false; /* exception not deferred */
v->present.ps = PAGE_WIDTH;
v->present.key = 0;
v->present.tag.tag_word = tag;
}
/tags/0.1.1/kernel/arch/ia64/src/mm/as.c
0,0 → 1,80
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/mm/as.h>
#include <arch/mm/asid.h>
#include <arch/mm/page.h>
#include <genarch/mm/as_ht.h>
#include <genarch/mm/asid_fifo.h>
#include <mm/asid.h>
#include <arch.h>
#include <arch/barrier.h>
#include <synch/spinlock.h>
 
/** Architecture dependent address space init. */
void as_arch_init(void)
{
as_operations = &as_ht_operations;
asid_fifo_init();
}
 
/** Prepare registers for switching to another address space.
*
* @param as Address space.
*/
void as_install_arch(as_t *as)
{
ipl_t ipl;
region_register rr;
int i;
ipl = interrupts_disable();
spinlock_lock(&as->lock);
ASSERT(as->asid != ASID_INVALID);
/*
* Load respective ASID (7 consecutive RIDs) to
* region registers.
*/
for (i = 0; i < REGION_REGISTERS; i++) {
if (i == VRN_KERNEL)
continue;
rr.word = rr_read(i);
rr.map.ve = false; /* disable VHPT walker */
rr.map.rid = ASID2RID(as->asid, i);
rr.map.ps = PAGE_WIDTH;
rr_write(i, rr.word);
}
srlz_d();
srlz_i();
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
}
/tags/0.1.1/kernel/arch/ia64/src/mm/frame.c
0,0 → 1,49
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/mm/frame.h>
#include <mm/frame.h>
#include <config.h>
#include <panic.h>
 
/*
* This is Ski-specific and certainly not sufficient
* for real ia64 systems that provide a memory map.
*/
#define ROM_BASE 0xa0000
#define ROM_SIZE (384*1024)
 
void frame_arch_init(void)
{
zone_create(0, config.memory_size >> FRAME_WIDTH, 1, 0);
/*
* Blacklist ROM regions.
*/
frame_mark_unavailable(ADDR2PFN(ROM_BASE), ROM_SIZE >> FRAME_WIDTH);
}
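 
/*
 * Arithmetic behind the blacklisting above, assuming FRAME_WIDTH = 14
 * (16 KiB frames; an illustrative value): ADDR2PFN(0xa0000) = 0xa0000 >> 14
 * = 40 and ROM_SIZE >> FRAME_WIDTH = (384 * 1024) / 16384 = 24, so frames
 * 40 through 63 are marked unavailable.
 */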
/tags/0.1.1/kernel/arch/ia64/src/ddi/ddi.c
0,0 → 1,47
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <ddi/ddi.h>
#include <proc/task.h>
#include <arch/types.h>
#include <typedefs.h>
 
/** Enable I/O space range for task.
*
* Interrupts are disabled and task is locked.
*
* @param task Task.
* @param ioaddr Starting I/O space address.
* @param size Size of the enabled I/O range.
*
* @return 0 on success or an error code from errno.h.
*/
int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size)
{
return 0;
}
/tags/0.1.1/kernel/arch/ia64/src/interrupt.c
0,0 → 1,250
/*
* Copyright (C) 2005 Jakub Jermar
* Copyright (C) 2005 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
 
#include <arch/interrupt.h>
#include <panic.h>
#include <print.h>
#include <console/console.h>
#include <arch/types.h>
#include <arch/asm.h>
#include <arch/barrier.h>
#include <arch/register.h>
#include <arch/drivers/it.h>
#include <arch.h>
#include <symtab.h>
#include <debug.h>
#include <syscall/syscall.h>
#include <print.h>
#include <proc/scheduler.h>
#include <ipc/sysipc.h>
 
 
#define VECTORS_64_BUNDLE 20
#define VECTORS_16_BUNDLE 48
#define VECTORS_16_BUNDLE_START 0x5000
#define VECTOR_MAX 0x7f00
 
#define BUNDLE_SIZE 16
 
char *vector_names_64_bundle[VECTORS_64_BUNDLE] = {
"VHPT Translation vector",
"Instruction TLB vector",
"Data TLB vector",
"Alternate Instruction TLB vector",
"Alternate Data TLB vector",
"Data Nested TLB vector",
"Instruction Key Miss vector",
"Data Key Miss vector",
"Dirty-Bit vector",
"Instruction Access-Bit vector",
"Data Access-Bit vector"
"Break Instruction vector",
"External Interrupt vector"
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved"
};
 
char *vector_names_16_bundle[VECTORS_16_BUNDLE] = {
"Page Not Present vector",
"Key Permission vector",
"Instruction Access rights vector",
"Data Access Rights vector",
"General Exception vector",
"Disabled FP-Register vector",
"NaT Consumption vector",
"Speculation vector",
"Reserved",
"Debug vector",
"Unaligned Reference vector",
"Unsupported Data Reference vector",
"Floating-point Fault vector",
"Floating-point Trap vector",
"Lower-Privilege Transfer Trap vector",
"Taken Branch Trap vector",
"Single STep Trap vector",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"IA-32 Exception vector",
"IA-32 Intercept vector",
"IA-32 Interrupt vector",
"Reserved",
"Reserved",
"Reserved"
};
 
static char *vector_to_string(__u16 vector);
static void dump_interrupted_context(istate_t *istate);
 
char *vector_to_string(__u16 vector)
{
ASSERT(vector <= VECTOR_MAX);
if (vector >= VECTORS_16_BUNDLE_START)
return vector_names_16_bundle[(vector-VECTORS_16_BUNDLE_START)/(16*BUNDLE_SIZE)];
else
return vector_names_64_bundle[vector/(64*BUNDLE_SIZE)];
}
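 
/*
 * Worked examples of the mapping above: the break instruction vector at
 * offset 0x2c00 lies below VECTORS_16_BUNDLE_START and indexes the
 * 64-bundle table as 0x2c00 / (64 * 16) = 11, i.e. "Break Instruction
 * vector"; offset 0x5000 indexes the 16-bundle table as
 * (0x5000 - 0x5000) / (16 * 16) = 0, i.e. "Page Not Present vector".
 */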
 
void dump_interrupted_context(istate_t *istate)
{
char *ifa, *iipa, *iip;
 
ifa = get_symtab_entry(istate->cr_ifa);
iipa = get_symtab_entry(istate->cr_iipa);
iip = get_symtab_entry(istate->cr_iip);
 
putchar('\n');
printf("Interrupted context dump:\n");
printf("ar.bsp=%p\tar.bspstore=%p\n", istate->ar_bsp, istate->ar_bspstore);
printf("ar.rnat=%#llx\tar.rsc=%$llx\n", istate->ar_rnat, istate->ar_rsc);
printf("ar.ifs=%#llx\tar.pfs=%#llx\n", istate->ar_ifs, istate->ar_pfs);
printf("cr.isr=%#llx\tcr.ipsr=%#llx\t\n", istate->cr_isr.value, istate->cr_ipsr);
printf("cr.iip=%#llx, #%d\t(%s)\n", istate->cr_iip, istate->cr_isr.ei, iip ? iip : "?");
printf("cr.iipa=%#llx\t(%s)\n", istate->cr_iipa, iipa ? iipa : "?");
printf("cr.ifa=%#llx\t(%s)\n", istate->cr_ifa, ifa ? ifa : "?");
}
 
void general_exception(__u64 vector, istate_t *istate)
{
char *desc = "";
 
dump_interrupted_context(istate);
 
switch (istate->cr_isr.ge_code) {
case GE_ILLEGALOP:
desc = "Illegal Operation fault";
break;
case GE_PRIVOP:
desc = "Privileged Operation fault";
break;
case GE_PRIVREG:
desc = "Privileged Register fault";
break;
case GE_RESREGFLD:
desc = "Reserved Register/Field fault";
break;
case GE_DISBLDISTRAN:
desc = "Disabled Instruction Set Transition fault";
break;
case GE_ILLEGALDEP:
desc = "Illegal Dependency fault";
break;
default:
desc = "unknown";
break;
}
 
panic("General Exception (%s)\n", desc);
}
 
void fpu_enable(void);
 
void disabled_fp_register(__u64 vector, istate_t *istate)
{
#ifdef CONFIG_FPU_LAZY
scheduler_fpu_lazy_request();
#else
dump_interrupted_context(istate);
panic("Interruption: %#hx (%s)\n", (__u16) vector, vector_to_string(vector));
#endif
}
 
 
void nop_handler(__u64 vector, istate_t *istate)
{
}
 
 
 
/** Handle syscall. */
int break_instruction(__u64 vector, istate_t *istate)
{
/*
* Move to next instruction after BREAK.
*/
if (istate->cr_ipsr.ri == 2) {
istate->cr_ipsr.ri = 0;
istate->cr_iip += 16;
} else {
istate->cr_ipsr.ri++;
}
 
if (istate->in4 < SYSCALL_END)
return syscall_table[istate->in4](istate->in0, istate->in1, istate->in2, istate->in3);
else
panic("Undefined syscall %d", istate->in4);
return -1;
}
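 
/*
 * Slot-advance example for the code above: ia64 packs three instruction
 * slots (ri = 0, 1, 2) into each 16-byte bundle. A BREAK in slot 1
 * resumes at ri = 2 of the same bundle; a BREAK in slot 2 wraps ri to 0
 * and advances cr.iip by one bundle (16 bytes).
 */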
 
void universal_handler(__u64 vector, istate_t *istate)
{
dump_interrupted_context(istate);
panic("Interruption: %#hx (%s)\n", (__u16) vector, vector_to_string(vector));
}
 
void external_interrupt(__u64 vector, istate_t *istate)
{
cr_ivr_t ivr;
ivr.value = ivr_read();
srlz_d();
 
switch(ivr.vector) {
case INTERRUPT_TIMER:
it_interrupt();
break;
case INTERRUPT_SPURIOUS:
printf("cpu%d: spurious interrupt\n", CPU->id);
break;
default:
panic("\nUnhandled External Interrupt Vector %d\n", ivr.vector);
break;
}
}
 
/* Reregister irq to be IPC-ready */
void irq_ipc_bind_arch(__native irq)
{
panic("not implemented\n");
/* TODO */
}
/tags/0.1.1/kernel/arch/ia64/src/ia64.c
0,0 → 1,116
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch.h>
#include <arch/ski/ski.h>
#include <arch/drivers/it.h>
#include <arch/interrupt.h>
#include <arch/barrier.h>
#include <arch/asm.h>
#include <arch/register.h>
#include <arch/types.h>
#include <arch/context.h>
#include <arch/stack.h>
#include <arch/mm/page.h>
#include <mm/as.h>
#include <config.h>
#include <userspace.h>
#include <console/console.h>
#include <proc/uarg.h>
#include <syscall/syscall.h>
 
void arch_pre_main(void)
{
/* Setup usermode init tasks. */
init.cnt = 2;
init.tasks[0].addr = INIT0_ADDRESS;
init.tasks[0].size = INIT0_SIZE;
init.tasks[1].addr = INIT1_ADDRESS;
init.tasks[1].size = INIT1_SIZE;
}
 
void arch_pre_mm_init(void)
{
/* Set Interruption Vector Address (i.e. location of interruption vector table). */
iva_write((__address) &ivt);
srlz_d();
ski_init_console();
it_init();
}
 
void arch_post_mm_init(void)
{
}
 
void arch_pre_smp_init(void)
{
}
 
void arch_post_smp_init(void)
{
}
 
/** Enter userspace and never return. */
void userspace(uspace_arg_t *kernel_uarg)
{
psr_t psr;
rsc_t rsc;
 
psr.value = psr_read();
psr.cpl = PL_USER;
psr.i = true; /* start with interrupts enabled */
psr.ic = true;
psr.ri = 0; /* start with instruction #0 */
psr.bn = 1; /* start in bank 1 */
 
__asm__ volatile ("mov %0 = ar.rsc\n" : "=r" (rsc.value));
rsc.loadrs = 0;
rsc.be = false;
rsc.pl = PL_USER;
rsc.mode = 3; /* eager mode */
 
switch_to_userspace((__address) kernel_uarg->uspace_entry,
((__address) kernel_uarg->uspace_stack)+PAGE_SIZE-ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT),
((__address) kernel_uarg->uspace_stack)+PAGE_SIZE,
(__address) kernel_uarg->uspace_uarg,
psr.value, rsc.value);
 
while (1) {
;
}
}
 
/** Set thread-local-storage pointer.
*
* We use r13 (a.k.a. tp) for this purpose.
*/
__native sys_tls_set(__native addr)
{
return 0;
}
/tags/0.1.1/kernel/arch/ia64/src/start.S
0,0 → 1,139
#
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
#include <arch/register.h>
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <mm/asid.h>
 
#define RR_MASK (0xFFFFFFFF00000002)
#define RID_SHIFT 8
#define PS_SHIFT 2
 
#define KERNEL_TRANSLATION_I 0x0010000000000661
#define KERNEL_TRANSLATION_D 0x0010000000000661
 
.section K_TEXT_START, "ax"
 
.global kernel_image_start
 
stack0:
kernel_image_start:
.auto
 
# Fill TR.i and TR.d using Region Register #VRN_KERNEL
 
movl r8=(VRN_KERNEL<<VRN_SHIFT)
mov r9=rr[r8]
movl r10=(RR_MASK)
and r9=r10,r9
movl r10=((RID_KERNEL<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
or r9=r10,r9
mov rr[r8]=r9
 
movl r8=(VRN_KERNEL<<VRN_SHIFT)
mov cr.ifa=r8
movl r10=(KERNEL_PAGE_WIDTH<<PS_SHIFT)
mov cr.itir=r10
movl r10=(KERNEL_TRANSLATION_I)
itr.i itr[r0]=r10
movl r10=(KERNEL_TRANSLATION_D)
itr.d dtr[r0]=r10
 
# initialize PSR
mov psr.l = r0
srlz.i
srlz.d
movl r10=(PSR_DT_MASK|PSR_RT_MASK|PSR_IT_MASK|PSR_IC_MASK) /* Enable paging */
mov r9=psr
or r10=r10,r9
mov cr.ipsr=r10
mov cr.ifs=r0
movl r8=paging_start
mov cr.iip=r8
srlz.d
srlz.i
 
.explicit
/*
* Return From Interrupt is the only way to fill the upper half of the PSR.
*/
rfi;;
 
.global paging_start
paging_start:
 
/*
* Now we are paging.
*/
 
# switch to register bank 1
bsw.1
# initialize register stack
mov ar.rsc = r0
movl r8=(VRN_KERNEL<<VRN_SHIFT) ;;
mov ar.bspstore = r8
loadrs
 
# initialize memory stack to some sane value
movl r12 = stack0;;
add r12 = - 16, r12 /* allocate a scratch area on the stack */
 
# initialize gp (Global Pointer) register
movl r1 = _hardcoded_load_address
 
/*
* Initialize hardcoded_* variables.
*/
movl r14 = _hardcoded_ktext_size
movl r15 = _hardcoded_kdata_size
movl r16 = _hardcoded_load_address ;;
addl r17 = @gprel(hardcoded_ktext_size), gp
addl r18 = @gprel(hardcoded_kdata_size), gp
addl r19 = @gprel(hardcoded_load_address), gp
;;
st8 [r17] = r14
st8 [r18] = r15
st8 [r19] = r16
 
 
ssm (1<<19);; /* Disable f32-f127 */
srlz.i;
srlz.d;;
 
br.call.sptk.many b0 = arch_pre_main
 
movl r18=main_bsp ;;
mov b1=r18 ;;
br.call.sptk.many b0=b1
 
 
0:
br 0b
/tags/0.1.1/kernel/arch/ia64/src/proc/scheduler.c
0,0 → 1,78
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <arch.h>
#include <arch/register.h>
#include <arch/context.h>
#include <arch/stack.h>
#include <arch/mm/tlb.h>
#include <config.h>
#include <align.h>
 
/** Perform ia64 specific tasks needed before the new task is run. */
void before_task_runs_arch(void)
{
}
 
/** Prepare kernel stack pointers in bank 0 r22 and r23 and make sure the stack is mapped in DTR. */
void before_thread_runs_arch(void)
{
__address base;
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
 
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<(KERNEL_PAGE_WIDTH))) {
/*
* Kernel stack of this thread is not mapped by DTR[TR_KERNEL].
* Use DTR[TR_KSTACK1] and DTR[TR_KSTACK2] to map it.
*/
dtlb_kernel_mapping_insert((__address) THREAD->kstack, KA2PA(THREAD->kstack), true, DTR_KSTACK1);
dtlb_kernel_mapping_insert((__address) THREAD->kstack + PAGE_SIZE, KA2PA(THREAD->kstack) + FRAME_SIZE, true, DTR_KSTACK2);
}
/*
* Record address of kernel backing store to bank 0 r22.
* Record address of kernel stack to bank 0 r23.
* These values will be found there after switch from userspace.
*/
__asm__ volatile (
"bsw.0\n"
"mov r22 = %0\n"
"mov r23 = %1\n"
"bsw.1\n"
:
: "r" (&THREAD->kstack[THREAD_STACK_SIZE]),
"r" (&THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA])
);
}
 
void after_thread_ran_arch(void)
{
}
/tags/0.1.1/kernel/arch/ia64/src/ivt.S
0,0 → 1,582
#
# Copyright (C) 2005 Jakub Vana
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
#include <arch/stack.h>
#include <arch/register.h>
#include <arch/mm/page.h>
#include <align.h>
 
#define FRS_TO_SAVE 30
#define STACK_ITEMS (19 + FRS_TO_SAVE*2)
#define STACK_FRAME_SIZE ALIGN_UP((STACK_ITEMS*STACK_ITEM_SIZE) + STACK_SCRATCH_AREA_SIZE, STACK_ALIGNMENT)
 
#if (STACK_ITEMS % 2 == 0)
# define STACK_FRAME_BIAS 8
#else
# define STACK_FRAME_BIAS 16
#endif
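 
/*
 * Worked values for the definitions above: STACK_ITEMS = 19 + 30*2 = 79,
 * which is odd, so STACK_FRAME_BIAS = 16 and STACK_FRAME_SIZE =
 * ALIGN_UP(79 * STACK_ITEM_SIZE + STACK_SCRATCH_AREA_SIZE, STACK_ALIGNMENT).
 * The bias parity presumably keeps the save pointer r31 and the frame
 * base r12 from landing on the same 16-byte slot (an interpretation;
 * the source does not state the rationale).
 */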
 
/** Partitioning of bank 0 registers. */
#define R_OFFS r16
#define R_HANDLER r17
#define R_RET r18
#define R_TMP r19
#define R_KSTACK_BSP r22 /* keep in sync with before_thread_runs_arch() */
#define R_KSTACK r23 /* keep in sync with before_thread_runs_arch() */
 
/** Heavyweight interrupt handler
*
* This macro roughly follows steps from 1 to 19 described in
* Intel Itanium Architecture Software Developer's Manual, Chapter 3.4.2.
*
* HEAVYWEIGHT_HANDLER macro must cram into 16 bundles (48 instructions).
* This goal is achieved by using procedure calls after RSE becomes operational.
*
* Some steps are skipped (enabling and disabling interrupts).
*
* @param offs Offset from the beginning of IVT.
* @param handler Interrupt handler address.
*/
.macro HEAVYWEIGHT_HANDLER offs, handler=universal_handler
.org ivt + \offs
mov R_OFFS = \offs
movl R_HANDLER = \handler ;;
br heavyweight_handler
.endm
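 
/*
 * Illustrative instantiation (the vector table itself is outside this
 * hunk; offset 0x2c00 matches the break-instruction check below):
 *
 *	HEAVYWEIGHT_HANDLER 0x2c00 break_instruction
 *
 * Vectors not wired to a specific routine fall back to the default
 * argument, universal_handler.
 */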
 
.global heavyweight_handler
heavyweight_handler:
/* 1. copy interrupt registers into bank 0 */
/*
* Note that r24-r31 from bank 0 can be used only as long as PSR.ic = 0.
*/
/* Set up FPU as in interrupted context. */
mov r24 = psr
mov r25 = cr.ipsr
mov r26 = PSR_DFH_MASK
mov r27 = ~PSR_DFH_MASK ;;
and r26 = r25, r26
and r24 = r24, r27;;
or r24 = r24, r26;;
mov psr.l = r24;;
srlz.i
srlz.d;;
 
mov r24 = cr.iip
mov r25 = cr.ipsr
mov r26 = cr.iipa
mov r27 = cr.isr
mov r28 = cr.ifa
/* 2. preserve predicate register into bank 0 */
mov r29 = pr ;;
/* 3. switch to kernel memory stack */
mov r30 = cr.ipsr
shr.u r31 = r12, VRN_SHIFT ;;
 
shr.u r30 = r30, PSR_CPL_SHIFT ;;
and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;
 
/*
* Set p3 to true if the interrupted context executed in kernel mode.
* Set p4 to true if the interrupted context did not execute in kernel mode.
*/
cmp.eq p3, p4 = r30, r0 ;;
cmp.eq p1, p2 = r30, r0 ;; /* remember IPSR setting in p1 and p2 */
 
/*
* Set p3 to true if the stack register references kernel address space.
* Set p4 to true if the stack register does not reference kernel address space.
*/
(p3) cmp.eq p3, p4 = VRN_KERNEL, r31 ;;
/*
* Now, p4 is true iff the stack needs to be switched to kernel stack.
*/
mov r30 = r12
(p4) mov r12 = R_KSTACK ;;
add r31 = -STACK_FRAME_BIAS, r12 ;;
add r12 = -STACK_FRAME_SIZE, r12
 
/* 4. save registers in bank 0 into memory stack */
 
/*
* If this is break_instruction handler,
* copy input parameters to stack.
*/
mov R_TMP = 0x2c00 ;;
cmp.eq p6,p5 = R_OFFS, R_TMP ;;
/*
* From now on, if this is break_instruction handler, p6 is true and p5 is false.
* Otherwise p6 is false and p5 is true.
* Note that p5 is a preserved predicate register and we make use of it.
*/
 
(p6) st8 [r31] = r36, -8 ;; /* save in4 */
(p6) st8 [r31] = r35, -8 ;; /* save in3 */
(p6) st8 [r31] = r34, -8 ;; /* save in2 */
(p6) st8 [r31] = r33, -8 ;; /* save in1 */
(p6) st8 [r31] = r32, -8 ;; /* save in0 */
(p5) add r31 = -40, r31 ;;
st8 [r31] = r30, -8 ;; /* save old stack pointer */
st8 [r31] = r29, -8 ;; /* save predicate registers */
 
st8 [r31] = r24, -8 ;; /* save cr.iip */
st8 [r31] = r25, -8 ;; /* save cr.ipsr */
st8 [r31] = r26, -8 ;; /* save cr.iipa */
st8 [r31] = r27, -8 ;; /* save cr.isr */
st8 [r31] = r28, -8 ;; /* save cr.ifa */
 
/* 5. RSE switch from interrupted context */
mov r24 = ar.rsc
mov r25 = ar.pfs
cover
mov r26 = cr.ifs
st8 [r31] = r24, -8 ;; /* save ar.rsc */
st8 [r31] = r25, -8 ;; /* save ar.pfs */
st8 [r31] = r26, -8 /* save cr.ifs */
and r24 = ~(RSC_PL_MASK), r24 ;;
and r30 = ~(RSC_MODE_MASK), r24 ;;
mov ar.rsc = r30 ;; /* update RSE state */
mov r27 = ar.rnat
mov r28 = ar.bspstore ;;
/*
* Inspect BSPSTORE to figure out whether it is necessary to switch to kernel BSPSTORE.
*/
(p1) shr.u r30 = r28, VRN_SHIFT ;;
(p1) cmp.eq p1, p2 = VRN_KERNEL, r30 ;;
/*
* If BSPSTORE needs to be switched, p1 is false and p2 is true.
*/
(p1) mov r30 = r28
(p2) mov r30 = R_KSTACK_BSP ;;
(p2) mov ar.bspstore = r30 ;;
mov r29 = ar.bsp
st8 [r31] = r27, -8 ;; /* save ar.rnat */
st8 [r31] = r30, -8 ;; /* save new value written to ar.bspstore */
st8 [r31] = r28, -8 ;; /* save ar.bspstore */
st8 [r31] = r29, -8 /* save ar.bsp */
mov ar.rsc = r24 /* restore RSE's setting + kernel privileges */
/* steps 6 - 15 are done by heavyweight_handler_inner() */
mov R_RET = b0 /* save b0 belonging to interrupted context */
br.call.sptk.many b0 = heavyweight_handler_inner
0: mov b0 = R_RET /* restore b0 belonging to the interrupted context */
 
/* 16. RSE switch to interrupted context */
cover /* allocate zero-sized frame (step 1 from the Intel docs) */
 
add r31 = (STACK_SCRATCH_AREA_SIZE+(FRS_TO_SAVE*2*8)), r12 ;;
 
ld8 r30 = [r31], +8 ;; /* load ar.bsp */
ld8 r29 = [r31], +8 ;; /* load ar.bspstore */
ld8 r28 = [r31], +8 ;; /* load ar.bspstore_new */
sub r27 = r30 , r28 ;; /* calculate loadrs (step 2) */
shl r27 = r27, 16
 
mov r24 = ar.rsc ;;
and r30 = ~3, r24 ;;
or r24 = r30 , r27 ;;
mov ar.rsc = r24 ;; /* place RSE in enforced lazy mode */
 
loadrs /* (step 3) */
 
ld8 r27 = [r31], +8 ;; /* load ar.rnat */
ld8 r26 = [r31], +8 ;; /* load cr.ifs */
ld8 r25 = [r31], +8 ;; /* load ar.pfs */
ld8 r24 = [r31], +8 ;; /* load ar.rsc */
 
mov ar.bspstore = r29 ;; /* (step 4) */
mov ar.rnat = r27 /* (step 5) */
 
mov ar.pfs = r25 /* (step 6) */
mov cr.ifs = r26
 
mov ar.rsc = r24 /* (step 7) */
 
/* 17. restore interruption state from memory stack */
ld8 r28 = [r31], +8 ;; /* load cr.ifa */
ld8 r27 = [r31], +8 ;; /* load cr.isr */
ld8 r26 = [r31], +8 ;; /* load cr.iipa */
ld8 r25 = [r31], +8 ;; /* load cr.ipsr */
ld8 r24 = [r31], +8 ;; /* load cr.iip */
 
mov cr.iip = r24;;
mov cr.iipa = r26
mov cr.isr = r27
mov cr.ifa = r28
 
/* Propagate the current PSR.dfh bit into the saved IPSR, preserving the present FPU disabled/enabled state across the return. */
mov r24 = psr
mov r26 = PSR_DFH_MASK
mov r27 = ~PSR_DFH_MASK ;;
and r25 = r25, r27
and r24 = r24, r26 ;;
or r25 = r25, r24;;
mov cr.ipsr = r25
 
/* 18. restore predicate registers from memory stack */
ld8 r29 = [r31], +8 ;; /* load predicate registers */
mov pr = r29
/* 19. return from interruption */
ld8 r12 = [r31] /* load stack pointer */
rfi ;;
 
.global heavyweight_handler_inner
heavyweight_handler_inner:
/*
* From this point, the rest of the interrupted context
* will be preserved in stacked registers and backing store.
*/
alloc loc0 = ar.pfs, 0, 48, 2, 0 ;;
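/* 0 input, 48 local and 2 output registers; loc0 receives ar.pfs */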
/* bank 0 is going to be shadowed, copy essential data from there */
mov loc1 = R_RET /* b0 belonging to interrupted context */
mov loc2 = R_HANDLER
mov out0 = R_OFFS
add out1 = STACK_SCRATCH_AREA_SIZE, r12
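/* out0: interruption vector offset, out1: address of the saved context on the memory stack */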
 
/* 6. switch to bank 1 and reenable PSR.ic */
ssm PSR_IC_MASK
bsw.1 ;;
srlz.d
/* 7. preserve branch and application registers */
mov loc3 = ar.unat
mov loc4 = ar.lc
mov loc5 = ar.ec
mov loc6 = ar.ccv
mov loc7 = ar.csd
mov loc8 = ar.ssd
mov loc9 = b0
mov loc10 = b1
mov loc11 = b2
mov loc12 = b3
mov loc13 = b4
mov loc14 = b5
mov loc15 = b6
mov loc16 = b7
/* 8. preserve general and floating-point registers */
mov loc17 = r1
mov loc18 = r2
mov loc19 = r3
mov loc20 = r4
mov loc21 = r5
mov loc22 = r6
mov loc23 = r7
(p5) mov loc24 = r8 /* only if not in break_instruction handler */
mov loc25 = r9
mov loc26 = r10
mov loc27 = r11
/* skip r12 (stack pointer) */
mov loc28 = r13
mov loc29 = r14
mov loc30 = r15
mov loc31 = r16
mov loc32 = r17
mov loc33 = r18
mov loc34 = r19
mov loc35 = r20
mov loc36 = r21
mov loc37 = r22
mov loc38 = r23
mov loc39 = r24
mov loc40 = r25
mov loc41 = r26
mov loc42 = r27
mov loc43 = r28
mov loc44 = r29
mov loc45 = r30
mov loc46 = r31
 
add r24 = 96 + STACK_SCRATCH_AREA_SIZE, r12
add r25 = 112 + STACK_SCRATCH_AREA_SIZE, r12
add r26 = 0 + STACK_SCRATCH_AREA_SIZE, r12
add r27 = 16 + STACK_SCRATCH_AREA_SIZE, r12
add r28 = 32 + STACK_SCRATCH_AREA_SIZE, r12
add r29 = 48 + STACK_SCRATCH_AREA_SIZE, r12
add r30 = 64 + STACK_SCRATCH_AREA_SIZE, r12
add r31 = 80 + STACK_SCRATCH_AREA_SIZE, r12 ;;
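/*
 * Eight base registers spaced 16 bytes apart, each post-incremented
 * by 0x80 (eight 16-byte slots), interleave the stf.spill stores so
 * that consecutive spills use independent addresses.
 */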
stf.spill [r26] = f2, 0x80
stf.spill [r27] = f3, 0x80
stf.spill [r28] = f4, 0x80
stf.spill [r29] = f5, 0x80
stf.spill [r30] = f6, 0x80
stf.spill [r31] = f7, 0x80 ;;
 
stf.spill [r24] = f8, 0x80
stf.spill [r25] = f9, 0x80
stf.spill [r26] = f10, 0x80
stf.spill [r27] = f11, 0x80
stf.spill [r28] = f12, 0x80
stf.spill [r29] = f13, 0x80
stf.spill [r30] = f14, 0x80
stf.spill [r31] = f15, 0x80 ;;
 
stf.spill [r24] = f16, 0x80
stf.spill [r25] = f17, 0x80
stf.spill [r26] = f18, 0x80
stf.spill [r27] = f19, 0x80
stf.spill [r28] = f20, 0x80
stf.spill [r29] = f21, 0x80
stf.spill [r30] = f22, 0x80
stf.spill [r31] = f23, 0x80 ;;
 
stf.spill [r24] = f24, 0x80
stf.spill [r25] = f25, 0x80
stf.spill [r26] = f26, 0x80
stf.spill [r27] = f27, 0x80
stf.spill [r28] = f28, 0x80
stf.spill [r29] = f29, 0x80
stf.spill [r30] = f30, 0x80
stf.spill [r31] = f31, 0x80 ;;
 
mov loc47 = ar.fpsr /* preserve floating point status register */
/* 9. skipped (will not enable interrupts) */
/*
* ssm PSR_I_MASK
* ;;
* srlz.d
*/
 
/* 10. call handler */
movl r1 = _hardcoded_load_address /* set up the kernel global pointer (r1 = gp) */
mov b1 = loc2
br.call.sptk.many b0 = b1
 
/* 11. return from handler */
0:
/* 12. skipped (will not disable interrupts) */
/*
* rsm PSR_I_MASK
* ;;
* srlz.d
*/
 
/* 13. restore general and floating-point registers */
add r24 = 96 + STACK_SCRATCH_AREA_SIZE, r12
add r25 = 112 + STACK_SCRATCH_AREA_SIZE, r12
add r26 = 0 + STACK_SCRATCH_AREA_SIZE, r12
add r27 = 16 + STACK_SCRATCH_AREA_SIZE, r12
add r28 = 32 + STACK_SCRATCH_AREA_SIZE, r12
add r29 = 48 + STACK_SCRATCH_AREA_SIZE, r12
add r30 = 64 + STACK_SCRATCH_AREA_SIZE, r12
add r31 = 80 + STACK_SCRATCH_AREA_SIZE, r12 ;;
 
ldf.fill f2 = [r26], 0x80
ldf.fill f3 = [r27], 0x80
ldf.fill f4 = [r28], 0x80
ldf.fill f5 = [r29], 0x80
ldf.fill f6 = [r30], 0x80
ldf.fill f7 = [r31], 0x80 ;;
 
ldf.fill f8 = [r24], 0x80
ldf.fill f9 = [r25], 0x80
ldf.fill f10 = [r26], 0x80
ldf.fill f11 = [r27], 0x80
ldf.fill f12 = [r28], 0x80
ldf.fill f13 = [r29], 0x80
ldf.fill f14 = [r30], 0x80
ldf.fill f15 = [r31], 0x80 ;;
 
ldf.fill f16 = [r24], 0x80
ldf.fill f17 = [r25], 0x80
ldf.fill f18 = [r26], 0x80
ldf.fill f19 = [r27], 0x80
ldf.fill f20 = [r28], 0x80
ldf.fill f21 = [r29], 0x80
ldf.fill f22 = [r30], 0x80
ldf.fill f23 = [r31], 0x80 ;;
 
ldf.fill f24 = [r24], 0x80
ldf.fill f25 = [r25], 0x80
ldf.fill f26 = [r26], 0x80
ldf.fill f27 = [r27], 0x80
ldf.fill f28 = [r28], 0x80
ldf.fill f29 = [r29], 0x80
ldf.fill f30 = [r30], 0x80
ldf.fill f31 = [r31], 0x80 ;;
mov r1 = loc17
mov r2 = loc18
mov r3 = loc19
mov r4 = loc20
mov r5 = loc21
mov r6 = loc22
mov r7 = loc23
(p5) mov r8 = loc24 /* only if not in break_instruction handler */
mov r9 = loc25
mov r10 = loc26
mov r11 = loc27
/* skip r12 (stack pointer) */
mov r13 = loc28
mov r14 = loc29
mov r15 = loc30
mov r16 = loc31
mov r17 = loc32
mov r18 = loc33
mov r19 = loc34
mov r20 = loc35
mov r21 = loc36
mov r22 = loc37
mov r23 = loc38
mov r24 = loc39
mov r25 = loc40
mov r26 = loc41
mov r27 = loc42
mov r28 = loc43
mov r29 = loc44
mov r30 = loc45
mov r31 = loc46
 
mov ar.fpsr = loc47 /* restore floating point status register */
/* 14. restore branch and application registers */
mov ar.unat = loc3
mov ar.lc = loc4
mov ar.ec = loc5
mov ar.ccv = loc6
mov ar.csd = loc7
mov ar.ssd = loc8
mov b0 = loc9
mov b1 = loc10
mov b2 = loc11
mov b3 = loc12
mov b4 = loc13
mov b5 = loc14
mov b6 = loc15
mov b7 = loc16
/* 15. disable PSR.ic and switch to bank 0 */
rsm PSR_IC_MASK
bsw.0 ;;
srlz.d
 
mov R_RET = loc1
mov ar.pfs = loc0
br.ret.sptk.many b0
 
.global ivt
.align 32768
ivt:
HEAVYWEIGHT_HANDLER 0x0000
HEAVYWEIGHT_HANDLER 0x0400
HEAVYWEIGHT_HANDLER 0x0800
HEAVYWEIGHT_HANDLER 0x0c00 alternate_instruction_tlb_fault
HEAVYWEIGHT_HANDLER 0x1000 alternate_data_tlb_fault
HEAVYWEIGHT_HANDLER 0x1400 data_nested_tlb_fault
HEAVYWEIGHT_HANDLER 0x1800
HEAVYWEIGHT_HANDLER 0x1c00
HEAVYWEIGHT_HANDLER 0x2000 data_dirty_bit_fault
HEAVYWEIGHT_HANDLER 0x2400 instruction_access_bit_fault
HEAVYWEIGHT_HANDLER 0x2800 data_access_bit_fault
HEAVYWEIGHT_HANDLER 0x2c00 break_instruction
HEAVYWEIGHT_HANDLER 0x3000 external_interrupt /* For external interrupt, heavyweight handler is used. */
HEAVYWEIGHT_HANDLER 0x3400
HEAVYWEIGHT_HANDLER 0x3800
HEAVYWEIGHT_HANDLER 0x3c00
HEAVYWEIGHT_HANDLER 0x4000
HEAVYWEIGHT_HANDLER 0x4400
HEAVYWEIGHT_HANDLER 0x4800
HEAVYWEIGHT_HANDLER 0x4c00
 
HEAVYWEIGHT_HANDLER 0x5000 page_not_present
HEAVYWEIGHT_HANDLER 0x5100
HEAVYWEIGHT_HANDLER 0x5200
HEAVYWEIGHT_HANDLER 0x5300
HEAVYWEIGHT_HANDLER 0x5400 general_exception
HEAVYWEIGHT_HANDLER 0x5500 disabled_fp_register
HEAVYWEIGHT_HANDLER 0x5600
HEAVYWEIGHT_HANDLER 0x5700
HEAVYWEIGHT_HANDLER 0x5800
HEAVYWEIGHT_HANDLER 0x5900
HEAVYWEIGHT_HANDLER 0x5a00
HEAVYWEIGHT_HANDLER 0x5b00
HEAVYWEIGHT_HANDLER 0x5c00
HEAVYWEIGHT_HANDLER 0x5d00
HEAVYWEIGHT_HANDLER 0x5e00
HEAVYWEIGHT_HANDLER 0x5f00
HEAVYWEIGHT_HANDLER 0x6000
HEAVYWEIGHT_HANDLER 0x6100
HEAVYWEIGHT_HANDLER 0x6200
HEAVYWEIGHT_HANDLER 0x6300
HEAVYWEIGHT_HANDLER 0x6400
HEAVYWEIGHT_HANDLER 0x6500
HEAVYWEIGHT_HANDLER 0x6600
HEAVYWEIGHT_HANDLER 0x6700
HEAVYWEIGHT_HANDLER 0x6800
HEAVYWEIGHT_HANDLER 0x6900
HEAVYWEIGHT_HANDLER 0x6a00
HEAVYWEIGHT_HANDLER 0x6b00
HEAVYWEIGHT_HANDLER 0x6c00
HEAVYWEIGHT_HANDLER 0x6d00
HEAVYWEIGHT_HANDLER 0x6e00
HEAVYWEIGHT_HANDLER 0x6f00
 
HEAVYWEIGHT_HANDLER 0x7000
HEAVYWEIGHT_HANDLER 0x7100
HEAVYWEIGHT_HANDLER 0x7200
HEAVYWEIGHT_HANDLER 0x7300
HEAVYWEIGHT_HANDLER 0x7400
HEAVYWEIGHT_HANDLER 0x7500
HEAVYWEIGHT_HANDLER 0x7600
HEAVYWEIGHT_HANDLER 0x7700
HEAVYWEIGHT_HANDLER 0x7800
HEAVYWEIGHT_HANDLER 0x7900
HEAVYWEIGHT_HANDLER 0x7a00
HEAVYWEIGHT_HANDLER 0x7b00
HEAVYWEIGHT_HANDLER 0x7c00
HEAVYWEIGHT_HANDLER 0x7d00
HEAVYWEIGHT_HANDLER 0x7e00
HEAVYWEIGHT_HANDLER 0x7f00
/tags/0.1.1/kernel/arch/ia64/src/context.S
0,0 → 1,246
#
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
.text
 
.global context_save_arch
.global context_restore_arch
 
context_save_arch:
alloc loc0 = ar.pfs, 1, 8, 0, 0
mov loc1 = ar.unat ;;
/* loc2 will hold the address of the ar.unat (callee) slot */
mov loc3 = ar.rsc
 
.auto
 
/*
* Flush dirty registers to backing store.
* After this, ar.bsp and ar.bspstore are equal.
*/
flushrs
mov loc4 = ar.bsp
/*
 * Put the RSE into enforced lazy mode
 * so that ar.rnat can be read.
 */
and loc5 = ~3, loc3
mov ar.rsc = loc5
mov loc5 = ar.rnat
 
.explicit
 
mov loc6 = ar.lc
/*
* Save application registers
*/
st8 [in0] = loc0, 8 ;; /* save ar.pfs */
st8 [in0] = loc1, 8 ;; /* save ar.unat (caller) */
mov loc2 = in0 ;;
add in0 = 8, in0 ;; /* skip ar.unat (callee) */
st8 [in0] = loc3, 8 ;; /* save ar.rsc */
st8 [in0] = loc4, 8 ;; /* save ar.bsp */
st8 [in0] = loc5, 8 ;; /* save ar.rnat */
st8 [in0] = loc6, 8 ;; /* save ar.lc */
/*
* Save general registers including NaT bits
*/
st8.spill [in0] = r1, 8 ;;
st8.spill [in0] = r4, 8 ;;
st8.spill [in0] = r5, 8 ;;
st8.spill [in0] = r6, 8 ;;
st8.spill [in0] = r7, 8 ;;
st8.spill [in0] = r12, 8 ;; /* save sp */
st8.spill [in0] = r13, 8 ;;
 
mov loc3 = ar.unat ;;
st8 [loc2] = loc3 /* save ar.unat (callee) */
 
/*
* Save branch registers
*/
mov loc2 = b0 ;;
st8 [in0] = loc2, 8 /* save pc */
mov loc3 = b1 ;;
st8 [in0] = loc3, 8
mov loc4 = b2 ;;
st8 [in0] = loc4, 8
mov loc5 = b3 ;;
st8 [in0] = loc5, 8
mov loc6 = b4 ;;
st8 [in0] = loc6, 8
mov loc7 = b5 ;;
st8 [in0] = loc7, 8
 
/*
* Save predicate registers
*/
mov loc2 = pr ;;
st8 [in0] = loc2, 16 ;; /* the following FPU registers must be spilled to a 16-byte aligned address */
 
/*
* Save floating-point registers.
*/
stf.spill [in0] = f2, 16 ;;
stf.spill [in0] = f3, 16 ;;
stf.spill [in0] = f4, 16 ;;
stf.spill [in0] = f5, 16 ;;
 
stf.spill [in0] = f16, 16 ;;
stf.spill [in0] = f17, 16 ;;
stf.spill [in0] = f18, 16 ;;
stf.spill [in0] = f19, 16 ;;
stf.spill [in0] = f20, 16 ;;
stf.spill [in0] = f21, 16 ;;
stf.spill [in0] = f22, 16 ;;
stf.spill [in0] = f23, 16 ;;
stf.spill [in0] = f24, 16 ;;
stf.spill [in0] = f25, 16 ;;
stf.spill [in0] = f26, 16 ;;
stf.spill [in0] = f27, 16 ;;
stf.spill [in0] = f28, 16 ;;
stf.spill [in0] = f29, 16 ;;
stf.spill [in0] = f30, 16 ;;
stf.spill [in0] = f31, 16 ;;
 
mov ar.unat = loc1
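/*
 * The add r1 = r2, r3, 1 form computes r2 + r3 + 1; with both
 * source operands r0 it materializes the constant 1.
 */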
add r8 = r0, r0, 1 /* context_save returns 1 */
br.ret.sptk.many b0
 
context_restore_arch:
alloc loc0 = ar.pfs, 1, 9, 0, 0 ;;
 
ld8 loc0 = [in0], 8 ;; /* load ar.pfs */
ld8 loc1 = [in0], 8 ;; /* load ar.unat (caller) */
ld8 loc2 = [in0], 8 ;; /* load ar.unat (callee) */
ld8 loc3 = [in0], 8 ;; /* load ar.rsc */
ld8 loc4 = [in0], 8 ;; /* load ar.bsp */
ld8 loc5 = [in0], 8 ;; /* load ar.rnat */
ld8 loc6 = [in0], 8 ;; /* load ar.lc */
.auto
 
/*
* Invalidate the ALAT
*/
invala
 
/*
 * Put the RSE into enforced lazy mode
 * so that ar.bspstore and ar.rnat can be written.
 */
movl loc8 = ~3
and loc8 = loc3, loc8
mov ar.rsc = loc8
 
/*
* Flush dirty registers to backing store.
* We do this because we want the following move
* to ar.bspstore to assign the same value to ar.bsp.
*/
flushrs
 
/*
* Restore application registers
*/
mov ar.bspstore = loc4 /* rse.bspload = ar.bsp = ar.bspstore = loc4 */
mov ar.rnat = loc5
mov ar.pfs = loc0
mov ar.rsc = loc3
 
.explicit
 
mov ar.unat = loc2 ;;
mov ar.lc = loc6
/*
* Restore general registers including NaT bits
*/
ld8.fill r1 = [in0], 8 ;;
ld8.fill r4 = [in0], 8 ;;
ld8.fill r5 = [in0], 8 ;;
ld8.fill r6 = [in0], 8 ;;
ld8.fill r7 = [in0], 8 ;;
ld8.fill r12 = [in0], 8 ;; /* restore sp */
ld8.fill r13 = [in0], 8 ;;
 
/*
* Restore branch registers
*/
ld8 loc2 = [in0], 8 ;; /* restore pc */
mov b0 = loc2
ld8 loc3 = [in0], 8 ;;
mov b1 = loc3
ld8 loc4 = [in0], 8 ;;
mov b2 = loc4
ld8 loc5 = [in0], 8 ;;
mov b3 = loc5
ld8 loc6 = [in0], 8 ;;
mov b4 = loc6
ld8 loc7 = [in0], 8 ;;
mov b5 = loc7
 
/*
* Restore predicate registers
*/
ld8 loc2 = [in0], 16 ;;
mov pr = loc2, ~0
/*
* Restore floating-point registers.
*/
ldf.fill f2 = [in0], 16 ;;
ldf.fill f3 = [in0], 16 ;;
ldf.fill f4 = [in0], 16 ;;
ldf.fill f5 = [in0], 16 ;;
 
ldf.fill f16 = [in0], 16 ;;
ldf.fill f17 = [in0], 16 ;;
ldf.fill f18 = [in0], 16 ;;
ldf.fill f19 = [in0], 16 ;;
ldf.fill f20 = [in0], 16 ;;
ldf.fill f21 = [in0], 16 ;;
ldf.fill f22 = [in0], 16 ;;
ldf.fill f23 = [in0], 16 ;;
ldf.fill f24 = [in0], 16 ;;
ldf.fill f25 = [in0], 16 ;;
ldf.fill f26 = [in0], 16 ;;
ldf.fill f27 = [in0], 16 ;;
ldf.fill f28 = [in0], 16 ;;
ldf.fill f29 = [in0], 16 ;;
ldf.fill f30 = [in0], 16 ;;
ldf.fill f31 = [in0], 16 ;;
mov ar.unat = loc1
mov r8 = r0 /* context_restore returns 0 */
br.ret.sptk.many b0
/tags/0.1.1/kernel/arch/ia64/src/fpu_context.c
0,0 → 1,475
/*
* Copyright (C) 2005 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
 
#include <fpu_context.h>
#include <arch/register.h>
#include <print.h>
 
 
void fpu_context_save(fpu_context_t *fctx)
{
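/*
 * The high floating-point registers f32-f127 are spilled through eight
 * base pointers spaced 16 bytes apart, each post-incremented by 0x80,
 * so that consecutive stf.spill instructions use independent addresses.
 */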
 
asm volatile(
 
"stf.spill [%0]=f32,0x80\n"
"stf.spill [%1]=f33,0x80\n"
"stf.spill [%2]=f34,0x80\n"
"stf.spill [%3]=f35,0x80\n"
"stf.spill [%4]=f36,0x80\n"
"stf.spill [%5]=f37,0x80\n"
"stf.spill [%6]=f38,0x80\n"
"stf.spill [%7]=f39,0x80\n;;"
 
"stf.spill [%0]=f40,0x80\n"
"stf.spill [%1]=f41,0x80\n"
"stf.spill [%2]=f42,0x80\n"
"stf.spill [%3]=f43,0x80\n"
"stf.spill [%4]=f44,0x80\n"
"stf.spill [%5]=f45,0x80\n"
"stf.spill [%6]=f46,0x80\n"
"stf.spill [%7]=f47,0x80\n;;"
 
"stf.spill [%0]=f48,0x80\n"
"stf.spill [%1]=f49,0x80\n"
"stf.spill [%2]=f50,0x80\n"
"stf.spill [%3]=f51,0x80\n"
"stf.spill [%4]=f52,0x80\n"
"stf.spill [%5]=f53,0x80\n"
"stf.spill [%6]=f54,0x80\n"
"stf.spill [%7]=f55,0x80\n;;"
 
"stf.spill [%0]=f56,0x80\n"
"stf.spill [%1]=f57,0x80\n"
"stf.spill [%2]=f58,0x80\n"
"stf.spill [%3]=f59,0x80\n"
"stf.spill [%4]=f60,0x80\n"
"stf.spill [%5]=f61,0x80\n"
"stf.spill [%6]=f62,0x80\n"
"stf.spill [%7]=f63,0x80\n;;"
 
"stf.spill [%0]=f64,0x80\n"
"stf.spill [%1]=f65,0x80\n"
"stf.spill [%2]=f66,0x80\n"
"stf.spill [%3]=f67,0x80\n"
"stf.spill [%4]=f68,0x80\n"
"stf.spill [%5]=f69,0x80\n"
"stf.spill [%6]=f70,0x80\n"
"stf.spill [%7]=f71,0x80\n;;"
 
"stf.spill [%0]=f72,0x80\n"
"stf.spill [%1]=f73,0x80\n"
"stf.spill [%2]=f74,0x80\n"
"stf.spill [%3]=f75,0x80\n"
"stf.spill [%4]=f76,0x80\n"
"stf.spill [%5]=f77,0x80\n"
"stf.spill [%6]=f78,0x80\n"
"stf.spill [%7]=f79,0x80\n;;"
 
"stf.spill [%0]=f80,0x80\n"
"stf.spill [%1]=f81,0x80\n"
"stf.spill [%2]=f82,0x80\n"
"stf.spill [%3]=f83,0x80\n"
"stf.spill [%4]=f84,0x80\n"
"stf.spill [%5]=f85,0x80\n"
"stf.spill [%6]=f86,0x80\n"
"stf.spill [%7]=f87,0x80\n;;"
 
"stf.spill [%0]=f88,0x80\n"
"stf.spill [%1]=f89,0x80\n"
"stf.spill [%2]=f90,0x80\n"
"stf.spill [%3]=f91,0x80\n"
"stf.spill [%4]=f92,0x80\n"
"stf.spill [%5]=f93,0x80\n"
"stf.spill [%6]=f94,0x80\n"
"stf.spill [%7]=f95,0x80\n;;"
 
 
"stf.spill [%0]=f96,0x80\n"
"stf.spill [%1]=f97,0x80\n"
"stf.spill [%2]=f98,0x80\n"
"stf.spill [%3]=f99,0x80\n"
"stf.spill [%4]=f100,0x80\n"
"stf.spill [%5]=f101,0x80\n"
"stf.spill [%6]=f102,0x80\n"
"stf.spill [%7]=f103,0x80\n;;"
 
"stf.spill [%0]=f104,0x80\n"
"stf.spill [%1]=f105,0x80\n"
"stf.spill [%2]=f106,0x80\n"
"stf.spill [%3]=f107,0x80\n"
"stf.spill [%4]=f108,0x80\n"
"stf.spill [%5]=f109,0x80\n"
"stf.spill [%6]=f110,0x80\n"
"stf.spill [%7]=f111,0x80\n;;"
 
"stf.spill [%0]=f112,0x80\n"
"stf.spill [%1]=f113,0x80\n"
"stf.spill [%2]=f114,0x80\n"
"stf.spill [%3]=f115,0x80\n"
"stf.spill [%4]=f116,0x80\n"
"stf.spill [%5]=f117,0x80\n"
"stf.spill [%6]=f118,0x80\n"
"stf.spill [%7]=f119,0x80\n;;"
 
"stf.spill [%0]=f120,0x80\n"
"stf.spill [%1]=f121,0x80\n"
"stf.spill [%2]=f122,0x80\n"
"stf.spill [%3]=f123,0x80\n"
"stf.spill [%4]=f124,0x80\n"
"stf.spill [%5]=f125,0x80\n"
"stf.spill [%6]=f126,0x80\n"
"stf.spill [%7]=f127,0x80\n;;"
 
 
:
:"r" (&((fctx->fr)[0])),"r" (&((fctx->fr)[1])),"r" (&((fctx->fr)[2])),"r" (&((fctx->fr)[3])),
"r" (&((fctx->fr)[4])),"r" (&((fctx->fr)[5])),"r" (&((fctx->fr)[6])),"r" (&((fctx->fr)[7]))
);
}
 
 
void fpu_context_restore(fpu_context_t *fctx)
{
 
asm volatile(
"ldf.fill f32=[%0],0x80\n"
"ldf.fill f33=[%1],0x80\n"
"ldf.fill f34=[%2],0x80\n"
"ldf.fill f35=[%3],0x80\n"
"ldf.fill f36=[%4],0x80\n"
"ldf.fill f37=[%5],0x80\n"
"ldf.fill f38=[%6],0x80\n"
"ldf.fill f39=[%7],0x80\n;;"
 
"ldf.fill f40=[%0],0x80\n"
"ldf.fill f41=[%1],0x80\n"
"ldf.fill f42=[%2],0x80\n"
"ldf.fill f43=[%3],0x80\n"
"ldf.fill f44=[%4],0x80\n"
"ldf.fill f45=[%5],0x80\n"
"ldf.fill f46=[%6],0x80\n"
"ldf.fill f47=[%7],0x80\n;;"
 
"ldf.fill f48=[%0],0x80\n"
"ldf.fill f49=[%1],0x80\n"
"ldf.fill f50=[%2],0x80\n"
"ldf.fill f51=[%3],0x80\n"
"ldf.fill f52=[%4],0x80\n"
"ldf.fill f53=[%5],0x80\n"
"ldf.fill f54=[%6],0x80\n"
"ldf.fill f55=[%7],0x80\n;;"
 
"ldf.fill f56=[%0],0x80\n"
"ldf.fill f57=[%1],0x80\n"
"ldf.fill f58=[%2],0x80\n"
"ldf.fill f59=[%3],0x80\n"
"ldf.fill f60=[%4],0x80\n"
"ldf.fill f61=[%5],0x80\n"
"ldf.fill f62=[%6],0x80\n"
"ldf.fill f63=[%7],0x80\n;;"
 
"ldf.fill f64=[%0],0x80\n"
"ldf.fill f65=[%1],0x80\n"
"ldf.fill f66=[%2],0x80\n"
"ldf.fill f67=[%3],0x80\n"
"ldf.fill f68=[%4],0x80\n"
"ldf.fill f69=[%5],0x80\n"
"ldf.fill f70=[%6],0x80\n"
"ldf.fill f71=[%7],0x80\n;;"
 
"ldf.fill f72=[%0],0x80\n"
"ldf.fill f73=[%1],0x80\n"
"ldf.fill f74=[%2],0x80\n"
"ldf.fill f75=[%3],0x80\n"
"ldf.fill f76=[%4],0x80\n"
"ldf.fill f77=[%5],0x80\n"
"ldf.fill f78=[%6],0x80\n"
"ldf.fill f79=[%7],0x80\n;;"
 
"ldf.fill f80=[%0],0x80\n"
"ldf.fill f81=[%1],0x80\n"
"ldf.fill f82=[%2],0x80\n"
"ldf.fill f83=[%3],0x80\n"
"ldf.fill f84=[%4],0x80\n"
"ldf.fill f85=[%5],0x80\n"
"ldf.fill f86=[%6],0x80\n"
"ldf.fill f87=[%7],0x80\n;;"
 
"ldf.fill f88=[%0],0x80\n"
"ldf.fill f89=[%1],0x80\n"
"ldf.fill f90=[%2],0x80\n"
"ldf.fill f91=[%3],0x80\n"
"ldf.fill f92=[%4],0x80\n"
"ldf.fill f93=[%5],0x80\n"
"ldf.fill f94=[%6],0x80\n"
"ldf.fill f95=[%7],0x80\n;;"
 
 
"ldf.fill f96=[%0],0x80\n"
"ldf.fill f97=[%1],0x80\n"
"ldf.fill f98=[%2],0x80\n"
"ldf.fill f99=[%3],0x80\n"
"ldf.fill f100=[%4],0x80\n"
"ldf.fill f101=[%5],0x80\n"
"ldf.fill f102=[%6],0x80\n"
"ldf.fill f103=[%7],0x80\n;;"
 
"ldf.fill f104=[%0],0x80\n"
"ldf.fill f105=[%1],0x80\n"
"ldf.fill f106=[%2],0x80\n"
"ldf.fill f107=[%3],0x80\n"
"ldf.fill f108=[%4],0x80\n"
"ldf.fill f109=[%5],0x80\n"
"ldf.fill f110=[%6],0x80\n"
"ldf.fill f111=[%7],0x80\n;;"
 
"ldf.fill f112=[%0],0x80\n"
"ldf.fill f113=[%1],0x80\n"
"ldf.fill f114=[%2],0x80\n"
"ldf.fill f115=[%3],0x80\n"
"ldf.fill f116=[%4],0x80\n"
"ldf.fill f117=[%5],0x80\n"
"ldf.fill f118=[%6],0x80\n"
"ldf.fill f119=[%7],0x80\n;;"
 
"ldf.fill f120=[%0],0x80\n"
"ldf.fill f121=[%1],0x80\n"
"ldf.fill f122=[%2],0x80\n"
"ldf.fill f123=[%3],0x80\n"
"ldf.fill f124=[%4],0x80\n"
"ldf.fill f125=[%5],0x80\n"
"ldf.fill f126=[%6],0x80\n"
"ldf.fill f127=[%7],0x80\n;;"
 
 
:
:"r" (&((fctx->fr)[0])),"r" (&((fctx->fr)[1])),"r" (&((fctx->fr)[2])),"r" (&((fctx->fr)[3])),
"r" (&((fctx->fr)[4])),"r" (&((fctx->fr)[5])),"r" (&((fctx->fr)[6])),"r" (&((fctx->fr)[7]))
);
}
 
void fpu_enable(void)
{
__u64 a = 0;
asm volatile(
"rsm %0;;"
"srlz.i\n"
"srlz.d;;\n"
:
:"i" (PSR_DFH_MASK)
);
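/*
 * 0x38 sets FPSR bits 3-5, the trap disable bits for floating-point
 * overflow, underflow and inexact results.
 */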
asm volatile
(
"mov %0=ar.fpsr;;\n"
"or %0=%0,%1;;\n"
"mov ar.fpsr=%0;;\n"
: "+r" (a)
: "r" (0x38)
);
 
}
 
void fpu_disable(void)
{
 
__u64 a = 0;
asm volatile(
"ssm %0;;\n"
"srlz.i\n"
"srlz.d;;\n"
:
:"i" (PSR_DFH_MASK)
);
asm volatile
(
"mov %0=ar.fpsr;;\n"
"or %0=%0,%1;;\n"
"mov ar.fpsr=%0;;\n"
: "+r" (a)
: "r" (0x38)
);
 
}
 
void fpu_init(void)
{
__u64 a = 0;
asm volatile
(
"mov %0=ar.fpsr;;\n"
"or %0=%0,%1;;\n"
"mov ar.fpsr=%0;;\n"
: "+r" (a)
: "r" (0x38)
);
 
asm volatile(
"mov f2=f0\n"
"mov f3=f0\n"
"mov f4=f0\n"
"mov f5=f0\n"
"mov f6=f0\n"
"mov f7=f0\n"
"mov f8=f0\n"
"mov f9=f0\n"
 
"mov f10=f0\n"
"mov f11=f0\n"
"mov f12=f0\n"
"mov f13=f0\n"
"mov f14=f0\n"
"mov f15=f0\n"
"mov f16=f0\n"
"mov f17=f0\n"
"mov f18=f0\n"
"mov f19=f0\n"
 
"mov f20=f0\n"
"mov f21=f0\n"
"mov f22=f0\n"
"mov f23=f0\n"
"mov f24=f0\n"
"mov f25=f0\n"
"mov f26=f0\n"
"mov f27=f0\n"
"mov f28=f0\n"
"mov f29=f0\n"
 
"mov f30=f0\n"
"mov f31=f0\n"
"mov f32=f0\n"
"mov f33=f0\n"
"mov f34=f0\n"
"mov f35=f0\n"
"mov f36=f0\n"
"mov f37=f0\n"
"mov f38=f0\n"
"mov f39=f0\n"
 
"mov f40=f0\n"
"mov f41=f0\n"
"mov f42=f0\n"
"mov f43=f0\n"
"mov f44=f0\n"
"mov f45=f0\n"
"mov f46=f0\n"
"mov f47=f0\n"
"mov f48=f0\n"
"mov f49=f0\n"
 
"mov f50=f0\n"
"mov f51=f0\n"
"mov f52=f0\n"
"mov f53=f0\n"
"mov f54=f0\n"
"mov f55=f0\n"
"mov f56=f0\n"
"mov f57=f0\n"
"mov f58=f0\n"
"mov f59=f0\n"
 
"mov f60=f0\n"
"mov f61=f0\n"
"mov f62=f0\n"
"mov f63=f0\n"
"mov f64=f0\n"
"mov f65=f0\n"
"mov f66=f0\n"
"mov f67=f0\n"
"mov f68=f0\n"
"mov f69=f0\n"
 
"mov f70=f0\n"
"mov f71=f0\n"
"mov f72=f0\n"
"mov f73=f0\n"
"mov f74=f0\n"
"mov f75=f0\n"
"mov f76=f0\n"
"mov f77=f0\n"
"mov f78=f0\n"
"mov f79=f0\n"
 
"mov f80=f0\n"
"mov f81=f0\n"
"mov f82=f0\n"
"mov f83=f0\n"
"mov f84=f0\n"
"mov f85=f0\n"
"mov f86=f0\n"
"mov f87=f0\n"
"mov f88=f0\n"
"mov f89=f0\n"
 
"mov f90=f0\n"
"mov f91=f0\n"
"mov f92=f0\n"
"mov f93=f0\n"
"mov f94=f0\n"
"mov f95=f0\n"
"mov f96=f0\n"
"mov f97=f0\n"
"mov f98=f0\n"
"mov f99=f0\n"
 
"mov f100=f0\n"
"mov f101=f0\n"
"mov f102=f0\n"
"mov f103=f0\n"
"mov f104=f0\n"
"mov f105=f0\n"
"mov f106=f0\n"
"mov f107=f0\n"
"mov f108=f0\n"
"mov f109=f0\n"
 
"mov f110=f0\n"
"mov f111=f0\n"
"mov f112=f0\n"
"mov f113=f0\n"
"mov f114=f0\n"
"mov f115=f0\n"
"mov f116=f0\n"
"mov f117=f0\n"
"mov f118=f0\n"
"mov f119=f0\n"
 
"mov f120=f0\n"
"mov f121=f0\n"
"mov f122=f0\n"
"mov f123=f0\n"
"mov f124=f0\n"
"mov f125=f0\n"
"mov f126=f0\n"
"mov f127=f0\n"
 
);
 
}
 
/tags/0.1.1/kernel/arch/ia64/src/dummy.s
0,0 → 1,42
#
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
.text
 
.global calibrate_delay_loop
.global asm_delay_loop
.global cpu_sleep
.global dummy
 
calibrate_delay_loop:
asm_delay_loop:
cpu_sleep:
 
dummy:
br.ret.sptk.many b0
 
/tags/0.1.1/kernel/arch/ia64/src/ski/ski.c
0,0 → 1,157
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/ski/ski.h>
#include <console/console.h>
#include <console/chardev.h>
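/*
 * Ski SSC (Simulator System Call) protocol: the service number is
 * passed in r15 and any argument in the output registers; break
 * 0x80000 then traps into the simulator, which returns its result
 * in r8.
 */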
 
static chardev_t ski_console;
static bool kb_disable;
 
static void ski_putchar(chardev_t *d, const char ch);
static __s32 ski_getchar(void);
 
/** Display character on debug console
*
* Use SSC (Simulator System Call) to
* display character on debug console.
*
* @param d Character device.
* @param ch Character to be printed.
*/
void ski_putchar(chardev_t *d, const char ch)
{
__asm__ volatile (
"mov r15=%0\n"
"mov r32=%1\n" /* r32 is in0 */
"break 0x80000\n" /* modifies r8 */
:
: "i" (SKI_PUTCHAR), "r" (ch)
: "r15", "in0", "r8"
);
if (ch == '\n')
ski_putchar(d, '\r');
}
 
/** Ask debug console if a key was pressed.
*
* Use SSC (Simulator System Call) to
* get character from debug console.
*
* This call is non-blocking.
*
* @return ASCII code of the pressed key, or 0 if no key was pressed.
*/
__s32 ski_getchar(void)
{
__u64 ch;
__asm__ volatile (
"mov r15=%1\n"
"break 0x80000;;\n" /* modifies r8 */
"mov %0=r8;;\n"
 
: "=r" (ch)
: "i" (SKI_GETCHAR)
: "r15", "r8"
);
 
return (__s32) ch;
}
 
/**
* This is a blocking wrapper for ski_getchar().
* To be used when the kernel crashes.
*/
static char ski_getchar_blocking(chardev_t *d)
{
int ch;
 
while (!(ch = ski_getchar()))
	;
if (ch == '\r')
	ch = '\n';
return (char) ch;
}
 
/** Ask keyboard if a key was pressed. */
void poll_keyboard(void)
{
char ch;
 
if (kb_disable)
return;
 
ch = ski_getchar();
if (ch == '\r')
ch = '\n';
if (ch)
chardev_push_character(&ski_console, ch);
}
 
/* Called from getc(). */
static void ski_kb_enable(chardev_t *d)
{
kb_disable = false;
}
 
/* Called from getc(). */
static void ski_kb_disable(chardev_t *d)
{
kb_disable = true;
}
 
 
static chardev_operations_t ski_ops = {
.resume = ski_kb_enable,
.suspend = ski_kb_disable,
.write = ski_putchar,
.read = ski_getchar_blocking
};
 
 
/** Initialize debug console
*
* Issue SSC (Simulator System Call)
* to open the debug console.
*/
void ski_init_console(void)
{
__asm__ volatile (
"mov r15=%0\n"
"break 0x80000\n"
:
: "i" (SKI_INIT_CONSOLE)
: "r15", "r8"
);
 
chardev_initialize("ski_console", &ski_console, &ski_ops);
stdin = &ski_console;
stdout = &ski_console;
}
/tags/0.1.1/kernel/arch/ia64/src/putchar.c
0,0 → 1,35
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <putchar.h>
#include <arch/ski/ski.h>
 
void putchar(const char ch)
{
ski_write(ch);
}
/tags/0.1.1/kernel/arch/ia64/src/drivers/it.c
0,0 → 1,69
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** Interval Timer driver. */
#include <arch/drivers/it.h>
#include <arch/ski/ski.h>
#include <arch/interrupt.h>
#include <arch/register.h>
#include <arch/asm.h>
#include <arch/barrier.h>
#include <time/clock.h>
 
 
/** Initialize Interval Timer. */
void it_init(void)
{
cr_itv_t itv;
 
/* initialize Interval Timer external interrupt vector */
itv.value = itv_read();
itv.vector = INTERRUPT_TIMER;
itv.m = 0;
itv_write(itv.value);
 
/* set Interval Timer Counter to zero */
itc_write(0);
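/* the timer interrupt fires when ITC reaches the value programmed into ITM */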
/* generate first Interval Timer interrupt in IT_DELTA ticks */
itm_write(IT_DELTA);
 
/* propagate changes */
srlz_d();
}
 
/** Process Interval Timer interrupt. */
void it_interrupt(void)
{
eoi_write(EOI);
itm_write(itc_read() + IT_DELTA); /* schedule the next interrupt */
srlz_d(); /* propagate changes */
clock();
poll_keyboard();
}
/tags/0.1.1/kernel/arch/ia64/src/cpu/cpu.c
0,0 → 1,67
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <cpu.h>
#include <arch.h>
#include <arch/register.h>
#include <print.h>
 
void cpu_arch_init(void)
{
}
 
void cpu_identify(void)
{
CPU->arch.cpuid0 = cpuid_read(0);
CPU->arch.cpuid1 = cpuid_read(1);
CPU->arch.cpuid3.value = cpuid_read(3);
}
 
void cpu_print_report(cpu_t *m)
{
char *family_str;
char vendor[2*sizeof(__u64)+1];
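/* CPUID registers 0 and 1 contain the 16-byte ASCII vendor string. */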
*((__u64 *) &vendor[0*sizeof(__u64)]) = CPU->arch.cpuid0;
*((__u64 *) &vendor[1*sizeof(__u64)]) = CPU->arch.cpuid1;
vendor[sizeof(vendor)-1] = '\0';
switch(m->arch.cpuid3.family) {
case FAMILY_ITANIUM:
family_str = "Itanium";
break;
case FAMILY_ITANIUM2:
family_str = "Itanium 2";
break;
default:
family_str = "Unknown";
break;
}
printf("cpu%d: %s (%s), archrev=%d, model=%d, revision=%d\n", CPU->id, family_str, vendor, CPU->arch.cpuid3.archrev, CPU->arch.cpuid3.model, CPU->arch.cpuid3.revision);
}