Subversion Repositories HelenOS

Compare Revisions

Rev 1967 → Rev 1968

/tags/0.1.1/kernel/arch/ia64/include/interrupt.h
0,0 → 1,126
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_INTERRUPT_H__
#define __ia64_INTERRUPT_H__
 
#include <typedefs.h>
#include <arch/types.h>
#include <arch/register.h>
 
#define IRQ_COUNT 1 /* TODO */
 
/** External Interrupt vectors. */
#define INTERRUPT_TIMER 0
#define INTERRUPT_SPURIOUS 15
 
/** General Exception codes. */
#define GE_ILLEGALOP 0
#define GE_PRIVOP 1
#define GE_PRIVREG 2
#define GE_RESREGFLD 3
#define GE_DISBLDISTRAN 4
#define GE_ILLEGALDEP 8
 
#define EOI 0 /**< The actual value doesn't matter. */
 
struct istate {
	__r128 f2;
	__r128 f3;
	__r128 f4;
	__r128 f5;
	__r128 f6;
	__r128 f7;
	__r128 f8;
	__r128 f9;
	__r128 f10;
	__r128 f11;
	__r128 f12;
	__r128 f13;
	__r128 f14;
	__r128 f15;
	__r128 f16;
	__r128 f17;
	__r128 f18;
	__r128 f19;
	__r128 f20;
	__r128 f21;
	__r128 f22;
	__r128 f23;
	__r128 f24;
	__r128 f25;
	__r128 f26;
	__r128 f27;
	__r128 f28;
	__r128 f29;
	__r128 f30;
	__r128 f31;
	__address ar_bsp;
	__address ar_bspstore;
	__address ar_bspstore_new;
	__u64 ar_rnat;
	__u64 ar_ifs;
	__u64 ar_pfs;
	__u64 ar_rsc;
	__address cr_ifa;
	cr_isr_t cr_isr;
	__address cr_iipa;
	psr_t cr_ipsr;
	__address cr_iip;
	__u64 pr;
	__address sp;
	/*
	* The following members are defined only for the break_instruction handler.
	*/
	__u64 in0;
	__u64 in1;
	__u64 in2;
	__u64 in3;
	__u64 in4;
};
 
static inline void istate_set_retaddr(istate_t *istate, __address retaddr)
{
	istate->cr_iip = retaddr;
	istate->cr_ipsr.ri = 0; /* return to instruction slot #0 */
}
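
/*
* Example (illustrative sketch, not part of the original header): divert
* the interrupted context so that it resumes at a given entry address
* once the handler returns. On ia64 the entry address of a C function
* would itself be obtained with FADDR() from <arch/faddr.h>, because a
* plain function pointer references a function descriptor.
*
*	static void example_redirect(istate_t *istate, __address entry)
*	{
*		istate_set_retaddr(istate, entry);
*	}
*/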
 
extern void *ivt;
 
extern void general_exception(__u64 vector, istate_t *istate);
extern int break_instruction(__u64 vector, istate_t *istate);
extern void universal_handler(__u64 vector, istate_t *istate);
extern void nop_handler(__u64 vector, istate_t *istate);
extern void external_interrupt(__u64 vector, istate_t *istate);
extern void disabled_fp_register(__u64 vector, istate_t *istate);
 
 
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/mm/vhpt.h
0,0 → 1,54
/*
* Copyright (C) 2006 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_VHPT_H__
#define __ia64_VHPT_H__
 
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
 
__address vhpt_set_up(void);
 
static inline vhpt_entry_t tlb_entry_t2vhpt_entry_t(tlb_entry_t tentry)
{
	vhpt_entry_t ventry;

	ventry.word[0] = tentry.word[0];
	ventry.word[1] = tentry.word[1];
	return ventry;
}
 
void vhpt_mapping_insert(__address va, asid_t asid, tlb_entry_t entry);
void vhpt_invalidate_all(void);
void vhpt_invalidate_asid(asid_t asid);
 
 
#endif
 
/tags/0.1.1/kernel/arch/ia64/include/mm/page.h
0,0 → 1,274
/*
* Copyright (C) 2005 - 2006 Jakub Jermar
* Copyright (C) 2006 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_PAGE_H__
#define __ia64_PAGE_H__
 
#include <arch/mm/frame.h>
 
#define PAGE_SIZE FRAME_SIZE
#define PAGE_WIDTH FRAME_WIDTH
 
 
#ifdef KERNEL
 
/** Bit width of the TLB-locked portion of kernel address space. */
#define KERNEL_PAGE_WIDTH 28 /* 256M */
 
#define PPN_SHIFT 12
 
#define VRN_SHIFT 61
#define VRN_MASK (7LL << VRN_SHIFT)
#define VA2VRN(va) ((va)>>VRN_SHIFT)
 
#ifdef __ASM__
#define VRN_KERNEL 7
#else
#define VRN_KERNEL 7LL
#endif
 
#define REGION_REGISTERS 8
 
#define KA2PA(x) ((__address) (x-(VRN_KERNEL<<VRN_SHIFT)))
#define PA2KA(x) ((__address) (x+(VRN_KERNEL<<VRN_SHIFT)))
 
#define VHPT_WIDTH 20 /* 1M */
#define VHPT_SIZE (1 << VHPT_WIDTH)
 
#define PTA_BASE_SHIFT 15
 
/** Memory Attributes. */
#define MA_WRITEBACK 0x0
#define MA_UNCACHEABLE 0x4
 
/** Privilege Levels. Only the most and the least privileged ones are ever used. */
#define PL_KERNEL 0x0
#define PL_USER 0x3
 
/* Access Rights. Only certain combinations are used by the kernel. */
#define AR_READ 0x0
#define AR_EXECUTE 0x1
#define AR_WRITE 0x2
 
#ifndef __ASM__
 
#include <arch/mm/frame.h>
#include <arch/barrier.h>
#include <genarch/mm/page_ht.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <debug.h>
 
struct vhpt_tag_info {
	unsigned long long tag : 63;
	unsigned ti : 1;
} __attribute__ ((packed));

union vhpt_tag {
	struct vhpt_tag_info tag_info;
	unsigned tag_word;
};

struct vhpt_entry_present {
	/* Word 0 */
	unsigned p : 1;
	unsigned : 1;
	unsigned ma : 3;
	unsigned a : 1;
	unsigned d : 1;
	unsigned pl : 2;
	unsigned ar : 3;
	unsigned long long ppn : 38;
	unsigned : 2;
	unsigned ed : 1;
	unsigned ig1 : 11;
	/* Word 1 */
	unsigned : 2;
	unsigned ps : 6;
	unsigned key : 24;
	unsigned : 32;
	/* Word 2 */
	union vhpt_tag tag;
	/* Word 3 */
	__u64 ig3 : 64;
} __attribute__ ((packed));

struct vhpt_entry_not_present {
	/* Word 0 */
	unsigned p : 1;
	unsigned long long ig0 : 52;
	unsigned ig1 : 11;
	/* Word 1 */
	unsigned : 2;
	unsigned ps : 6;
	unsigned long long ig2 : 56;

	/* Word 2 */
	union vhpt_tag tag;
	/* Word 3 */
	__u64 ig3 : 64;
} __attribute__ ((packed));

typedef union vhpt_entry {
	struct vhpt_entry_present present;
	struct vhpt_entry_not_present not_present;
	__u64 word[4];
} vhpt_entry_t;

struct region_register_map {
	unsigned ve : 1;
	unsigned : 1;
	unsigned ps : 6;
	unsigned rid : 24;
	unsigned : 32;
} __attribute__ ((packed));

typedef union region_register {
	struct region_register_map map;
	unsigned long long word;
} region_register;

struct pta_register_map {
	unsigned ve : 1;
	unsigned : 1;
	unsigned size : 6;
	unsigned vf : 1;
	unsigned : 6;
	unsigned long long base : 49;
} __attribute__ ((packed));

typedef union pta_register {
	struct pta_register_map map;
	__u64 word;
} pta_register;
 
/** Return Translation Hashed Entry Address.
*
* VRN bits are used to read RID (ASID) from one
* of the eight region registers.
*
* @param va Virtual address including VRN bits.
*
* @return Address of the head of VHPT collision chain.
*/
static inline __u64 thash(__u64 va)
{
	__u64 ret;

	__asm__ volatile ("thash %0 = %1\n" : "=r" (ret) : "r" (va));

	return ret;
}
 
/** Return Translation Hashed Entry Tag.
*
* VRN bits are used to read RID (ASID) from one
* of the eight region registers.
*
* @param va Virtual address including VRN bits.
*
* @return The unique tag for VPN and RID in the collision chain returned by thash().
*/
static inline __u64 ttag(__u64 va)
{
	__u64 ret;

	__asm__ volatile ("ttag %0 = %1\n" : "=r" (ret) : "r" (va));

	return ret;
}
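
/*
* Example (illustrative sketch, not part of the original header): a
* long-format VHPT lookup built from thash() and ttag(). It assumes the
* region register covering va already holds the RID derived from the
* owning address space, so the hardware hash and tag match what the
* kernel inserted.
*
*	static vhpt_entry_t *example_vhpt_lookup(__address va)
*	{
*		vhpt_entry_t *v = (vhpt_entry_t *) thash(va);
*
*		// word[2] of a long-format entry holds the tag
*		if (v->present.p && (v->word[2] == ttag(va)))
*			return v;
*
*		return NULL;
*	}
*/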
 
/** Read Region Register.
*
* @param i Region register index.
*
* @return Current contents of rr[i].
*/
static inline __u64 rr_read(index_t i)
{
	__u64 ret;
	ASSERT(i < REGION_REGISTERS);
	__asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT));
	return ret;
}

/** Write Region Register.
*
* @param i Region register index.
* @param v Value to be written to rr[i].
*/
static inline void rr_write(index_t i, __u64 v)
{
	ASSERT(i < REGION_REGISTERS);
	__asm__ volatile (
		"mov rr[%0] = %1\n"
		:
		: "r" (i << VRN_SHIFT), "r" (v)
	);
}

/** Read Page Table Register.
*
* @return Current value stored in PTA.
*/
static inline __u64 pta_read(void)
{
	__u64 ret;
	__asm__ volatile ("mov %0 = cr.pta\n" : "=r" (ret));
	return ret;
}

/** Write Page Table Register.
*
* @param v New value to be stored in PTA.
*/
static inline void pta_write(__u64 v)
{
	__asm__ volatile ("mov cr.pta = %0\n" : : "r" (v));
}
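
/*
* Example (illustrative sketch; values and serialization are simplified):
* program a region register with a RID and point the hardware walker at
* a long-format VHPT using the unions and accessors above. rid_t comes
* from <arch/mm/asid.h>; the srlz.d/srlz.i serialization that real code
* performs after such writes is omitted here.
*
*	static void example_setup_translation(index_t i, rid_t rid, __address vhpt_base)
*	{
*		region_register rr;
*		pta_register pta;
*
*		rr.word = rr_read(i);
*		rr.map.rid = rid;
*		rr.map.ps = PAGE_WIDTH;		// preferred page size is 2^ps
*		rr.map.ve = 1;			// enable the VHPT walker for this region
*		rr_write(i, rr.word);
*
*		pta.word = pta_read();
*		pta.map.ve = 1;			// enable the walker globally
*		pta.map.vf = 1;			// long-format entries
*		pta.map.size = VHPT_WIDTH;	// VHPT occupies 2^size bytes
*		pta.map.base = vhpt_base >> PTA_BASE_SHIFT;
*		pta_write(pta.word);
*	}
*/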
 
extern void page_arch_init(void);
 
extern vhpt_entry_t *vhpt_hash(__address page, asid_t asid);
extern bool vhpt_compare(__address page, asid_t asid, vhpt_entry_t *v);
extern void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags);
 
#endif /* __ASM__ */
 
#endif /* KERNEL */
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/mm/tlb.h
0,0 → 1,94
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_TLB_H__
#define __ia64_TLB_H__
 
#define tlb_arch_init()
#define tlb_print()
 
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <arch/interrupt.h>
#include <arch/types.h>
#include <typedefs.h>
 
/** Data and instruction Translation Register indices. */
#define DTR_KERNEL 0
#define ITR_KERNEL 0
#define DTR_KSTACK1 1
#define DTR_KSTACK2 2
 
/** Portion of TLB insertion format data structure. */
union tlb_entry {
	__u64 word[2];
	struct {
		/* Word 0 */
		unsigned p : 1; /**< Present. */
		unsigned : 1;
		unsigned ma : 3; /**< Memory attribute. */
		unsigned a : 1; /**< Accessed. */
		unsigned d : 1; /**< Dirty. */
		unsigned pl : 2; /**< Privilege level. */
		unsigned ar : 3; /**< Access rights. */
		unsigned long long ppn : 38; /**< Physical Page Number, a.k.a. PFN. */
		unsigned : 2;
		unsigned ed : 1;
		unsigned ig1 : 11;

		/* Word 1 */
		unsigned : 2;
		unsigned ps : 6; /**< Page size will be 2^ps. */
		unsigned key : 24; /**< Protection key, unused. */
		unsigned : 32;
	} __attribute__ ((packed));
} __attribute__ ((packed));
typedef union tlb_entry tlb_entry_t;
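
/*
* Example (illustrative sketch, not part of the original header): fill in
* an entry describing one privileged, write-back, read/write kernel page,
* as would be handed to dtc_mapping_insert(). The MA_*, PL_*, AR_*,
* PPN_SHIFT and PAGE_WIDTH constants come from <arch/mm/page.h>, which is
* included above.
*
*	static tlb_entry_t example_kernel_page_entry(__address frame)
*	{
*		tlb_entry_t entry;
*
*		entry.word[0] = 0;
*		entry.word[1] = 0;
*		entry.p = 1;			// present
*		entry.ma = MA_WRITEBACK;
*		entry.a = 1;			// already accessed
*		entry.d = 1;			// already dirty
*		entry.pl = PL_KERNEL;
*		entry.ar = AR_READ | AR_WRITE;
*		entry.ppn = frame >> PPN_SHIFT;
*		entry.ps = PAGE_WIDTH;
*
*		return entry;
*	}
*/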
 
extern void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc);
extern void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry);
extern void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry);
 
extern void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr);
extern void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
extern void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
 
extern void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr);
 
extern void dtc_pte_copy(pte_t *t);
extern void itc_pte_copy(pte_t *t);
 
extern void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate);
extern void alternate_data_tlb_fault(__u64 vector, istate_t *istate);
extern void data_nested_tlb_fault(__u64 vector, istate_t *istate);
extern void data_dirty_bit_fault(__u64 vector, istate_t *istate);
extern void instruction_access_bit_fault(__u64 vector, istate_t *istate);
extern void data_access_bit_fault(__u64 vector, istate_t *istate);
extern void page_not_present(__u64 vector, istate_t *istate);
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/mm/as.h
0,0 → 1,43
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_AS_H__
#define __ia64_AS_H__
 
#define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 0
 
#define KERNEL_ADDRESS_SPACE_START_ARCH (__address) 0xe000000000000000ULL
#define KERNEL_ADDRESS_SPACE_END_ARCH (__address) 0xffffffffffffffffULL
#define USER_ADDRESS_SPACE_START_ARCH (__address) 0x0000000000000000ULL
#define USER_ADDRESS_SPACE_END_ARCH (__address) 0xdfffffffffffffffULL
 
#define USTACK_ADDRESS_ARCH 0x0000000ff0000000ULL
 
extern void as_arch_init(void);
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/mm/frame.h
0,0 → 1,45
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_FRAME_H__
#define __ia64_FRAME_H__
 
#define FRAME_WIDTH 14 /* 16K */
#define FRAME_SIZE (1<<FRAME_WIDTH)
 
#ifdef KERNEL
#ifndef __ASM__
 
extern void frame_arch_init(void);
 
#define ARCH_STACK_FRAMES TWO_FRAMES
 
#endif /* __ASM__ */
#endif /* KERNEL */
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/mm/memory_init.h
0,0 → 1,36
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_MEMORY_INIT_H__
#define __ia64_MEMORY_INIT_H__
 
#include <config.h>
 
#define get_memory_size() (512*1024*1024) /* 512M */
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/mm/asid.h
0,0 → 1,57
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_ASID_H__
#define __ia64_ASID_H__
 
#ifndef __ASM__
 
#include <arch/types.h>
 
typedef __u16 asid_t;
typedef __u32 rid_t;
 
#endif /* __ASM__ */
 
/**
* Number of ia64 RIDs (Region Identifiers) per kernel ASID.
* Note that some architectures may support more bits,
* but those extra bits are not used by the kernel.
*/
#define RIDS_PER_ASID 7
 
#define RID_MAX 262143 /* 2^18 - 1 */
#define RID_KERNEL 0
#define RID_INVALID 1
 
#define ASID2RID(asid, vrn) (((asid)>RIDS_PER_ASID)?(((asid)*RIDS_PER_ASID)+(vrn)):(asid))
#define RID2ASID(rid) ((rid)/RIDS_PER_ASID)
 
#define ASID_MAX_ARCH (RID_MAX/RIDS_PER_ASID)
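
/*
* Worked example: with RIDS_PER_ASID == 7, ASIDs not greater than
* RIDS_PER_ASID are passed through unchanged (which keeps RID_KERNEL and
* RID_INVALID usable), while e.g. ASID2RID(100, 3) == 100 * 7 + 3 == 703
* and RID2ASID(703) == 703 / 7 == 100.
*/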
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/proc/task.h
0,0 → 1,37
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_TASK_H__
#define __ia64_TASK_H__
 
typedef struct {
} task_arch_t;
 
#define task_create_arch(t)
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/proc/thread.h
0,0 → 1,37
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_THREAD_H__
#define __ia64_THREAD_H__
 
typedef struct {
} thread_arch_t;
 
#define thread_create_arch(t)
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/arch.h
0,0 → 1,34
/*
* Copyright (C) 2005 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_ARCH_H__
#define __ia64_ARCH_H__
 
#define LOADED_PROG_STACK_PAGES_NO 2
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/context.h
0,0 → 1,127
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_CONTEXT_H__
#define __ia64_CONTEXT_H__
 
#include <arch/types.h>
#include <arch/register.h>
#include <typedefs.h>
#include <align.h>
#include <arch/stack.h>
 
/*
* context_save_arch() and context_restore_arch() are both leaf procedures.
* No need to allocate scratch area.
*
* One item is put onto the stack to support get_stack_base().
*/
#define SP_DELTA (0+ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT))
 
#ifdef context_set
#undef context_set
#endif
 
/* RSE stack starts at the bottom of memory stack. */
#define context_set(c, _pc, stack, size) \
	do { \
		(c)->pc = (__address) _pc; \
		(c)->bsp = ((__address) stack) + ALIGN_UP((size), REGISTER_STACK_ALIGNMENT); \
		(c)->ar_pfs &= PFM_MASK; \
		(c)->sp = ((__address) stack) + ALIGN_UP((size), STACK_ALIGNMENT) - SP_DELTA; \
	} while (0)
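
/*
* Example (illustrative sketch; context_t, FADDR() and STACK_SIZE come
* from <typedefs.h>, <arch/faddr.h> and <config.h> respectively and are
* assumptions of this sketch): prepare a context so that a later
* context_restore() continues in a new routine on a fresh stack area.
* The memory stack grows down from the top of the area while the RSE
* backing store (bsp) grows up from its bottom.
*
*	static void example_prepare(context_t *ctx, __address stack, void (*func)(void))
*	{
*		context_set(ctx, FADDR(func), stack, STACK_SIZE);
*	}
*/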
 
/*
* Only save registers that must be preserved across
* function calls.
*/
struct context {

	/*
	* Application registers
	*/
	__u64 ar_pfs;
	__u64 ar_unat_caller;
	__u64 ar_unat_callee;
	__u64 ar_rsc;
	__address bsp; /* ar_bsp */
	__u64 ar_rnat;
	__u64 ar_lc;

	/*
	* General registers
	*/
	__u64 r1;
	__u64 r4;
	__u64 r5;
	__u64 r6;
	__u64 r7;
	__address sp; /* r12 */
	__u64 r13;
	/*
	* Branch registers
	*/
	__address pc; /* b0 */
	__u64 b1;
	__u64 b2;
	__u64 b3;
	__u64 b4;
	__u64 b5;

	/*
	* Predicate registers
	*/
	__u64 pr;

	__r128 f2 __attribute__ ((aligned(16)));
	__r128 f3;
	__r128 f4;
	__r128 f5;

	__r128 f16;
	__r128 f17;
	__r128 f18;
	__r128 f19;
	__r128 f20;
	__r128 f21;
	__r128 f22;
	__r128 f23;
	__r128 f24;
	__r128 f25;
	__r128 f26;
	__r128 f27;
	__r128 f28;
	__r128 f29;
	__r128 f30;
	__r128 f31;
	ipl_t ipl;
};
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/faddr.h
0,0 → 1,44
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_FADDR_H__
#define __ia64_FADDR_H__
 
#include <arch/types.h>
 
/** Calculate the absolute address of the function referenced by a function pointer.
*
* @param f Function pointer.
*/
#define FADDR(f) (*((__address *) (f)))
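
/*
* On ia64 a C function pointer refers to a function descriptor (the entry
* address followed by the gp value), so FADDR() dereferences the first
* word of the descriptor to obtain the real entry address. Illustrative
* use (thread_main is a placeholder symbol):
*
*	__address entry = FADDR(thread_main);
*/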
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/atomic.h
0,0 → 1,57
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_ATOMIC_H__
#define __ia64_ATOMIC_H__
 
/** Atomic addition.
*
* @param val Atomic value.
* @param imm Value to add.
*
* @return Value before addition.
*/
static inline long atomic_add(atomic_t *val, int imm)
{
	long v;

	__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
	return v;
}
 
static inline void atomic_inc(atomic_t *val) { atomic_add(val, 1); }
static inline void atomic_dec(atomic_t *val) { atomic_add(val, -1); }
 
static inline long atomic_preinc(atomic_t *val) { return atomic_add(val, 1) + 1; }
static inline long atomic_predec(atomic_t *val) { return atomic_add(val, -1) - 1; }
 
static inline long atomic_postinc(atomic_t *val) { return atomic_add(val, 1); }
static inline long atomic_postdec(atomic_t *val) { return atomic_add(val, -1); }
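
/*
* Example (illustrative sketch): a lock-free event counter. The atomic_t
* type with its count member is expected to be provided by the generic
* <atomic.h> that includes this header. Note that fetchadd8 only accepts
* a small set of immediate addends (+-1, +-4, +-8, +-16), which is why
* only increment/decrement helpers are wrapped here.
*
*	static atomic_t events;
*
*	static void example_count_event(void)
*	{
*		atomic_inc(&events);
*	}
*
*	static long example_take_ticket(void)
*	{
*		return atomic_postinc(&events);	// value before the increment
*	}
*/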
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/asm.h
0,0 → 1,263
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_ASM_H__
#define __ia64_ASM_H__
 
#include <config.h>
#include <arch/types.h>
#include <arch/register.h>
 
/** Return base address of current stack.
*
* Return the base address of the current stack.
* The stack is assumed to be STACK_SIZE long.
* The stack must start on a page boundary.
*/
static inline __address get_stack_base(void)
{
	__u64 v;

	__asm__ volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1)));
	return v;
}
 
/** Return Processor State Register.
*
* @return PSR.
*/
static inline __u64 psr_read(void)
{
	__u64 v;
	__asm__ volatile ("mov %0 = psr\n" : "=r" (v));
	return v;
}
 
/** Read IVA (Interruption Vector Address).
*
* @return Return location of interruption vector table.
*/
static inline __u64 iva_read(void)
{
	__u64 v;
	__asm__ volatile ("mov %0 = cr.iva\n" : "=r" (v));
	return v;
}
 
/** Write IVA (Interruption Vector Address) register.
*
* @param v New location of interruption vector table.
*/
static inline void iva_write(__u64 v)
{
	__asm__ volatile ("mov cr.iva = %0\n" : : "r" (v));
}
 
 
/** Read IVR (External Interrupt Vector Register).
*
* @return Highest priority, pending, unmasked external interrupt vector.
*/
static inline __u64 ivr_read(void)
{
	__u64 v;
	__asm__ volatile ("mov %0 = cr.ivr\n" : "=r" (v));
	return v;
}
 
/** Write ITC (Interval Timer Counter) register.
*
* @param v New counter value.
*/
static inline void itc_write(__u64 v)
{
	__asm__ volatile ("mov ar.itc = %0\n" : : "r" (v));
}
 
/** Read ITC (Interval Timer Counter) register.
*
* @return Current counter value.
*/
static inline __u64 itc_read(void)
{
	__u64 v;
	__asm__ volatile ("mov %0 = ar.itc\n" : "=r" (v));
	return v;
}
 
/** Write ITM (Interval Timer Match) register.
*
* @param v New match value.
*/
static inline void itm_write(__u64 v)
{
	__asm__ volatile ("mov cr.itm = %0\n" : : "r" (v));
}
 
/** Read ITV (Interval Timer Vector) register.
*
* @return Current vector and mask bit.
*/
static inline __u64 itv_read(void)
{
	__u64 v;
	__asm__ volatile ("mov %0 = cr.itv\n" : "=r" (v));
	return v;
}
 
/** Write ITV (Interval Timer Vector) register.
*
* @param v New vector and mask bit.
*/
static inline void itv_write(__u64 v)
{
	__asm__ volatile ("mov cr.itv = %0\n" : : "r" (v));
}
 
/** Write EOI (End Of Interrupt) register.
*
* @param v This value is ignored.
*/
static inline void eoi_write(__u64 v)
{
	__asm__ volatile ("mov cr.eoi = %0\n" : : "r" (v));
}
 
/** Read TPR (Task Priority Register).
*
* @return Current value of TPR.
*/
static inline __u64 tpr_read(void)
{
	__u64 v;
 
	__asm__ volatile ("mov %0 = cr.tpr\n" : "=r" (v));
	return v;
}
 
/** Write TPR (Task Priority Register).
*
* @param v New value of TPR.
*/
static inline void tpr_write(__u64 v)
{
	__asm__ volatile ("mov cr.tpr = %0\n" : : "r" (v));
}
 
/** Disable interrupts.
*
* Disable interrupts and return previous
* value of PSR.
*
* @return Old interrupt priority level.
*/
static inline ipl_t interrupts_disable(void)
{
	__u64 v;
	__asm__ volatile (
		"mov %0 = psr\n"
		"rsm %1\n"
		: "=r" (v)
		: "i" (PSR_I_MASK)
	);
	return (ipl_t) v;
}
 
/** Enable interrupts.
*
* Enable interrupts and return previous
* value of PSR.
*
* @return Old interrupt priority level.
*/
static inline ipl_t interrupts_enable(void)
{
	__u64 v;
	__asm__ volatile (
		"mov %0 = psr\n"
		"ssm %1\n"
		";;\n"
		"srlz.d\n"
		: "=r" (v)
		: "i" (PSR_I_MASK)
	);
	return (ipl_t) v;
}
 
/** Restore interrupt priority level.
*
* Restore PSR.
*
* @param ipl Saved interrupt priority level.
*/
static inline void interrupts_restore(ipl_t ipl)
{
	if (ipl & PSR_I_MASK)
		(void) interrupts_enable();
	else
		(void) interrupts_disable();
}
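
/*
* Example (illustrative sketch): the usual save/restore pattern for a
* short critical section that must not be interrupted.
*
*	ipl_t ipl = interrupts_disable();
*	... access state shared with interrupt handlers ...
*	interrupts_restore(ipl);
*/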
 
/** Return interrupt priority level.
*
* @return PSR.
*/
static inline ipl_t interrupts_read(void)
{
	return (ipl_t) psr_read();
}
 
/** Disable protection key checking. */
static inline void pk_disable(void)
{
	__asm__ volatile ("rsm %0\n" : : "i" (PSR_PK_MASK));
}
 
extern void cpu_halt(void);
extern void cpu_sleep(void);
extern void asm_delay_loop(__u32 t);
 
extern void switch_to_userspace(__address entry, __address sp, __address bsp, __address uspace_uarg, __u64 ipsr, __u64 rsc);
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/fpu_context.h
0,0 → 1,44
/*
* Copyright (C) 2005 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_FPU_CONTEXT_H__
#define __ia64_FPU_CONTEXT_H__
 
#define ARCH_HAS_FPU 1
#define FPU_CONTEXT_ALIGN 16
 
#include <arch/types.h>
 
#define FRS 96
 
struct fpu_context {
	__r128 fr[FRS];
};
 
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/register.h
0,0 → 1,269
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_REGISTER_H__
#define __ia64_REGISTER_H__
 
#define CR_IVR_MASK 0xf
#define PSR_IC_MASK 0x2000
#define PSR_I_MASK 0x4000
#define PSR_PK_MASK 0x8000
 
#define PSR_DT_MASK (1<<17)
#define PSR_RT_MASK (1<<27)
 
#define PSR_DFL_MASK (1<<18)
#define PSR_DFH_MASK (1<<19)
 
#define PSR_IT_MASK 0x0000001000000000
 
#define PSR_CPL_SHIFT 32
#define PSR_CPL_MASK_SHIFTED 3
 
#define PFM_MASK (~0x3fffffffff)
 
#define RSC_MODE_MASK 3
#define RSC_PL_MASK 12
 
/** Application registers. */
#define AR_KR0 0
#define AR_KR1 1
#define AR_KR2 2
#define AR_KR3 3
#define AR_KR4 4
#define AR_KR5 5
#define AR_KR6 6
#define AR_KR7 7
/* AR 8-15 reserved */
#define AR_RSC 16
#define AR_BSP 17
#define AR_BSPSTORE 18
#define AR_RNAT 19
/* AR 20 reserved */
#define AR_FCR 21
/* AR 22-23 reserved */
#define AR_EFLAG 24
#define AR_CSD 25
#define AR_SSD 26
#define AR_CFLG 27
#define AR_FSR 28
#define AR_FIR 29
#define AR_FDR 30
/* AR 31 reserved */
#define AR_CCV 32
/* AR 33-35 reserved */
#define AR_UNAT 36
/* AR 37-39 reserved */
#define AR_FPSR 40
/* AR 41-43 reserved */
#define AR_ITC 44
/* AR 45-47 reserved */
/* AR 48-63 ignored */
#define AR_PFS 64
#define AR_LC 65
#define AR_EC 66
/* AR 67-111 reserved */
/* AR 112-127 ignored */
 
/** Control registers. */
#define CR_DCR 0
#define CR_ITM 1
#define CR_IVA 2
/* CR3-CR7 reserved */
#define CR_PTA 8
/* CR9-CR15 reserved */
#define CR_IPSR 16
#define CR_ISR 17
/* CR18 reserved */
#define CR_IIP 19
#define CR_IFA 20
#define CR_ITIR 21
#define CR_IIPA 22
#define CR_IFS 23
#define CR_IIM 24
#define CR_IHA 25
/* CR26-CR63 reserved */
#define CR_LID 64
#define CR_IVR 65
#define CR_TPR 66
#define CR_EOI 67
#define CR_IRR0 68
#define CR_IRR1 69
#define CR_IRR2 70
#define CR_IRR3 71
#define CR_ITV 72
#define CR_PMV 73
#define CR_CMCV 74
/* CR75-CR79 reserved */
#define CR_LRR0 80
#define CR_LRR1 81
/* CR82-CR127 reserved */
 
#ifndef __ASM__
 
#include <arch/types.h>
 
/** Processor Status Register. */
union psr {
	__u64 value;
	struct {
		unsigned : 1;
		unsigned be : 1; /**< Big-Endian data accesses. */
		unsigned up : 1; /**< User Performance monitor enable. */
		unsigned ac : 1; /**< Alignment Check. */
		unsigned mfl : 1; /**< Lower floating-point register written. */
		unsigned mfh : 1; /**< Upper floating-point register written. */
		unsigned : 7;
		unsigned ic : 1; /**< Interruption Collection. */
		unsigned i : 1; /**< Interrupt Bit. */
		unsigned pk : 1; /**< Protection Key enable. */
		unsigned : 1;
		unsigned dt : 1; /**< Data address Translation. */
		unsigned dfl : 1; /**< Disabled Floating-point Low register set. */
		unsigned dfh : 1; /**< Disabled Floating-point High register set. */
		unsigned sp : 1; /**< Secure Performance monitors. */
		unsigned pp : 1; /**< Privileged Performance monitor enable. */
		unsigned di : 1; /**< Disable Instruction set transition. */
		unsigned si : 1; /**< Secure Interval timer. */
		unsigned db : 1; /**< Debug Breakpoint fault. */
		unsigned lp : 1; /**< Lower Privilege transfer trap. */
		unsigned tb : 1; /**< Taken Branch trap. */
		unsigned rt : 1; /**< Register Stack Translation. */
		unsigned : 4;
		unsigned cpl : 2; /**< Current Privilege Level. */
		unsigned is : 1; /**< Instruction Set. */
		unsigned mc : 1; /**< Machine Check abort mask. */
		unsigned it : 1; /**< Instruction address Translation. */
		unsigned id : 1; /**< Instruction Debug fault disable. */
		unsigned da : 1; /**< Disable Data Access and Dirty-bit faults. */
		unsigned dd : 1; /**< Data Debug fault disable. */
		unsigned ss : 1; /**< Single Step enable. */
		unsigned ri : 2; /**< Restart Instruction. */
		unsigned ed : 1; /**< Exception Deferral. */
		unsigned bn : 1; /**< Register Bank. */
		unsigned ia : 1; /**< Disable Instruction Access-bit faults. */
	} __attribute__ ((packed));
};
typedef union psr psr_t;
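
/*
* Example (illustrative sketch; psr_read() is declared in <arch/asm.h>):
* decode the live PSR through the union instead of open-coding bit masks.
*
*	psr_t psr;
*
*	psr.value = psr_read();
*	if (psr.i && psr.cpl == 0) {
*		// interrupts enabled while running at the most privileged level
*	}
*/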
 
/** Register Stack Configuration Register */
union rsc {
	__u64 value;
	struct {
		unsigned mode : 2;
		unsigned pl : 2; /**< Privilege Level. */
		unsigned be : 1; /**< Big-endian. */
		unsigned : 11;
		unsigned loadrs : 14;
	} __attribute__ ((packed));
};
typedef union rsc rsc_t;
 
/** External Interrupt Vector Register */
union cr_ivr {
	__u8 vector;
	__u64 value;
};
 
typedef union cr_ivr cr_ivr_t;
 
/** Task Priority Register */
union cr_tpr {
	struct {
		unsigned : 4;
		unsigned mic : 4; /**< Mask Interrupt Class. */
		unsigned : 8;
		unsigned mmi : 1; /**< Mask Maskable Interrupts. */
	} __attribute__ ((packed));
	__u64 value;
};
 
typedef union cr_tpr cr_tpr_t;
 
/** Interval Timer Vector */
union cr_itv {
	struct {
		unsigned vector : 8;
		unsigned : 4;
		unsigned : 1;
		unsigned : 3;
		unsigned m : 1; /**< Mask. */
	} __attribute__ ((packed));
	__u64 value;
};
 
typedef union cr_itv cr_itv_t;
 
/** Interruption Status Register */
union cr_isr {
	struct {
		union {
			/** General Exception code field structuring. */
			struct {
				unsigned ge_na : 4;
				unsigned ge_code : 4;
			} __attribute__ ((packed));
			__u16 code;
		};
		__u8 vector;
		unsigned : 8;
		unsigned x : 1; /**< Execute exception. */
		unsigned w : 1; /**< Write exception. */
		unsigned r : 1; /**< Read exception. */
		unsigned na : 1; /**< Non-access exception. */
		unsigned sp : 1; /**< Speculative load exception. */
		unsigned rs : 1; /**< Register stack. */
		unsigned ir : 1; /**< Incomplete Register frame. */
		unsigned ni : 1; /**< Nested Interruption. */
		unsigned so : 1; /**< IA-32 Supervisor Override. */
		unsigned ei : 2; /**< Excepting Instruction. */
		unsigned ed : 1; /**< Exception Deferral. */
		unsigned : 20;
	} __attribute__ ((packed));
	__u64 value;
};
 
typedef union cr_isr cr_isr_t;
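
/*
* Example (illustrative sketch; the GE_* codes live in
* <arch/interrupt.h>): pick apart cr.isr in a General Exception handler.
*
*	static void example_classify(cr_isr_t isr)
*	{
*		if (isr.ge_code == GE_ILLEGALOP) {
*			// the faulting bundle contained an illegal operation
*		}
*		if (isr.ei == 1) {
*			// the excepting instruction was in slot 1 of its bundle
*		}
*	}
*/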
 
/** CPUID Register 3 */
union cpuid3 {
	struct {
		__u8 number;
		__u8 revision;
		__u8 model;
		__u8 family;
		__u8 archrev;
	} __attribute__ ((packed));
	__u64 value;
};
 
typedef union cpuid3 cpuid3_t;
 
#endif /* !__ASM__ */
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/types.h
0,0 → 1,62
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __TYPES_H__
#define __TYPES_H__
 
#define NULL 0
 
typedef signed char __s8;
typedef signed short int __s16;
typedef signed int __s32;
typedef signed long __s64;
 
typedef unsigned char __u8;
typedef unsigned short __u16;
typedef unsigned int __u32;
typedef unsigned long __u64;
 
 
/* Raw fixed-width types; an __r128 holds the 128-bit image of one floating-point register. */
typedef unsigned char __r8;
typedef unsigned short __r16;
typedef unsigned int __r32;
typedef unsigned long __r64;

typedef struct __r128 {
	__r64 lo;
	__r64 hi;
} __r128;
 
 
typedef __u64 __address;
typedef __u64 pfn_t;
 
typedef __u64 ipl_t;
 
typedef __u64 __native;
typedef __s64 __snative;
 
typedef struct pte pte_t;
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/elf.h
0,0 → 1,36
/*
* Copyright (C) 2006 Sergey Bondari
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_ELF_H__
#define __ia64_ELF_H__
 
#define ELF_MACHINE EM_IA_64
#define ELF_DATA_ENCODING ELFDATA2LSB
#define ELF_CLASS ELFCLASS64
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/pal/pal.h
0,0 → 1,101
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_PAL_H__
#define __ia64_PAL_H__
 
#define PAL_OK 0 /**< Call completed without error. */
#define PAL_UNIMPL -1 /**< Unimplemented procedure. */
#define PAL_INVARG -2 /**< Invalid argument. */
#define PAL_ERR -3 /**< Call could not be completed without error. */
 
/** These are the indices for PAL_PROC. */
#define PAL_CACHE_FLUSH 1
#define PAL_CACHE_INFO 2
#define PAL_CACHE_INIT 3
#define PAL_CACHE_PROT_INFO 38
#define PAL_CACHE_SHARED_INFO 43
#define PAL_CACHE_SUMMARY 4
 
#define PAL_MEM_ATTRIB 5
#define PAL_PREFETCH_VISIBILITY 41
#define PAL_PTCE_INFO 6
#define PAL_VM_INFO 7
#define PAL_VM_PAGE_SIZE 34
#define PAL_VM_SUMMARY 8
#define PAL_VM_TR_READ 261
 
#define PAL_BUS_GET_FEATURES 9
#define PAL_BUS_SET_FEATURES 10
#define PAL_DEBUG_INFO 11
#define PAL_FIXED_ADDR 12
#define PAL_FREQ_BASE 13
#define PAL_FREQ_RATIOS 14
#define PAL_LOGICAL_TO_PHYSICAL 42
#define PAL_PERF_MON_INFO 15
#define PAL_PLATFORM_ADDR 16
#define PAL_PROC_GET_FEATURES 17
#define PAL_PROC_SET_FEATURES 18
#define PAL_REGISTER_INFO 39
#define PAL_RSE_INFO 19
#define PAL_VERSION 20
 
#define PAL_MC_CLEAR_LOG 21
#define PAL_MC_DRAIN 22
#define PAL_MC_DYNAMIC_STATE 24
#define PAL_MC_ERROR_INFO 25
#define PAL_MC_EXPECTED 23
#define PAL_MC_REGISTER_MEM 27
#define PAL_MC_RESUME 26
 
#define PAL_HALT 28
#define PAL_HALT_INFO 257
#define PAL_HALT_LIGHT 29
 
#define PAL_CACHE_LINE_INIT 31
#define PAL_CACHE_READ 259
#define PAL_CACHE_WRITE 260
#define PAL_TEST_INFO 37
#define PAL_TEST_PROC 258
 
#define PAL_COPY_INFO 30
#define PAL_COPY_PAL 256
#define PAL_ENTER_IA_32_ENV 33
#define PAL_PMI_ENTRYPOINT 32
 
/** Ski PTCE data. */
#define PAL_PTCE_INFO_BASE() (0x100000000LL)
#define PAL_PTCE_INFO_COUNT1() (2)
#define PAL_PTCE_INFO_COUNT2() (3)
#define PAL_PTCE_INFO_STRIDE1() (0x10000000)
#define PAL_PTCE_INFO_STRIDE2() (0x2000)
 
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/memstr.h
0,0 → 1,39
/*
* Copyright (C) 2005 Sergey Bondari
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_MEMSTR_H__
#define __ia64_MEMSTR_H__
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(__address dst, size_t cnt, __u16 x);
extern void memsetb(__address dst, size_t cnt, __u8 x);
 
extern int memcmp(__address src, __address dst, int cnt);
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/ski/ski.h
0,0 → 1,41
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __SKI_H__
#define __SKI_H__
 
#include <arch/types.h>
 
#define SKI_INIT_CONSOLE 20
#define SKI_GETCHAR 21
#define SKI_PUTCHAR 31
 
extern void ski_init_console(void);
extern void poll_keyboard(void);
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/debug.h
0,0 → 1,32
/*
* Copyright (C) 2005
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_DEBUG_H__
#define __ia64_DEBUG_H__
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/drivers/it.h
0,0 → 1,43
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_IT_H__
#define __ia64_IT_H__
 
/*
* Unfortunately, Ski does not emulate PAL,
* so we can't read the real frequency ratios
* from firmware.
*/
#define IT_DELTA 100000
 
extern void it_init(void);
extern void it_interrupt(void);
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/cpu.h
0,0 → 1,60
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_CPU_H__
#define __ia64_CPU_H__
 
#include <arch/types.h>
#include <typedefs.h>
#include <arch/register.h>
 
#define FAMILY_ITANIUM 0x7
#define FAMILY_ITANIUM2 0x1f
 
struct cpu_arch {
__u64 cpuid0;
__u64 cpuid1;
cpuid3_t cpuid3;
};
 
/** Read CPUID register.
*
* @param n CPUID register number.
*
* @return Value of CPUID[n] register.
*/
static inline __u64 cpuid_read(int n)
{
__u64 v;
__asm__ volatile ("mov %0 = cpuid[%1]\n" : "=r" (v) : "r" (n));
return v;
}
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/stack.h
0,0 → 1,37
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_STACK_H__
#define __ia64_STACK_H__
 
#define STACK_ITEM_SIZE 8
#define STACK_ALIGNMENT 16
#define STACK_SCRATCH_AREA_SIZE 16
#define REGISTER_STACK_ALIGNMENT 8
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/barrier.h
0,0 → 1,45
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_BARRIER_H__
#define __ia64_BARRIER_H__
 
/*
* TODO: Implement true IA-64 memory barriers for macros below.
*/
#define CS_ENTER_BARRIER() memory_barrier()
#define CS_LEAVE_BARRIER() memory_barrier()
 
#define memory_barrier() __asm__ volatile ("mf\n" ::: "memory")
#define read_barrier() memory_barrier()
#define write_barrier() memory_barrier()
 
#define srlz_i() __asm__ volatile (";; srlz.i ;;\n" ::: "memory")
#define srlz_d() __asm__ volatile (";; srlz.d\n" ::: "memory")
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/byteorder.h
0,0 → 1,36
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_BYTEORDER_H__
#define __ia64_BYTEORDER_H__
 
/* IA-64 is little-endian */
#define __native_le2host(n) (n)
#define __u64_le2host(n) (n)
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/arg.h
0,0 → 1,34
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ia64_ARG_H__
#define __ia64_ARG_H__
 
#include <stdarg.h>
 
#endif
/tags/0.1.1/kernel/arch/ia64/include/smp/atomic.h
--- src/asm.S (nonexistent)
+++ src/asm.S (revision 1968)
@@ -0,0 +1,156 @@
+#
+# Copyright (C) 2005 Jakub Jermar
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# - The name of the author may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#include <arch/register.h>
+
+.text
+
+/** Copy memory from/to userspace.
+ *
+ * This memcpy() has been taken from the assembler output of
+ * the generic _memcpy() and modified to have the failover part.
+ *
+ * @param in0 Destination address.
+ * @param in1 Source address.
+ * @param in2 Number of bytes to copy.
+ */
+.global memcpy
+.global memcpy_from_uspace
+.global memcpy_to_uspace
+.global memcpy_from_uspace_failover_address
+.global memcpy_to_uspace_failover_address
+memcpy:
+memcpy_from_uspace:
+memcpy_to_uspace:
+ alloc loc0 = ar.pfs, 3, 1, 0, 0
+
+ shr.u r18 = in2, 3
+ mov r8 = in1 ;; /* prepare to return in1 on success */
+ cmp.ne p6, p7 = 0, r18 ;;
+(p7) mov r17 = r0
+(p7) br.cond.dptk 1f ;;
+ mov r16 = r0
+ mov r17 = r0 ;;
+
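+	/* Doubleword loop: copy in2 / 8 eight-byte items from the source (r8) to the destination (in0). */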
+0:
+ shladd r14 = r17, 3, r0
+ adds r16 = 1, r16 ;;
+ add r15 = r14, r8
+ sxt4 r17 = r16
+ add r14 = r14, in0 ;;
+ ld8 r15 = [r15]
+ cmp.gtu p6, p7 = r18, r17 ;;
+ st8 [r14] = r15
+(p6) br.cond.dptk 0b
+
+1:
+ and in2 = 7, in2 ;;
+ cmp.eq p6, p7 = 0, in2 ;;
+(p6) mov ar.pfs = loc0
+(p6) br.ret.dptk.many rp
+ shladd r14 = r17, 3, r0
+ mov r16 = r0
+ mov r17 = r0 ;;
+ add in0 = in0, r14
+ add r18 = r8, r14 ;;
+
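+	/* Byte loop: copy the remaining in2 % 8 bytes (in2 was masked with 7 above). */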
+2:
+ add r14 = r16, r18
+ adds r17 = 1, r17
+ add r15 = in0, r16 ;;
+ ld1 r14 = [r14]
+ sxt4 r16 = r17 ;;
+ st1 [r15] = r14
+ cmp.gtu p6, p7 = in2, r16
+(p6) br.cond.dptk 2b
+
+ mov ar.pfs = loc0
+ br.ret.sptk.many rp
+
+memcpy_from_uspace_failover_address:
+memcpy_to_uspace_failover_address:
+ mov r8 = r0 /* return 0 on failure */
+ mov ar.pfs = loc0
+ br.ret.sptk.many rp
+
+.global memsetb
+memsetb:
+ br _memsetb
+
+.global cpu_halt
+cpu_halt:
+ br cpu_halt
+
+.global panic_printf
+panic_printf:
+ {
+ br.call.sptk.many b0=printf
+ }
+ br halt
+
+/** Switch to userspace - low level code.
+ *
+ * @param in0 Userspace entry point address.
+ * @param in1 Userspace stack pointer address.
+ * @param in2 Userspace register stack pointer address.
+ * @param in3 Userspace address of thread uspace_arg_t structure.
+ * @param in4 Value to be stored in IPSR.
+ * @param in5 Value to be stored in RSC.
+ */
+.global switch_to_userspace
+switch_to_userspace:
+ alloc loc0 = ar.pfs, 6, 3, 0, 0
+ rsm (PSR_IC_MASK | PSR_I_MASK) /* disable interruption collection and interrupts */
+ srlz.d ;;
+ srlz.i ;;
+
+ mov cr.ipsr = in4
+ mov cr.iip = in0
+ mov r12 = in1
+
+ xor r1 = r1, r1
+
+ mov loc1 = cr.ifs
+ movl loc2 = PFM_MASK ;;
+ and loc1 = loc2, loc1 ;;
+ mov cr.ifs = loc1 ;; /* prevent decrementing BSP by rfi */
+
+ invala
+
+ mov loc1 = ar.rsc ;;
+ and loc1 = ~3, loc1 ;;
+ mov ar.rsc = loc1 ;; /* put RSE into enforced lazy mode */
+
+ flushrs ;;
+
+ mov ar.bspstore = in2 ;;
+ mov ar.rsc = in5 ;;
+
+ mov r8 = in3
+
+ rfi ;;
/tags/0.1.1/kernel/arch/ia64/src/mm/vhpt.c
0,0 → 1,88
/*
* Copyright (C) 2006 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/mm/vhpt.h>
#include <mm/frame.h>
#include <print.h>
 
 
static vhpt_entry_t* vhpt_base;
 
__address vhpt_set_up(void)
{
vhpt_base = (vhpt_entry_t *) PA2KA(PFN2ADDR(frame_alloc(VHPT_WIDTH - FRAME_WIDTH, FRAME_KA)));
if (!vhpt_base)
panic("Kernel configured with VHPT but no memory for table.");
vhpt_invalidate_all();
return (__address) vhpt_base;
}
 
 
void vhpt_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
region_register rr_save, rr;
index_t vrn;
rid_t rid;
__u64 tag;
 
vhpt_entry_t *ventry;
 
 
/*
* Temporarily switch the region register indexed by vrn to the
* required RID so that thash() and ttag() below are computed for
* the right address space; the original value is restored afterwards.
*/
vrn = va >> VRN_SHIFT;
rid = ASID2RID(asid, vrn);
rr_save.word = rr_read(vrn);
rr.word = rr_save.word;
rr.map.rid = rid;
rr_write(vrn, rr.word);
srlz_i();
ventry = (vhpt_entry_t *) thash(va);
tag = ttag(va);
rr_write(vrn, rr_save.word);
srlz_i();
srlz_d();
 
ventry->word[0] = entry.word[0];
ventry->word[1] = entry.word[1];
ventry->present.tag.tag_word = tag;
 
}
 
void vhpt_invalidate_all(void)
{
memsetb((__address) vhpt_base, 1 << VHPT_WIDTH, 0);
}
 
void vhpt_invalidate_asid(asid_t asid)
{
vhpt_invalidate_all();
}
 
 
/tags/0.1.1/kernel/arch/ia64/src/mm/tlb.c
0,0 → 1,615
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/*
* TLB management.
*/
 
#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <print.h>
#include <arch.h>
 
/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
ipl_t ipl;
__address adr;
__u32 count1, count2, stride1, stride2;
int i, j;
adr = PAL_PTCE_INFO_BASE();
count1 = PAL_PTCE_INFO_COUNT1();
count2 = PAL_PTCE_INFO_COUNT2();
stride1 = PAL_PTCE_INFO_STRIDE1();
stride2 = PAL_PTCE_INFO_STRIDE2();
ipl = interrupts_disable();
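/*
* Walk the PTCE purge sequence prescribed by the (Ski-hardwired) PAL
* values: count1 x count2 ptc.e operations, advancing by stride2 in
* the inner loop and by an additional stride1 after each inner pass.
*/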
 
for(i = 0; i < count1; i++) {
for(j = 0; j < count2; j++) {
__asm__ volatile (
"ptc.e %0 ;;"
:
: "r" (adr)
);
adr += stride2;
}
adr += stride1;
}
 
interrupts_restore(ipl);
 
srlz_d();
srlz_i();
#ifdef CONFIG_VHPT
vhpt_invalidate_all();
#endif
}
 
/** Invalidate entries belonging to an address space.
*
* @param asid Address space identifier.
*/
void tlb_invalidate_asid(asid_t asid)
{
tlb_invalidate_all();
}
 
 
/** Invalidate TLB entries for a range of pages belonging to one address space.
*
* @param asid Address space identifier.
* @param page Address of the first page whose TLB entries are to be invalidated.
* @param cnt Number of pages.
*/
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
region_register rr;
bool restore_rr = false;
int b = 0;
int c = cnt;
 
__address va;
va = page;
 
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
*/
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
srlz_d();
srlz_i();
}
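/*
* Compute b = floor(log2(cnt)) / 2; the switch below maps it to a
* purge page size ps (aligning va down where needed) for the ptc.l
* purges issued in the loop that follows.
*/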
while (c >>= 1)
b++;
b >>= 1;
__u64 ps;
switch (b) {
case 0: /*cnt 1-3*/
ps = PAGE_WIDTH;
break;
case 1: /*cnt 4-15*/
/*cnt=((cnt-1)/4)+1;*/
ps = PAGE_WIDTH+2;
va &= ~((1<<ps)-1);
break;
case 2: /*cnt 16-63*/
/*cnt=((cnt-1)/16)+1;*/
ps = PAGE_WIDTH+4;
va &= ~((1<<ps)-1);
break;
case 3: /*cnt 64-255*/
/*cnt=((cnt-1)/64)+1;*/
ps = PAGE_WIDTH+6;
va &= ~((1<<ps)-1);
break;
case 4: /*cnt 256-1023*/
/*cnt=((cnt-1)/256)+1;*/
ps = PAGE_WIDTH+8;
va &= ~((1<<ps)-1);
break;
case 5: /*cnt 1024-4095*/
/*cnt=((cnt-1)/1024)+1;*/
ps = PAGE_WIDTH+10;
va &= ~((1<<ps)-1);
break;
case 6: /*cnt 4096-16383*/
/*cnt=((cnt-1)/4096)+1;*/
ps = PAGE_WIDTH+12;
va &= ~((1<<ps)-1);
break;
case 7: /*cnt 16384-65535*/
case 8: /*cnt 65536-(256K-1)*/
/*cnt=((cnt-1)/16384)+1;*/
ps = PAGE_WIDTH+14;
va &= ~((1<<ps)-1);
break;
default:
/*cnt=((cnt-1)/(16384*16))+1;*/
ps = PAGE_WIDTH+18;
va &= ~((1<<ps)-1);
break;
}
/*cnt+=(page!=va);*/
for (; va < (page + cnt * PAGE_SIZE); va += (1 << ps)) {
__asm__ volatile (
"ptc.l %0,%1;;"
:
: "r" (va), "r" (ps<<2)
);
}
srlz_d();
srlz_i();
if (restore_rr) {
rr_write(VA2VRN(va), rr.word);
srlz_d();
srlz_i();
}
}
 
 
/** Insert data into data translation cache.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
*/
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
tc_mapping_insert(va, asid, entry, true);
}
 
/** Insert data into instruction translation cache.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
*/
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
tc_mapping_insert(va, asid, entry, false);
}
 
/** Insert data into instruction or data translation cache.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
*/
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
region_register rr;
bool restore_rr = false;
 
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
*/
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
srlz_d();
srlz_i();
}
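/*
* Interruption collection (PSR.ic) is disabled while cr.ifa and
* cr.itir are loaded and the itc insertion is issued, so that a
* nested interruption cannot clobber them; predicates p6/p7 select
* itc.i or itc.d according to the dtc argument.
*/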
__asm__ volatile (
"mov r8=psr;;\n"
"rsm %0;;\n" /* PSR_IC_MASK */
"srlz.d;;\n"
"srlz.i;;\n"
"mov cr.ifa=%1\n" /* va */
"mov cr.itir=%2;;\n" /* entry.word[1] */
"cmp.eq p6,p7 = %4,r0;;\n" /* decide between itc and dtc */
"(p6) itc.i %3;;\n"
"(p7) itc.d %3;;\n"
"mov psr.l=r8;;\n"
"srlz.d;;\n"
:
: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
: "p6", "p7", "r8"
);
if (restore_rr) {
rr_write(VA2VRN(va), rr.word);
srlz_d();
srlz_i();
}
}
 
/** Insert data into instruction translation register.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param tr Translation register.
*/
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
tr_mapping_insert(va, asid, entry, false, tr);
}
 
/** Insert data into data translation register.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param tr Translation register.
*/
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
tr_mapping_insert(va, asid, entry, true, tr);
}
 
/** Insert data into instruction or data translation register.
*
* @param va Virtual page address.
* @param asid Address space identifier.
* @param entry The rest of TLB entry as required by TLB insertion format.
* @param dtr If true, insert into data translation register, use instruction translation register otherwise.
* @param tr Translation register.
*/
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
region_register rr;
bool restore_rr = false;
 
rr.word = rr_read(VA2VRN(va));
if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
/*
* The selected region register does not contain required RID.
* Save the old content of the register and replace the RID.
*/
region_register rr0;
 
rr0 = rr;
rr0.map.rid = ASID2RID(asid, VA2VRN(va));
rr_write(VA2VRN(va), rr0.word);
srlz_d();
srlz_i();
}
 
__asm__ volatile (
"mov r8=psr;;\n"
"rsm %0;;\n" /* PSR_IC_MASK */
"srlz.d;;\n"
"srlz.i;;\n"
"mov cr.ifa=%1\n" /* va */
"mov cr.itir=%2;;\n" /* entry.word[1] */
"cmp.eq p6,p7=%5,r0;;\n" /* decide between itr and dtr */
"(p6) itr.i itr[%4]=%3;;\n"
"(p7) itr.d dtr[%4]=%3;;\n"
"mov psr.l=r8;;\n"
"srlz.d;;\n"
:
: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
: "p6", "p7", "r8"
);
if (restore_rr) {
rr_write(VA2VRN(va), rr.word);
srlz_d();
srlz_i();
}
}
 
/** Insert data into DTLB.
*
* @param page Virtual page address.
* @param frame Physical address of the frame to be mapped.
* @param dtr If true, insert into data translation register, use data translation cache otherwise.
* @param tr Translation register if dtr is true, ignored otherwise.
*/
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
tlb_entry_t entry;
entry.word[0] = 0;
entry.word[1] = 0;
entry.p = true; /* present */
entry.ma = MA_WRITEBACK;
entry.a = true; /* already accessed */
entry.d = true; /* already dirty */
entry.pl = PL_KERNEL;
entry.ar = AR_READ | AR_WRITE;
entry.ppn = frame >> PPN_SHIFT;
entry.ps = PAGE_WIDTH;
if (dtr)
dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
else
dtc_mapping_insert(page, ASID_KERNEL, entry);
}
 
/** Copy content of PTE into data translation cache.
*
* @param t PTE.
*/
void dtc_pte_copy(pte_t *t)
{
tlb_entry_t entry;
 
entry.word[0] = 0;
entry.word[1] = 0;
entry.p = t->p;
entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
entry.a = t->a;
entry.d = t->d;
entry.pl = t->k ? PL_KERNEL : PL_USER;
entry.ar = t->w ? AR_WRITE : AR_READ;
entry.ppn = t->frame >> PPN_SHIFT;
entry.ps = PAGE_WIDTH;
dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}
 
/** Copy content of PTE into instruction translation cache.
*
* @param t PTE.
*/
void itc_pte_copy(pte_t *t)
{
tlb_entry_t entry;
 
entry.word[0] = 0;
entry.word[1] = 0;
ASSERT(t->x);
entry.p = t->p;
entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
entry.a = t->a;
entry.pl = t->k ? PL_KERNEL : PL_USER;
entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
entry.ppn = t->frame >> PPN_SHIFT;
entry.ps = PAGE_WIDTH;
itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}
 
/** Instruction TLB fault handler for faults with VHPT turned off.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
{
region_register rr;
__address va;
pte_t *t;
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
if (t) {
/*
* The mapping was found in software page hash table.
* Insert it into instruction translation cache.
*/
itc_pte_copy(t);
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, istate->cr_ifa, rr.map.rid, istate->cr_iip);
}
}
}
 
/** Data TLB fault handler for faults with VHPT turned off.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void alternate_data_tlb_fault(__u64 vector, istate_t *istate)
{
region_register rr;
rid_t rid;
__address va;
pte_t *t;
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
rid = rr.map.rid;
if (RID2ASID(rid) == ASID_KERNEL) {
if (VA2VRN(va) == VRN_KERNEL) {
/*
* Provide KA2PA(identity) mapping for faulting piece of
* kernel address space.
*/
dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
return;
}
}
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
if (t) {
/*
* The mapping was found in software page hash table.
* Insert it into data translation cache.
*/
dtc_pte_copy(t);
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
}
}
}
 
/** Data nested TLB fault handler.
*
* This fault should not occur.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
{
panic("%s\n", __FUNCTION__);
}
 
/** Data Dirty bit fault handler.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
{
pte_t *t;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
/*
* Update the Dirty bit in page tables and reinsert
* the mapping into DTC.
*/
t->d = true;
dtc_pte_copy(t);
}
page_table_unlock(AS, true);
}
 
/** Instruction access bit fault handler.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
{
pte_t *t;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
/*
* Update the Accessed bit in page tables and reinsert
* the mapping into ITC.
*/
t->a = true;
itc_pte_copy(t);
}
page_table_unlock(AS, true);
}
 
/** Data access bit fault handler.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void data_access_bit_fault(__u64 vector, istate_t *istate)
{
pte_t *t;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, istate->cr_ifa);
ASSERT(t && t->p);
if (t && t->p) {
/*
* Update the Accessed bit in page tables and reinsert
* the mapping into DTC.
*/
t->a = true;
dtc_pte_copy(t);
}
page_table_unlock(AS, true);
}
 
/** Page not present fault handler.
*
* @param vector Interruption vector.
* @param istate Structure with saved interruption state.
*/
void page_not_present(__u64 vector, istate_t *istate)
{
region_register rr;
__address va;
pte_t *t;
va = istate->cr_ifa; /* faulting address */
rr.word = rr_read(VA2VRN(va));
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
ASSERT(t);
if (t->p) {
/*
* If the Present bit is set in page hash table, just copy it
* and update ITC/DTC.
*/
if (t->x)
itc_pte_copy(t);
else
dtc_pte_copy(t);
page_table_unlock(AS, true);
} else {
page_table_unlock(AS, true);
if (as_page_fault(va, istate) == AS_PF_FAULT) {
panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rr.map.rid);
}
}
}
/tags/0.1.1/kernel/arch/ia64/src/mm/page.c
0,0 → 1,258
/*
* Copyright (C) 2006 Jakub Jermar
* Copyright (C) 2006 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/mm/page.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/mm/vhpt.h>
#include <arch/types.h>
#include <typedefs.h>
#include <print.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <config.h>
#include <panic.h>
#include <arch/asm.h>
#include <arch/barrier.h>
#include <memstr.h>
 
static void set_environment(void);
 
/** Initialize ia64 virtual address translation subsystem. */
void page_arch_init(void)
{
page_mapping_operations = &ht_mapping_operations;
pk_disable();
set_environment();
}
 
/** Initialize VHPT and region registers. */
void set_environment(void)
{
region_register rr;
pta_register pta;
int i;
#ifdef CONFIG_VHPT
__address vhpt_base;
#endif
 
/*
* First set up kernel region register.
* This is redundant (see start.S) but we keep it here just to be sure.
*/
rr.word = rr_read(VRN_KERNEL);
rr.map.ve = 0; /* disable VHPT walker */
rr.map.ps = PAGE_WIDTH;
rr.map.rid = ASID2RID(ASID_KERNEL, VRN_KERNEL);
rr_write(VRN_KERNEL, rr.word);
srlz_i();
srlz_d();
 
/*
* Now set up the rest of the region registers.
*/
for(i = 0; i < REGION_REGISTERS; i++) {
/* skip kernel rr */
if (i == VRN_KERNEL)
continue;
rr.word = rr_read(i);
rr.map.ve = 0; /* disable VHPT walker */
rr.map.rid = RID_KERNEL;
rr.map.ps = PAGE_WIDTH;
rr_write(i, rr.word);
srlz_i();
srlz_d();
}
 
#ifdef CONFIG_VHPT
vhpt_base = vhpt_set_up();
#endif
/*
* Set up PTA register.
*/
pta.word = pta_read();
#ifndef CONFIG_VHPT
pta.map.ve = 0; /* disable VHPT walker */
pta.map.base = 0 >> PTA_BASE_SHIFT;
#else
pta.map.ve = 1; /* enable VHPT walker */
pta.map.base = vhpt_base >> PTA_BASE_SHIFT;
#endif
pta.map.vf = 1; /* large entry format */
pta.map.size = VHPT_WIDTH;
pta_write(pta.word);
srlz_i();
srlz_d();
}
 
/** Calculate address of collision chain from VPN and ASID.
*
* Interrupts must be disabled.
*
* @param page Address of virtual page including VRN bits.
* @param asid Address space identifier.
*
* @return VHPT entry address.
*/
vhpt_entry_t *vhpt_hash(__address page, asid_t asid)
{
region_register rr_save, rr;
index_t vrn;
rid_t rid;
vhpt_entry_t *v;
 
vrn = page >> VRN_SHIFT;
rid = ASID2RID(asid, vrn);
rr_save.word = rr_read(vrn);
if (rr_save.map.rid == rid) {
/*
* The RID is already in place, compute thash and return.
*/
v = (vhpt_entry_t *) thash(page);
return v;
}
/*
* The RID must be written to some region register.
* To speed things up, register indexed by vrn is used.
*/
rr.word = rr_save.word;
rr.map.rid = rid;
rr_write(vrn, rr.word);
srlz_i();
v = (vhpt_entry_t *) thash(page);
rr_write(vrn, rr_save.word);
srlz_i();
srlz_d();
 
return v;
}
 
/** Compare ASID and VPN against PTE.
*
* Interrupts must be disabled.
*
* @param page Address of virtual page including VRN bits.
* @param asid Address space identifier.
* @param v VHPT entry to compare against.
*
* @return True if page and asid match those of the entry v, false otherwise.
*/
bool vhpt_compare(__address page, asid_t asid, vhpt_entry_t *v)
{
region_register rr_save, rr;
index_t vrn;
rid_t rid;
bool match;
 
ASSERT(v);
 
vrn = page >> VRN_SHIFT;
rid = ASID2RID(asid, vrn);
rr_save.word = rr_read(vrn);
if (rr_save.map.rid == rid) {
/*
* The RID is already in place, compare ttag with the entry's tag and return.
*/
return ttag(page) == v->present.tag.tag_word;
}
/*
* The RID must be written to some region register.
* To speed things up, register indexed by vrn is used.
*/
rr.word = rr_save.word;
rr.map.rid = rid;
rr_write(vrn, rr.word);
srlz_i();
match = (ttag(page) == v->present.tag.tag_word);
rr_write(vrn, rr_save.word);
srlz_i();
srlz_d();
 
return match;
}
 
/** Set up one VHPT entry.
*
* @param v VHPT entry to be set up.
* @param page Virtual address of the page mapped by the entry.
* @param asid Address space identifier of the address space to which page belongs.
* @param frame Physical address of the frame to which the page is mapped.
* @param flags Different flags for the mapping.
*/
void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags)
{
region_register rr_save, rr;
index_t vrn;
rid_t rid;
__u64 tag;
 
ASSERT(v);
 
vrn = page >> VRN_SHIFT;
rid = ASID2RID(asid, vrn);
/*
* Compute ttag.
*/
rr_save.word = rr_read(vrn);
rr.word = rr_save.word;
rr.map.rid = rid;
rr_write(vrn, rr.word);
srlz_i();
tag = ttag(page);
rr_write(vrn, rr_save.word);
srlz_i();
srlz_d();
/*
* Clear the entry.
*/
v->word[0] = 0;
v->word[1] = 0;
v->word[2] = 0;
v->word[3] = 0;
v->present.p = true;
v->present.ma = (flags & PAGE_CACHEABLE) ? MA_WRITEBACK : MA_UNCACHEABLE;
v->present.a = false; /* not accessed */
v->present.d = false; /* not dirty */
v->present.pl = (flags & PAGE_USER) ? PL_USER : PL_KERNEL;
v->present.ar = (flags & PAGE_WRITE) ? AR_WRITE : AR_READ;
v->present.ar |= (flags & PAGE_EXEC) ? AR_EXECUTE : 0;
v->present.ppn = frame >> PPN_SHIFT;
v->present.ed = false; /* exception not deferred */
v->present.ps = PAGE_WIDTH;
v->present.key = 0;
v->present.tag.tag_word = tag;
}
/tags/0.1.1/kernel/arch/ia64/src/mm/as.c
0,0 → 1,80
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/mm/as.h>
#include <arch/mm/asid.h>
#include <arch/mm/page.h>
#include <genarch/mm/as_ht.h>
#include <genarch/mm/asid_fifo.h>
#include <mm/asid.h>
#include <arch.h>
#include <arch/barrier.h>
#include <synch/spinlock.h>
 
/** Architecture dependent address space init. */
void as_arch_init(void)
{
as_operations = &as_ht_operations;
asid_fifo_init();
}
 
/** Prepare registers for switching to another address space.
*
* @param as Address space.
*/
void as_install_arch(as_t *as)
{
ipl_t ipl;
region_register rr;
int i;
ipl = interrupts_disable();
spinlock_lock(&as->lock);
ASSERT(as->asid != ASID_INVALID);
/*
* Load respective ASID (7 consecutive RIDs) to
* region registers.
*/
for (i = 0; i < REGION_REGISTERS; i++) {
if (i == VRN_KERNEL)
continue;
rr.word = rr_read(i);
rr.map.ve = false; /* disable VHPT walker */
rr.map.rid = ASID2RID(as->asid, i);
rr.map.ps = PAGE_WIDTH;
rr_write(i, rr.word);
}
srlz_d();
srlz_i();
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
}
/tags/0.1.1/kernel/arch/ia64/src/mm/frame.c
0,0 → 1,49
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/mm/frame.h>
#include <mm/frame.h>
#include <config.h>
#include <panic.h>
 
/*
* This is Ski-specific and certainly not sufficient
* for real ia64 systems, which provide a memory map.
*/
#define ROM_BASE 0xa0000
#define ROM_SIZE (384*1024)
 
void frame_arch_init(void)
{
zone_create(0, config.memory_size >> FRAME_WIDTH, 1, 0);
/*
* Blacklist ROM regions.
*/
frame_mark_unavailable(ADDR2PFN(ROM_BASE), ROM_SIZE >> FRAME_WIDTH);
}
/tags/0.1.1/kernel/arch/ia64/src/ddi/ddi.c
0,0 → 1,47
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <ddi/ddi.h>
#include <proc/task.h>
#include <arch/types.h>
#include <typedefs.h>
 
/** Enable I/O space range for task.
*
* Interrupts are disabled and task is locked.
*
* @param task Task.
* @param ioaddr Starting I/O space address.
* @param size Size of the enabled I/O range.
*
* @return 0 on success or an error code from errno.h.
*/
int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size)
{
return 0;
}
/tags/0.1.1/kernel/arch/ia64/src/interrupt.c
0,0 → 1,250
/*
* Copyright (C) 2005 Jakub Jermar
* Copyright (C) 2005 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
 
#include <arch/interrupt.h>
#include <panic.h>
#include <print.h>
#include <console/console.h>
#include <arch/types.h>
#include <arch/asm.h>
#include <arch/barrier.h>
#include <arch/register.h>
#include <arch/drivers/it.h>
#include <arch.h>
#include <symtab.h>
#include <debug.h>
#include <syscall/syscall.h>
#include <print.h>
#include <proc/scheduler.h>
#include <ipc/sysipc.h>
 
 
#define VECTORS_64_BUNDLE 20
#define VECTORS_16_BUNDLE 48
#define VECTORS_16_BUNDLE_START 0x5000
#define VECTOR_MAX 0x7f00
 
#define BUNDLE_SIZE 16
 
char *vector_names_64_bundle[VECTORS_64_BUNDLE] = {
"VHPT Translation vector",
"Instruction TLB vector",
"Data TLB vector",
"Alternate Instruction TLB vector",
"Alternate Data TLB vector",
"Data Nested TLB vector",
"Instruction Key Miss vector",
"Data Key Miss vector",
"Dirty-Bit vector",
"Instruction Access-Bit vector",
"Data Access-Bit vector"
"Break Instruction vector",
"External Interrupt vector"
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved"
};
 
char *vector_names_16_bundle[VECTORS_16_BUNDLE] = {
"Page Not Present vector",
"Key Permission vector",
"Instruction Access rights vector",
"Data Access Rights vector",
"General Exception vector",
"Disabled FP-Register vector",
"NaT Consumption vector",
"Speculation vector",
"Reserved",
"Debug vector",
"Unaligned Reference vector",
"Unsupported Data Reference vector",
"Floating-point Fault vector",
"Floating-point Trap vector",
"Lower-Privilege Transfer Trap vector",
"Taken Branch Trap vector",
"Single STep Trap vector",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"Reserved",
"IA-32 Exception vector",
"IA-32 Intercept vector",
"IA-32 Interrupt vector",
"Reserved",
"Reserved",
"Reserved"
};
 
static char *vector_to_string(__u16 vector);
static void dump_interrupted_context(istate_t *istate);
 
char *vector_to_string(__u16 vector)
{
ASSERT(vector <= VECTOR_MAX);
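/*
* Vectors below VECTORS_16_BUNDLE_START are spaced 64 bundles
* (0x400 bytes) apart, the remaining ones 16 bundles (0x100 bytes)
* apart, hence the divisors below.
*/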
if (vector >= VECTORS_16_BUNDLE_START)
return vector_names_16_bundle[(vector-VECTORS_16_BUNDLE_START)/(16*BUNDLE_SIZE)];
else
return vector_names_64_bundle[vector/(64*BUNDLE_SIZE)];
}
 
void dump_interrupted_context(istate_t *istate)
{
char *ifa, *iipa, *iip;
 
ifa = get_symtab_entry(istate->cr_ifa);
iipa = get_symtab_entry(istate->cr_iipa);
iip = get_symtab_entry(istate->cr_iip);
 
putchar('\n');
printf("Interrupted context dump:\n");
printf("ar.bsp=%p\tar.bspstore=%p\n", istate->ar_bsp, istate->ar_bspstore);
printf("ar.rnat=%#llx\tar.rsc=%$llx\n", istate->ar_rnat, istate->ar_rsc);
printf("ar.ifs=%#llx\tar.pfs=%#llx\n", istate->ar_ifs, istate->ar_pfs);
printf("cr.isr=%#llx\tcr.ipsr=%#llx\t\n", istate->cr_isr.value, istate->cr_ipsr);
printf("cr.iip=%#llx, #%d\t(%s)\n", istate->cr_iip, istate->cr_isr.ei, iip ? iip : "?");
printf("cr.iipa=%#llx\t(%s)\n", istate->cr_iipa, iipa ? iipa : "?");
printf("cr.ifa=%#llx\t(%s)\n", istate->cr_ifa, ifa ? ifa : "?");
}
 
void general_exception(__u64 vector, istate_t *istate)
{
char *desc = "";
 
dump_interrupted_context(istate);
 
switch (istate->cr_isr.ge_code) {
case GE_ILLEGALOP:
desc = "Illegal Operation fault";
break;
case GE_PRIVOP:
desc = "Privileged Operation fault";
break;
case GE_PRIVREG:
desc = "Privileged Register fault";
break;
case GE_RESREGFLD:
desc = "Reserved Register/Field fault";
break;
case GE_DISBLDISTRAN:
desc = "Disabled Instruction Set Transition fault";
break;
case GE_ILLEGALDEP:
desc = "Illegal Dependency fault";
break;
default:
desc = "unknown";
break;
}
 
panic("General Exception (%s)\n", desc);
}
 
void fpu_enable(void);
 
void disabled_fp_register(__u64 vector, istate_t *istate)
{
#ifdef CONFIG_FPU_LAZY
scheduler_fpu_lazy_request();
#else
dump_interrupted_context(istate);
panic("Interruption: %#hx (%s)\n", (__u16) vector, vector_to_string(vector));
#endif
}
 
 
void nop_handler(__u64 vector, istate_t *istate)
{
}
 
 
 
/** Handle syscall. */
int break_instruction(__u64 vector, istate_t *istate)
{
/*
* Move to next instruction after BREAK.
*/
if (istate->cr_ipsr.ri == 2) {
istate->cr_ipsr.ri = 0;
istate->cr_iip += 16;
} else {
istate->cr_ipsr.ri++;
}
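/*
* The syscall number is passed in in4 and its arguments in in0-in3
* (see the break_instruction members of istate_t); dispatch through
* syscall_table.
*/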
 
if (istate->in4 < SYSCALL_END)
return syscall_table[istate->in4](istate->in0, istate->in1, istate->in2, istate->in3);
else
panic("Undefined syscall %d", istate->in4);
return -1;
}
 
void universal_handler(__u64 vector, istate_t *istate)
{
dump_interrupted_context(istate);
panic("Interruption: %#hx (%s)\n", (__u16) vector, vector_to_string(vector));
}
 
void external_interrupt(__u64 vector, istate_t *istate)
{
cr_ivr_t ivr;
ivr.value = ivr_read();
srlz_d();
 
switch(ivr.vector) {
case INTERRUPT_TIMER:
it_interrupt();
break;
case INTERRUPT_SPURIOUS:
printf("cpu%d: spurious interrupt\n", CPU->id);
break;
default:
panic("\nUnhandled External Interrupt Vector %d\n", ivr.vector);
break;
}
}
 
/* Reregister irq to be IPC-ready */
void irq_ipc_bind_arch(__native irq)
{
panic("not implemented\n");
/* TODO */
}
/tags/0.1.1/kernel/arch/ia64/src/ia64.c
0,0 → 1,116
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch.h>
#include <arch/ski/ski.h>
#include <arch/drivers/it.h>
#include <arch/interrupt.h>
#include <arch/barrier.h>
#include <arch/asm.h>
#include <arch/register.h>
#include <arch/types.h>
#include <arch/context.h>
#include <arch/stack.h>
#include <arch/mm/page.h>
#include <mm/as.h>
#include <config.h>
#include <userspace.h>
#include <console/console.h>
#include <proc/uarg.h>
#include <syscall/syscall.h>
 
void arch_pre_main(void)
{
/* Setup usermode init tasks. */
init.cnt = 2;
init.tasks[0].addr = INIT0_ADDRESS;
init.tasks[0].size = INIT0_SIZE;
init.tasks[1].addr = INIT1_ADDRESS;
init.tasks[1].size = INIT1_SIZE;
}
 
void arch_pre_mm_init(void)
{
/* Set Interruption Vector Address (i.e. location of interruption vector table). */
iva_write((__address) &ivt);
srlz_d();
ski_init_console();
it_init();
}
 
void arch_post_mm_init(void)
{
}
 
void arch_pre_smp_init(void)
{
}
 
void arch_post_smp_init(void)
{
}
 
/** Enter userspace and never return. */
void userspace(uspace_arg_t *kernel_uarg)
{
psr_t psr;
rsc_t rsc;
 
psr.value = psr_read();
psr.cpl = PL_USER;
psr.i = true; /* start with interrupts enabled */
psr.ic = true;
	psr.ri = 0;			/* start with instruction slot #0 */
	psr.bn = 1;			/* start in register bank 1 */
 
__asm__ volatile ("mov %0 = ar.rsc\n" : "=r" (rsc.value));
rsc.loadrs = 0;
rsc.be = false;
rsc.pl = PL_USER;
rsc.mode = 3; /* eager mode */
 
switch_to_userspace((__address) kernel_uarg->uspace_entry,
((__address) kernel_uarg->uspace_stack)+PAGE_SIZE-ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT),
((__address) kernel_uarg->uspace_stack)+PAGE_SIZE,
(__address) kernel_uarg->uspace_uarg,
psr.value, rsc.value);
 
while (1) {
;
}
}
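/*
 * For illustration: switch_to_userspace() receives the entry point, two
 * addresses within the single page set aside for the userspace stack, the
 * uarg pointer and the prepared PSR/RSC values, and is expected to finish
 * with rfi. It never returns, which is why the while (1) loop above is
 * unreachable.
 */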
 
/** Set thread-local-storage pointer.
*
* We use r13 (a.k.a. tp) for this purpose.
*/
__native sys_tls_set(__native addr)
{
return 0;
}
/tags/0.1.1/kernel/arch/ia64/src/start.S
0,0 → 1,139
#
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
#include <arch/register.h>
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <mm/asid.h>
 
#define RR_MASK (0xFFFFFFFF00000002)
#define RID_SHIFT 8
#define PS_SHIFT 2
 
#define KERNEL_TRANSLATION_I 0x0010000000000661
#define KERNEL_TRANSLATION_D 0x0010000000000661
 
.section K_TEXT_START, "ax"
 
.global kernel_image_start
 
stack0:
kernel_image_start:
.auto
 
# Fill TR.i and TR.d using Region Register #VRN_KERNEL
 
movl r8=(VRN_KERNEL<<VRN_SHIFT)
mov r9=rr[r8]
movl r10=(RR_MASK)
and r9=r10,r9
movl r10=((RID_KERNEL<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
or r9=r10,r9
mov rr[r8]=r9
 
movl r8=(VRN_KERNEL<<VRN_SHIFT)
mov cr.ifa=r8
movl r10=(KERNEL_PAGE_WIDTH<<PS_SHIFT)
mov cr.itir=r10
movl r10=(KERNEL_TRANSLATION_I)
itr.i itr[r0]=r10
movl r10=(KERNEL_TRANSLATION_D)
itr.d dtr[r0]=r10
 
# initialize PSR
mov psr.l = r0
srlz.i
srlz.d
movl r10=(PSR_DT_MASK|PSR_RT_MASK|PSR_IT_MASK|PSR_IC_MASK) /* Enable paging */
mov r9=psr
or r10=r10,r9
mov cr.ipsr=r10
mov cr.ifs=r0
movl r8=paging_start
mov cr.iip=r8
srlz.d
srlz.i
 
.explicit
/*
 * Return From Interruption (rfi) is the only way to fill the upper half of PSR.
*/
rfi;;
 
.global paging_start
paging_start:
 
/*
* Now we are paging.
*/
 
# switch to register bank 1
bsw.1
# initialize register stack
mov ar.rsc = r0
movl r8=(VRN_KERNEL<<VRN_SHIFT) ;;
mov ar.bspstore = r8
loadrs
 
# initialize memory stack to some sane value
movl r12 = stack0;;
add r12 = - 16, r12 /* allocate a scratch area on the stack */
 
# initialize gp (Global Pointer) register
movl r1 = _hardcoded_load_address
 
/*
* Initialize hardcoded_* variables.
*/
movl r14 = _hardcoded_ktext_size
movl r15 = _hardcoded_kdata_size
movl r16 = _hardcoded_load_address ;;
addl r17 = @gprel(hardcoded_ktext_size), gp
addl r18 = @gprel(hardcoded_kdata_size), gp
addl r19 = @gprel(hardcoded_load_address), gp
;;
st8 [r17] = r14
st8 [r18] = r15
st8 [r19] = r16
 
 
ssm (1<<19);; /*Disable f32 - f127*/
srlz.i;
srlz.d;;
 
br.call.sptk.many b0 = arch_pre_main
 
movl r18=main_bsp ;;
mov b1=r18 ;;
br.call.sptk.many b0=b1
 
 
0:
br 0b
/tags/0.1.1/kernel/arch/ia64/src/proc/scheduler.c
0,0 → 1,78
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <arch.h>
#include <arch/register.h>
#include <arch/context.h>
#include <arch/stack.h>
#include <arch/mm/tlb.h>
#include <config.h>
#include <align.h>
 
/** Perform ia64 specific tasks needed before the new task is run. */
void before_task_runs_arch(void)
{
}
 
/** Prepare kernel stack pointers in bank 0 r22 and r23 and make sure the stack is mapped in DTR. */
void before_thread_runs_arch(void)
{
__address base;
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
 
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<(KERNEL_PAGE_WIDTH))) {
/*
* Kernel stack of this thread is not mapped by DTR[TR_KERNEL].
* Use DTR[TR_KSTACK1] and DTR[TR_KSTACK2] to map it.
*/
dtlb_kernel_mapping_insert((__address) THREAD->kstack, KA2PA(THREAD->kstack), true, DTR_KSTACK1);
dtlb_kernel_mapping_insert((__address) THREAD->kstack + PAGE_SIZE, KA2PA(THREAD->kstack) + FRAME_SIZE, true, DTR_KSTACK2);
}
/*
* Record address of kernel backing store to bank 0 r22.
* Record address of kernel stack to bank 0 r23.
* These values will be found there after switch from userspace.
*/
__asm__ volatile (
"bsw.0\n"
"mov r22 = %0\n"
"mov r23 = %1\n"
"bsw.1\n"
:
: "r" (&THREAD->kstack[THREAD_STACK_SIZE]),
"r" (&THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA])
);
}
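/*
 * For illustration: the bank 0 registers r22 and r23 written above are the
 * ones the heavyweight interrupt handler reads as R_KSTACK_BSP and R_KSTACK
 * (see ivt.S) when it has to switch from a userspace stack to the kernel
 * memory stack and register backing store of the current thread.
 */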
 
void after_thread_ran_arch(void)
{
}
/tags/0.1.1/kernel/arch/ia64/src/ivt.S
0,0 → 1,582
#
# Copyright (C) 2005 Jakub Vana
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
#include <arch/stack.h>
#include <arch/register.h>
#include <arch/mm/page.h>
#include <align.h>
 
#define FRS_TO_SAVE 30
#define STACK_ITEMS (19 + FRS_TO_SAVE*2)
#define STACK_FRAME_SIZE ALIGN_UP((STACK_ITEMS*STACK_ITEM_SIZE) + STACK_SCRATCH_AREA_SIZE, STACK_ALIGNMENT)
 
#if (STACK_ITEMS % 2 == 0)
# define STACK_FRAME_BIAS 8
#else
# define STACK_FRAME_BIAS 16
#endif
 
/** Partitioning of bank 0 registers. */
#define R_OFFS r16
#define R_HANDLER r17
#define R_RET r18
#define R_TMP r19
#define R_KSTACK_BSP r22 /* keep in sync with before_thread_runs_arch() */
#define R_KSTACK r23 /* keep in sync with before_thread_runs_arch() */
 
/** Heavyweight interrupt handler
*
* This macro roughly follows steps from 1 to 19 described in
* Intel Itanium Architecture Software Developer's Manual, Chapter 3.4.2.
*
 * The HEAVYWEIGHT_HANDLER macro must fit into 16 bundles (48 instructions).
 * This goal is achieved by using procedure calls once the RSE becomes operational.
*
* Some steps are skipped (enabling and disabling interrupts).
*
* @param offs Offset from the beginning of IVT.
* @param handler Interrupt handler address.
*/
.macro HEAVYWEIGHT_HANDLER offs, handler=universal_handler
.org ivt + \offs
mov R_OFFS = \offs
movl R_HANDLER = \handler ;;
br heavyweight_handler
.endm
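/*
 * For illustration, the entry for offset 0x2c00 below expands roughly to:
 *
 *	.org ivt + 0x2c00
 *	mov r16 = 0x2c00			// R_OFFS
 *	movl r17 = break_instruction ;;		// R_HANDLER
 *	br heavyweight_handler
 */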
 
.global heavyweight_handler
heavyweight_handler:
/* 1. copy interrupt registers into bank 0 */
/*
* Note that r24-r31 from bank 0 can be used only as long as PSR.ic = 0.
*/
/* Set up FPU as in interrupted context. */
mov r24 = psr
mov r25 = cr.ipsr
mov r26 = PSR_DFH_MASK
mov r27 = ~PSR_DFH_MASK ;;
and r26 = r25, r26
and r24 = r24, r27;;
or r24 = r24, r26;;
mov psr.l = r24;;
srlz.i
srlz.d;;
 
mov r24 = cr.iip
mov r25 = cr.ipsr
mov r26 = cr.iipa
mov r27 = cr.isr
mov r28 = cr.ifa
/* 2. preserve predicate register into bank 0 */
mov r29 = pr ;;
/* 3. switch to kernel memory stack */
mov r30 = cr.ipsr
shr.u r31 = r12, VRN_SHIFT ;;
 
shr.u r30 = r30, PSR_CPL_SHIFT ;;
and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;
 
/*
* Set p3 to true if the interrupted context executed in kernel mode.
	 * Set p4 to true if the interrupted context didn't execute in kernel mode.
*/
cmp.eq p3, p4 = r30, r0 ;;
cmp.eq p1, p2 = r30, r0 ;; /* remember IPSR setting in p1 and p2 */
 
/*
* Set p3 to true if the stack register references kernel address space.
	 * Set p4 to true if the stack register doesn't reference kernel address space.
*/
(p3) cmp.eq p3, p4 = VRN_KERNEL, r31 ;;
/*
* Now, p4 is true iff the stack needs to be switched to kernel stack.
*/
mov r30 = r12
(p4) mov r12 = R_KSTACK ;;
add r31 = -STACK_FRAME_BIAS, r12 ;;
add r12 = -STACK_FRAME_SIZE, r12
 
/* 4. save registers in bank 0 into memory stack */
 
/*
* If this is break_instruction handler,
* copy input parameters to stack.
*/
mov R_TMP = 0x2c00 ;;
cmp.eq p6,p5 = R_OFFS, R_TMP ;;
/*
* From now on, if this is break_instruction handler, p6 is true and p5 is false.
* Otherwise p6 is false and p5 is true.
* Note that p5 is a preserved predicate register and we make use of it.
*/
 
(p6) st8 [r31] = r36, -8 ;; /* save in4 */
(p6) st8 [r31] = r35, -8 ;; /* save in3 */
(p6) st8 [r31] = r34, -8 ;; /* save in2 */
(p6) st8 [r31] = r33, -8 ;; /* save in1 */
(p6) st8 [r31] = r32, -8 ;; /* save in0 */
(p5) add r31 = -40, r31 ;;
st8 [r31] = r30, -8 ;; /* save old stack pointer */
st8 [r31] = r29, -8 ;; /* save predicate registers */
 
st8 [r31] = r24, -8 ;; /* save cr.iip */
st8 [r31] = r25, -8 ;; /* save cr.ipsr */
st8 [r31] = r26, -8 ;; /* save cr.iipa */
st8 [r31] = r27, -8 ;; /* save cr.isr */
st8 [r31] = r28, -8 ;; /* save cr.ifa */
 
/* 5. RSE switch from interrupted context */
mov r24 = ar.rsc
mov r25 = ar.pfs
cover
mov r26 = cr.ifs
st8 [r31] = r24, -8 ;; /* save ar.rsc */
st8 [r31] = r25, -8 ;; /* save ar.pfs */
	st8 [r31] = r26, -8		/* save cr.ifs */
and r24 = ~(RSC_PL_MASK), r24 ;;
and r30 = ~(RSC_MODE_MASK), r24 ;;
mov ar.rsc = r30 ;; /* update RSE state */
mov r27 = ar.rnat
mov r28 = ar.bspstore ;;
/*
* Inspect BSPSTORE to figure out whether it is necessary to switch to kernel BSPSTORE.
*/
(p1) shr.u r30 = r28, VRN_SHIFT ;;
(p1) cmp.eq p1, p2 = VRN_KERNEL, r30 ;;
/*
* If BSPSTORE needs to be switched, p1 is false and p2 is true.
*/
(p1) mov r30 = r28
(p2) mov r30 = R_KSTACK_BSP ;;
(p2) mov ar.bspstore = r30 ;;
mov r29 = ar.bsp
st8 [r31] = r27, -8 ;; /* save ar.rnat */
st8 [r31] = r30, -8 ;; /* save new value written to ar.bspstore */
st8 [r31] = r28, -8 ;; /* save ar.bspstore */
st8 [r31] = r29, -8 /* save ar.bsp */
mov ar.rsc = r24 /* restore RSE's setting + kernel privileges */
/* steps 6 - 15 are done by heavyweight_handler_inner() */
mov R_RET = b0 /* save b0 belonging to interrupted context */
br.call.sptk.many b0 = heavyweight_handler_inner
0: mov b0 = R_RET /* restore b0 belonging to the interrupted context */
 
/* 16. RSE switch to interrupted context */
	cover			/* allocate a zero-sized frame (step 1 from the Intel docs) */
 
add r31 = (STACK_SCRATCH_AREA_SIZE+(FRS_TO_SAVE*2*8)), r12 ;;
 
ld8 r30 = [r31], +8 ;; /* load ar.bsp */
ld8 r29 = [r31], +8 ;; /* load ar.bspstore */
ld8 r28 = [r31], +8 ;; /* load ar.bspstore_new */
sub r27 = r30 , r28 ;; /* calculate loadrs (step 2) */
shl r27 = r27, 16
 
mov r24 = ar.rsc ;;
and r30 = ~3, r24 ;;
or r24 = r30 , r27 ;;
mov ar.rsc = r24 ;; /* place RSE in enforced lazy mode */
 
loadrs /* (step 3) */
 
ld8 r27 = [r31], +8 ;; /* load ar.rnat */
ld8 r26 = [r31], +8 ;; /* load cr.ifs */
ld8 r25 = [r31], +8 ;; /* load ar.pfs */
ld8 r24 = [r31], +8 ;; /* load ar.rsc */
 
mov ar.bspstore = r29 ;; /* (step 4) */
mov ar.rnat = r27 /* (step 5) */
 
mov ar.pfs = r25 /* (step 6) */
mov cr.ifs = r26
 
mov ar.rsc = r24 /* (step 7) */
 
/* 17. restore interruption state from memory stack */
ld8 r28 = [r31], +8 ;; /* load cr.ifa */
ld8 r27 = [r31], +8 ;; /* load cr.isr */
ld8 r26 = [r31], +8 ;; /* load cr.iipa */
ld8 r25 = [r31], +8 ;; /* load cr.ipsr */
ld8 r24 = [r31], +8 ;; /* load cr.iip */
 
mov cr.iip = r24;;
mov cr.iipa = r26
mov cr.isr = r27
mov cr.ifa = r28
 
/* Set up FPU as in exception. */
mov r24 = psr
mov r26 = PSR_DFH_MASK
mov r27 = ~PSR_DFH_MASK ;;
and r25 = r25, r27
and r24 = r24, r26 ;;
or r25 = r25, r24;;
mov cr.ipsr = r25
 
/* 18. restore predicate registers from memory stack */
ld8 r29 = [r31], +8 ;; /* load predicate registers */
mov pr = r29
/* 19. return from interruption */
ld8 r12 = [r31] /* load stack pointer */
rfi ;;
 
.global heavyweight_handler_inner
heavyweight_handler_inner:
/*
* From this point, the rest of the interrupted context
* will be preserved in stacked registers and backing store.
*/
alloc loc0 = ar.pfs, 0, 48, 2, 0 ;;
/* bank 0 is going to be shadowed, copy essential data from there */
mov loc1 = R_RET /* b0 belonging to interrupted context */
mov loc2 = R_HANDLER
mov out0 = R_OFFS
add out1 = STACK_SCRATCH_AREA_SIZE, r12
 
/* 6. switch to bank 1 and reenable PSR.ic */
ssm PSR_IC_MASK
bsw.1 ;;
srlz.d
/* 7. preserve branch and application registers */
mov loc3 = ar.unat
mov loc4 = ar.lc
mov loc5 = ar.ec
mov loc6 = ar.ccv
mov loc7 = ar.csd
mov loc8 = ar.ssd
mov loc9 = b0
mov loc10 = b1
mov loc11 = b2
mov loc12 = b3
mov loc13 = b4
mov loc14 = b5
mov loc15 = b6
mov loc16 = b7
/* 8. preserve general and floating-point registers */
mov loc17 = r1
mov loc18 = r2
mov loc19 = r3
mov loc20 = r4
mov loc21 = r5
mov loc22 = r6
mov loc23 = r7
(p5) mov loc24 = r8 /* only if not in break_instruction handler */
mov loc25 = r9
mov loc26 = r10
mov loc27 = r11
/* skip r12 (stack pointer) */
mov loc28 = r13
mov loc29 = r14
mov loc30 = r15
mov loc31 = r16
mov loc32 = r17
mov loc33 = r18
mov loc34 = r19
mov loc35 = r20
mov loc36 = r21
mov loc37 = r22
mov loc38 = r23
mov loc39 = r24
mov loc40 = r25
mov loc41 = r26
mov loc42 = r27
mov loc43 = r28
mov loc44 = r29
mov loc45 = r30
mov loc46 = r31
 
add r24 = 96 + STACK_SCRATCH_AREA_SIZE, r12
add r25 = 112 + STACK_SCRATCH_AREA_SIZE, r12
add r26 = 0 + STACK_SCRATCH_AREA_SIZE, r12
add r27 = 16 + STACK_SCRATCH_AREA_SIZE, r12
add r28 = 32 + STACK_SCRATCH_AREA_SIZE, r12
add r29 = 48 + STACK_SCRATCH_AREA_SIZE, r12
add r30 = 64 + STACK_SCRATCH_AREA_SIZE, r12
add r31 = 80 + STACK_SCRATCH_AREA_SIZE, r12 ;;
stf.spill [r26] = f2, 0x80
stf.spill [r27] = f3, 0x80
stf.spill [r28] = f4, 0x80
stf.spill [r29] = f5, 0x80
stf.spill [r30] = f6, 0x80
stf.spill [r31] = f7, 0x80 ;;
 
stf.spill [r24] = f8, 0x80
stf.spill [r25] = f9, 0x80
stf.spill [r26] = f10, 0x80
stf.spill [r27] = f11, 0x80
stf.spill [r28] = f12, 0x80
stf.spill [r29] = f13, 0x80
stf.spill [r30] = f14, 0x80
stf.spill [r31] = f15, 0x80 ;;
 
stf.spill [r24] = f16, 0x80
stf.spill [r25] = f17, 0x80
stf.spill [r26] = f18, 0x80
stf.spill [r27] = f19, 0x80
stf.spill [r28] = f20, 0x80
stf.spill [r29] = f21, 0x80
stf.spill [r30] = f22, 0x80
stf.spill [r31] = f23, 0x80 ;;
 
stf.spill [r24] = f24, 0x80
stf.spill [r25] = f25, 0x80
stf.spill [r26] = f26, 0x80
stf.spill [r27] = f27, 0x80
stf.spill [r28] = f28, 0x80
stf.spill [r29] = f29, 0x80
stf.spill [r30] = f30, 0x80
stf.spill [r31] = f31, 0x80 ;;
 
mov loc47 = ar.fpsr /* preserve floating point status register */
/* 9. skipped (will not enable interrupts) */
/*
* ssm PSR_I_MASK
* ;;
* srlz.d
*/
 
/* 10. call handler */
movl r1 = _hardcoded_load_address
mov b1 = loc2
br.call.sptk.many b0 = b1
 
/* 11. return from handler */
0:
/* 12. skipped (will not disable interrupts) */
/*
* rsm PSR_I_MASK
* ;;
* srlz.d
*/
 
/* 13. restore general and floating-point registers */
add r24 = 96 + STACK_SCRATCH_AREA_SIZE, r12
add r25 = 112 + STACK_SCRATCH_AREA_SIZE, r12
add r26 = 0 + STACK_SCRATCH_AREA_SIZE, r12
add r27 = 16 + STACK_SCRATCH_AREA_SIZE, r12
add r28 = 32 + STACK_SCRATCH_AREA_SIZE, r12
add r29 = 48 + STACK_SCRATCH_AREA_SIZE, r12
add r30 = 64 + STACK_SCRATCH_AREA_SIZE, r12
add r31 = 80 + STACK_SCRATCH_AREA_SIZE, r12 ;;
 
ldf.fill f2 = [r26], 0x80
ldf.fill f3 = [r27], 0x80
ldf.fill f4 = [r28], 0x80
ldf.fill f5 = [r29], 0x80
ldf.fill f6 = [r30], 0x80
ldf.fill f7 = [r31], 0x80 ;;
 
ldf.fill f8 = [r24], 0x80
ldf.fill f9 = [r25], 0x80
ldf.fill f10 = [r26], 0x80
ldf.fill f11 = [r27], 0x80
ldf.fill f12 = [r28], 0x80
ldf.fill f13 = [r29], 0x80
ldf.fill f14 = [r30], 0x80
ldf.fill f15 = [r31], 0x80 ;;
 
ldf.fill f16 = [r24], 0x80
ldf.fill f17 = [r25], 0x80
ldf.fill f18 = [r26], 0x80
ldf.fill f19 = [r27], 0x80
ldf.fill f20 = [r28], 0x80
ldf.fill f21 = [r29], 0x80
ldf.fill f22 = [r30], 0x80
ldf.fill f23 = [r31], 0x80 ;;
 
ldf.fill f24 = [r24], 0x80
ldf.fill f25 = [r25], 0x80
ldf.fill f26 = [r26], 0x80
ldf.fill f27 = [r27], 0x80
ldf.fill f28 = [r28], 0x80
ldf.fill f29 = [r29], 0x80
ldf.fill f30 = [r30], 0x80
ldf.fill f31 = [r31], 0x80 ;;
mov r1 = loc17
mov r2 = loc18
mov r3 = loc19
mov r4 = loc20
mov r5 = loc21
mov r6 = loc22
mov r7 = loc23
(p5) mov r8 = loc24 /* only if not in break_instruction handler */
mov r9 = loc25
mov r10 = loc26
mov r11 = loc27
/* skip r12 (stack pointer) */
mov r13 = loc28
mov r14 = loc29
mov r15 = loc30
mov r16 = loc31
mov r17 = loc32
mov r18 = loc33
mov r19 = loc34
mov r20 = loc35
mov r21 = loc36
mov r22 = loc37
mov r23 = loc38
mov r24 = loc39
mov r25 = loc40
mov r26 = loc41
mov r27 = loc42
mov r28 = loc43
mov r29 = loc44
mov r30 = loc45
mov r31 = loc46
 
mov ar.fpsr = loc47 /* restore floating point status register */
/* 14. restore branch and application registers */
mov ar.unat = loc3
mov ar.lc = loc4
mov ar.ec = loc5
mov ar.ccv = loc6
mov ar.csd = loc7
mov ar.ssd = loc8
mov b0 = loc9
mov b1 = loc10
mov b2 = loc11
mov b3 = loc12
mov b4 = loc13
mov b5 = loc14
mov b6 = loc15
mov b7 = loc16
/* 15. disable PSR.ic and switch to bank 0 */
rsm PSR_IC_MASK
bsw.0 ;;
srlz.d
 
mov R_RET = loc1
mov ar.pfs = loc0
br.ret.sptk.many b0
 
.global ivt
.align 32768
ivt:
HEAVYWEIGHT_HANDLER 0x0000
HEAVYWEIGHT_HANDLER 0x0400
HEAVYWEIGHT_HANDLER 0x0800
HEAVYWEIGHT_HANDLER 0x0c00 alternate_instruction_tlb_fault
HEAVYWEIGHT_HANDLER 0x1000 alternate_data_tlb_fault
HEAVYWEIGHT_HANDLER 0x1400 data_nested_tlb_fault
HEAVYWEIGHT_HANDLER 0x1800
HEAVYWEIGHT_HANDLER 0x1c00
HEAVYWEIGHT_HANDLER 0x2000 data_dirty_bit_fault
HEAVYWEIGHT_HANDLER 0x2400 instruction_access_bit_fault
HEAVYWEIGHT_HANDLER 0x2800 data_access_bit_fault
HEAVYWEIGHT_HANDLER 0x2c00 break_instruction
HEAVYWEIGHT_HANDLER 0x3000 external_interrupt /* For external interrupt, heavyweight handler is used. */
HEAVYWEIGHT_HANDLER 0x3400
HEAVYWEIGHT_HANDLER 0x3800
HEAVYWEIGHT_HANDLER 0x3c00
HEAVYWEIGHT_HANDLER 0x4000
HEAVYWEIGHT_HANDLER 0x4400
HEAVYWEIGHT_HANDLER 0x4800
HEAVYWEIGHT_HANDLER 0x4c00
 
HEAVYWEIGHT_HANDLER 0x5000 page_not_present
HEAVYWEIGHT_HANDLER 0x5100
HEAVYWEIGHT_HANDLER 0x5200
HEAVYWEIGHT_HANDLER 0x5300
HEAVYWEIGHT_HANDLER 0x5400 general_exception
HEAVYWEIGHT_HANDLER 0x5500 disabled_fp_register
HEAVYWEIGHT_HANDLER 0x5600
HEAVYWEIGHT_HANDLER 0x5700
HEAVYWEIGHT_HANDLER 0x5800
HEAVYWEIGHT_HANDLER 0x5900
HEAVYWEIGHT_HANDLER 0x5a00
HEAVYWEIGHT_HANDLER 0x5b00
HEAVYWEIGHT_HANDLER 0x5c00
HEAVYWEIGHT_HANDLER 0x5d00
HEAVYWEIGHT_HANDLER 0x5e00
HEAVYWEIGHT_HANDLER 0x5f00
HEAVYWEIGHT_HANDLER 0x6000
HEAVYWEIGHT_HANDLER 0x6100
HEAVYWEIGHT_HANDLER 0x6200
HEAVYWEIGHT_HANDLER 0x6300
HEAVYWEIGHT_HANDLER 0x6400
HEAVYWEIGHT_HANDLER 0x6500
HEAVYWEIGHT_HANDLER 0x6600
HEAVYWEIGHT_HANDLER 0x6700
HEAVYWEIGHT_HANDLER 0x6800
HEAVYWEIGHT_HANDLER 0x6900
HEAVYWEIGHT_HANDLER 0x6a00
HEAVYWEIGHT_HANDLER 0x6b00
HEAVYWEIGHT_HANDLER 0x6c00
HEAVYWEIGHT_HANDLER 0x6d00
HEAVYWEIGHT_HANDLER 0x6e00
HEAVYWEIGHT_HANDLER 0x6f00
 
HEAVYWEIGHT_HANDLER 0x7000
HEAVYWEIGHT_HANDLER 0x7100
HEAVYWEIGHT_HANDLER 0x7200
HEAVYWEIGHT_HANDLER 0x7300
HEAVYWEIGHT_HANDLER 0x7400
HEAVYWEIGHT_HANDLER 0x7500
HEAVYWEIGHT_HANDLER 0x7600
HEAVYWEIGHT_HANDLER 0x7700
HEAVYWEIGHT_HANDLER 0x7800
HEAVYWEIGHT_HANDLER 0x7900
HEAVYWEIGHT_HANDLER 0x7a00
HEAVYWEIGHT_HANDLER 0x7b00
HEAVYWEIGHT_HANDLER 0x7c00
HEAVYWEIGHT_HANDLER 0x7d00
HEAVYWEIGHT_HANDLER 0x7e00
HEAVYWEIGHT_HANDLER 0x7f00
/tags/0.1.1/kernel/arch/ia64/src/context.S
0,0 → 1,246
#
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
.text
 
.global context_save_arch
.global context_restore_arch
 
context_save_arch:
alloc loc0 = ar.pfs, 1, 8, 0, 0
mov loc1 = ar.unat ;;
/* loc2 */
mov loc3 = ar.rsc
 
.auto
 
/*
* Flush dirty registers to backing store.
* After this ar.bsp and ar.bspstore are equal.
*/
flushrs
mov loc4 = ar.bsp
/*
	 * Put the RSE into enforced lazy mode
	 * so that ar.rnat can be read.
*/
and loc5 = ~3, loc3
mov ar.rsc = loc5
mov loc5 = ar.rnat
 
.explicit
 
mov loc6 = ar.lc
/*
* Save application registers
*/
st8 [in0] = loc0, 8 ;; /* save ar.pfs */
st8 [in0] = loc1, 8 ;; /* save ar.unat (caller) */
mov loc2 = in0 ;;
add in0 = 8, in0 ;; /* skip ar.unat (callee) */
st8 [in0] = loc3, 8 ;; /* save ar.rsc */
st8 [in0] = loc4, 8 ;; /* save ar.bsp */
st8 [in0] = loc5, 8 ;; /* save ar.rnat */
st8 [in0] = loc6, 8 ;; /* save ar.lc */
/*
* Save general registers including NaT bits
*/
st8.spill [in0] = r1, 8 ;;
st8.spill [in0] = r4, 8 ;;
st8.spill [in0] = r5, 8 ;;
st8.spill [in0] = r6, 8 ;;
st8.spill [in0] = r7, 8 ;;
st8.spill [in0] = r12, 8 ;; /* save sp */
st8.spill [in0] = r13, 8 ;;
 
mov loc3 = ar.unat ;;
st8 [loc2] = loc3 /* save ar.unat (callee) */
 
/*
* Save branch registers
*/
mov loc2 = b0 ;;
st8 [in0] = loc2, 8 /* save pc */
mov loc3 = b1 ;;
st8 [in0] = loc3, 8
mov loc4 = b2 ;;
st8 [in0] = loc4, 8
mov loc5 = b3 ;;
st8 [in0] = loc5, 8
mov loc6 = b4 ;;
st8 [in0] = loc6, 8
mov loc7 = b5 ;;
st8 [in0] = loc7, 8
 
/*
* Save predicate registers
*/
mov loc2 = pr ;;
	st8 [in0] = loc2, 16;;	/* the FPU registers that follow must be spilled to a 16-byte aligned address */
 
/*
* Save floating-point registers.
*/
stf.spill [in0] = f2, 16 ;;
stf.spill [in0] = f3, 16 ;;
stf.spill [in0] = f4, 16 ;;
stf.spill [in0] = f5, 16 ;;
 
stf.spill [in0] = f16, 16 ;;
stf.spill [in0] = f17, 16 ;;
stf.spill [in0] = f18, 16 ;;
stf.spill [in0] = f19, 16 ;;
stf.spill [in0] = f20, 16 ;;
stf.spill [in0] = f21, 16 ;;
stf.spill [in0] = f22, 16 ;;
stf.spill [in0] = f23, 16 ;;
stf.spill [in0] = f24, 16 ;;
stf.spill [in0] = f25, 16 ;;
stf.spill [in0] = f26, 16 ;;
stf.spill [in0] = f27, 16 ;;
stf.spill [in0] = f28, 16 ;;
stf.spill [in0] = f29, 16 ;;
stf.spill [in0] = f30, 16 ;;
stf.spill [in0] = f31, 16 ;;
 
mov ar.unat = loc1
add r8 = r0, r0, 1 /* context_save returns 1 */
br.ret.sptk.many b0
 
context_restore_arch:
alloc loc0 = ar.pfs, 1, 9, 0, 0 ;;
 
ld8 loc0 = [in0], 8 ;; /* load ar.pfs */
ld8 loc1 = [in0], 8 ;; /* load ar.unat (caller) */
ld8 loc2 = [in0], 8 ;; /* load ar.unat (callee) */
ld8 loc3 = [in0], 8 ;; /* load ar.rsc */
ld8 loc4 = [in0], 8 ;; /* load ar.bsp */
ld8 loc5 = [in0], 8 ;; /* load ar.rnat */
ld8 loc6 = [in0], 8 ;; /* load ar.lc */
.auto
 
/*
* Invalidate the ALAT
*/
invala
 
/*
	 * Put the RSE into enforced lazy mode
	 * so that ar.bspstore and ar.rnat can be written.
*/
movl loc8 = ~3
and loc8 = loc3, loc8
mov ar.rsc = loc8
 
/*
* Flush dirty registers to backing store.
* We do this because we want the following move
* to ar.bspstore to assign the same value to ar.bsp.
*/
flushrs
 
/*
* Restore application registers
*/
mov ar.bspstore = loc4 /* rse.bspload = ar.bsp = ar.bspstore = loc4 */
mov ar.rnat = loc5
mov ar.pfs = loc0
mov ar.rsc = loc3
 
.explicit
 
mov ar.unat = loc2 ;;
mov ar.lc = loc6
/*
* Restore general registers including NaT bits
*/
ld8.fill r1 = [in0], 8 ;;
ld8.fill r4 = [in0], 8 ;;
ld8.fill r5 = [in0], 8 ;;
ld8.fill r6 = [in0], 8 ;;
ld8.fill r7 = [in0], 8 ;;
ld8.fill r12 = [in0], 8 ;; /* restore sp */
ld8.fill r13 = [in0], 8 ;;
 
/*
* Restore branch registers
*/
ld8 loc2 = [in0], 8 ;; /* restore pc */
mov b0 = loc2
ld8 loc3 = [in0], 8 ;;
mov b1 = loc3
ld8 loc4 = [in0], 8 ;;
mov b2 = loc4
ld8 loc5 = [in0], 8 ;;
mov b3 = loc5
ld8 loc6 = [in0], 8 ;;
mov b4 = loc6
ld8 loc7 = [in0], 8 ;;
mov b5 = loc7
 
/*
* Restore predicate registers
*/
ld8 loc2 = [in0], 16 ;;
mov pr = loc2, ~0
/*
* Restore floating-point registers.
*/
ldf.fill f2 = [in0], 16 ;;
ldf.fill f3 = [in0], 16 ;;
ldf.fill f4 = [in0], 16 ;;
ldf.fill f5 = [in0], 16 ;;
 
ldf.fill f16 = [in0], 16 ;;
ldf.fill f17 = [in0], 16 ;;
ldf.fill f18 = [in0], 16 ;;
ldf.fill f19 = [in0], 16 ;;
ldf.fill f20 = [in0], 16 ;;
ldf.fill f21 = [in0], 16 ;;
ldf.fill f22 = [in0], 16 ;;
ldf.fill f23 = [in0], 16 ;;
ldf.fill f24 = [in0], 16 ;;
ldf.fill f25 = [in0], 16 ;;
ldf.fill f26 = [in0], 16 ;;
ldf.fill f27 = [in0], 16 ;;
ldf.fill f28 = [in0], 16 ;;
ldf.fill f29 = [in0], 16 ;;
ldf.fill f30 = [in0], 16 ;;
ldf.fill f31 = [in0], 16 ;;
mov ar.unat = loc1
mov r8 = r0 /* context_restore returns 0 */
br.ret.sptk.many b0
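/*
 * For illustration: the two routines above follow the setjmp()/longjmp()
 * pattern -- context_save_arch() returns 1 to its original caller, and a
 * later context_restore_arch() makes the saved context return a second
 * time with the value 0, which is how callers distinguish the two paths.
 */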
/tags/0.1.1/kernel/arch/ia64/src/fpu_context.c
0,0 → 1,475
/*
* Copyright (C) 2005 Jakub Vana
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
 
#include <fpu_context.h>
#include <arch/register.h>
#include <print.h>
 
 
void fpu_context_save(fpu_context_t *fctx)
{
 
asm volatile(
 
"stf.spill [%0]=f32,0x80\n"
"stf.spill [%1]=f33,0x80\n"
"stf.spill [%2]=f34,0x80\n"
"stf.spill [%3]=f35,0x80\n"
"stf.spill [%4]=f36,0x80\n"
"stf.spill [%5]=f37,0x80\n"
"stf.spill [%6]=f38,0x80\n"
"stf.spill [%7]=f39,0x80\n;;"
 
"stf.spill [%0]=f40,0x80\n"
"stf.spill [%1]=f41,0x80\n"
"stf.spill [%2]=f42,0x80\n"
"stf.spill [%3]=f43,0x80\n"
"stf.spill [%4]=f44,0x80\n"
"stf.spill [%5]=f45,0x80\n"
"stf.spill [%6]=f46,0x80\n"
"stf.spill [%7]=f47,0x80\n;;"
 
"stf.spill [%0]=f48,0x80\n"
"stf.spill [%1]=f49,0x80\n"
"stf.spill [%2]=f50,0x80\n"
"stf.spill [%3]=f51,0x80\n"
"stf.spill [%4]=f52,0x80\n"
"stf.spill [%5]=f53,0x80\n"
"stf.spill [%6]=f54,0x80\n"
"stf.spill [%7]=f55,0x80\n;;"
 
"stf.spill [%0]=f56,0x80\n"
"stf.spill [%1]=f57,0x80\n"
"stf.spill [%2]=f58,0x80\n"
"stf.spill [%3]=f59,0x80\n"
"stf.spill [%4]=f60,0x80\n"
"stf.spill [%5]=f61,0x80\n"
"stf.spill [%6]=f62,0x80\n"
"stf.spill [%7]=f63,0x80\n;;"
 
"stf.spill [%0]=f64,0x80\n"
"stf.spill [%1]=f65,0x80\n"
"stf.spill [%2]=f66,0x80\n"
"stf.spill [%3]=f67,0x80\n"
"stf.spill [%4]=f68,0x80\n"
"stf.spill [%5]=f69,0x80\n"
"stf.spill [%6]=f70,0x80\n"
"stf.spill [%7]=f71,0x80\n;;"
 
"stf.spill [%0]=f72,0x80\n"
"stf.spill [%1]=f73,0x80\n"
"stf.spill [%2]=f74,0x80\n"
"stf.spill [%3]=f75,0x80\n"
"stf.spill [%4]=f76,0x80\n"
"stf.spill [%5]=f77,0x80\n"
"stf.spill [%6]=f78,0x80\n"
"stf.spill [%7]=f79,0x80\n;;"
 
"stf.spill [%0]=f80,0x80\n"
"stf.spill [%1]=f81,0x80\n"
"stf.spill [%2]=f82,0x80\n"
"stf.spill [%3]=f83,0x80\n"
"stf.spill [%4]=f84,0x80\n"
"stf.spill [%5]=f85,0x80\n"
"stf.spill [%6]=f86,0x80\n"
"stf.spill [%7]=f87,0x80\n;;"
 
"stf.spill [%0]=f88,0x80\n"
"stf.spill [%1]=f89,0x80\n"
"stf.spill [%2]=f90,0x80\n"
"stf.spill [%3]=f91,0x80\n"
"stf.spill [%4]=f92,0x80\n"
"stf.spill [%5]=f93,0x80\n"
"stf.spill [%6]=f94,0x80\n"
"stf.spill [%7]=f95,0x80\n;;"
 
 
"stf.spill [%0]=f96,0x80\n"
"stf.spill [%1]=f97,0x80\n"
"stf.spill [%2]=f98,0x80\n"
"stf.spill [%3]=f99,0x80\n"
"stf.spill [%4]=f100,0x80\n"
"stf.spill [%5]=f101,0x80\n"
"stf.spill [%6]=f102,0x80\n"
"stf.spill [%7]=f103,0x80\n;;"
 
"stf.spill [%0]=f104,0x80\n"
"stf.spill [%1]=f105,0x80\n"
"stf.spill [%2]=f106,0x80\n"
"stf.spill [%3]=f107,0x80\n"
"stf.spill [%4]=f108,0x80\n"
"stf.spill [%5]=f109,0x80\n"
"stf.spill [%6]=f110,0x80\n"
"stf.spill [%7]=f111,0x80\n;;"
 
"stf.spill [%0]=f112,0x80\n"
"stf.spill [%1]=f113,0x80\n"
"stf.spill [%2]=f114,0x80\n"
"stf.spill [%3]=f115,0x80\n"
"stf.spill [%4]=f116,0x80\n"
"stf.spill [%5]=f117,0x80\n"
"stf.spill [%6]=f118,0x80\n"
"stf.spill [%7]=f119,0x80\n;;"
 
"stf.spill [%0]=f120,0x80\n"
"stf.spill [%1]=f121,0x80\n"
"stf.spill [%2]=f122,0x80\n"
"stf.spill [%3]=f123,0x80\n"
"stf.spill [%4]=f124,0x80\n"
"stf.spill [%5]=f125,0x80\n"
"stf.spill [%6]=f126,0x80\n"
"stf.spill [%7]=f127,0x80\n;;"
 
 
:
:"r" (&((fctx->fr)[0])),"r" (&((fctx->fr)[1])),"r" (&((fctx->fr)[2])),"r" (&((fctx->fr)[3])),
"r" (&((fctx->fr)[4])),"r" (&((fctx->fr)[5])),"r" (&((fctx->fr)[6])),"r" (&((fctx->fr)[7]))
);
}
 
 
void fpu_context_restore(fpu_context_t *fctx)
{
 
asm volatile(
"ldf.fill f32=[%0],0x80\n"
"ldf.fill f33=[%1],0x80\n"
"ldf.fill f34=[%2],0x80\n"
"ldf.fill f35=[%3],0x80\n"
"ldf.fill f36=[%4],0x80\n"
"ldf.fill f37=[%5],0x80\n"
"ldf.fill f38=[%6],0x80\n"
"ldf.fill f39=[%7],0x80\n;;"
 
"ldf.fill f40=[%0],0x80\n"
"ldf.fill f41=[%1],0x80\n"
"ldf.fill f42=[%2],0x80\n"
"ldf.fill f43=[%3],0x80\n"
"ldf.fill f44=[%4],0x80\n"
"ldf.fill f45=[%5],0x80\n"
"ldf.fill f46=[%6],0x80\n"
"ldf.fill f47=[%7],0x80\n;;"
 
"ldf.fill f48=[%0],0x80\n"
"ldf.fill f49=[%1],0x80\n"
"ldf.fill f50=[%2],0x80\n"
"ldf.fill f51=[%3],0x80\n"
"ldf.fill f52=[%4],0x80\n"
"ldf.fill f53=[%5],0x80\n"
"ldf.fill f54=[%6],0x80\n"
"ldf.fill f55=[%7],0x80\n;;"
 
"ldf.fill f56=[%0],0x80\n"
"ldf.fill f57=[%1],0x80\n"
"ldf.fill f58=[%2],0x80\n"
"ldf.fill f59=[%3],0x80\n"
"ldf.fill f60=[%4],0x80\n"
"ldf.fill f61=[%5],0x80\n"
"ldf.fill f62=[%6],0x80\n"
"ldf.fill f63=[%7],0x80\n;;"
 
"ldf.fill f64=[%0],0x80\n"
"ldf.fill f65=[%1],0x80\n"
"ldf.fill f66=[%2],0x80\n"
"ldf.fill f67=[%3],0x80\n"
"ldf.fill f68=[%4],0x80\n"
"ldf.fill f69=[%5],0x80\n"
"ldf.fill f70=[%6],0x80\n"
"ldf.fill f71=[%7],0x80\n;;"
 
"ldf.fill f72=[%0],0x80\n"
"ldf.fill f73=[%1],0x80\n"
"ldf.fill f74=[%2],0x80\n"
"ldf.fill f75=[%3],0x80\n"
"ldf.fill f76=[%4],0x80\n"
"ldf.fill f77=[%5],0x80\n"
"ldf.fill f78=[%6],0x80\n"
"ldf.fill f79=[%7],0x80\n;;"
 
"ldf.fill f80=[%0],0x80\n"
"ldf.fill f81=[%1],0x80\n"
"ldf.fill f82=[%2],0x80\n"
"ldf.fill f83=[%3],0x80\n"
"ldf.fill f84=[%4],0x80\n"
"ldf.fill f85=[%5],0x80\n"
"ldf.fill f86=[%6],0x80\n"
"ldf.fill f87=[%7],0x80\n;;"
 
"ldf.fill f88=[%0],0x80\n"
"ldf.fill f89=[%1],0x80\n"
"ldf.fill f90=[%2],0x80\n"
"ldf.fill f91=[%3],0x80\n"
"ldf.fill f92=[%4],0x80\n"
"ldf.fill f93=[%5],0x80\n"
"ldf.fill f94=[%6],0x80\n"
"ldf.fill f95=[%7],0x80\n;;"
 
 
"ldf.fill f96=[%0],0x80\n"
"ldf.fill f97=[%1],0x80\n"
"ldf.fill f98=[%2],0x80\n"
"ldf.fill f99=[%3],0x80\n"
"ldf.fill f100=[%4],0x80\n"
"ldf.fill f101=[%5],0x80\n"
"ldf.fill f102=[%6],0x80\n"
"ldf.fill f103=[%7],0x80\n;;"
 
"ldf.fill f104=[%0],0x80\n"
"ldf.fill f105=[%1],0x80\n"
"ldf.fill f106=[%2],0x80\n"
"ldf.fill f107=[%3],0x80\n"
"ldf.fill f108=[%4],0x80\n"
"ldf.fill f109=[%5],0x80\n"
"ldf.fill f110=[%6],0x80\n"
"ldf.fill f111=[%7],0x80\n;;"
 
"ldf.fill f112=[%0],0x80\n"
"ldf.fill f113=[%1],0x80\n"
"ldf.fill f114=[%2],0x80\n"
"ldf.fill f115=[%3],0x80\n"
"ldf.fill f116=[%4],0x80\n"
"ldf.fill f117=[%5],0x80\n"
"ldf.fill f118=[%6],0x80\n"
"ldf.fill f119=[%7],0x80\n;;"
 
"ldf.fill f120=[%0],0x80\n"
"ldf.fill f121=[%1],0x80\n"
"ldf.fill f122=[%2],0x80\n"
"ldf.fill f123=[%3],0x80\n"
"ldf.fill f124=[%4],0x80\n"
"ldf.fill f125=[%5],0x80\n"
"ldf.fill f126=[%6],0x80\n"
"ldf.fill f127=[%7],0x80\n;;"
 
 
:
:"r" (&((fctx->fr)[0])),"r" (&((fctx->fr)[1])),"r" (&((fctx->fr)[2])),"r" (&((fctx->fr)[3])),
"r" (&((fctx->fr)[4])),"r" (&((fctx->fr)[5])),"r" (&((fctx->fr)[6])),"r" (&((fctx->fr)[7]))
);
}
 
void fpu_enable(void)
{
	__u64 a = 0;
asm volatile(
"rsm %0;;"
"srlz.i\n"
"srlz.d;;\n"
:
:"i" (PSR_DFH_MASK)
);
asm volatile
(
"mov %0=ar.fpsr;;\n"
"or %0=%0,%1;;\n"
"mov ar.fpsr=%0;;\n"
: "+r" (a)
: "r" (0x38)
);
 
}
 
void fpu_disable(void)
{
 
	__u64 a = 0;
asm volatile(
"ssm %0;;\n"
"srlz.i\n"
"srlz.d;;\n"
:
:"i" (PSR_DFH_MASK)
);
asm volatile
(
"mov %0=ar.fpsr;;\n"
"or %0=%0,%1;;\n"
"mov ar.fpsr=%0;;\n"
: "+r" (a)
: "r" (0x38)
);
 
}
 
void fpu_init(void)
{
	__u64 a = 0;
asm volatile
(
"mov %0=ar.fpsr;;\n"
"or %0=%0,%1;;\n"
"mov ar.fpsr=%0;;\n"
: "+r" (a)
: "r" (0x38)
);
 
asm volatile(
"mov f2=f0\n"
"mov f3=f0\n"
"mov f4=f0\n"
"mov f5=f0\n"
"mov f6=f0\n"
"mov f7=f0\n"
"mov f8=f0\n"
"mov f9=f0\n"
 
"mov f10=f0\n"
"mov f11=f0\n"
"mov f12=f0\n"
"mov f13=f0\n"
"mov f14=f0\n"
"mov f15=f0\n"
"mov f16=f0\n"
"mov f17=f0\n"
"mov f18=f0\n"
"mov f19=f0\n"
 
"mov f20=f0\n"
"mov f21=f0\n"
"mov f22=f0\n"
"mov f23=f0\n"
"mov f24=f0\n"
"mov f25=f0\n"
"mov f26=f0\n"
"mov f27=f0\n"
"mov f28=f0\n"
"mov f29=f0\n"
 
"mov f30=f0\n"
"mov f31=f0\n"
"mov f32=f0\n"
"mov f33=f0\n"
"mov f34=f0\n"
"mov f35=f0\n"
"mov f36=f0\n"
"mov f37=f0\n"
"mov f38=f0\n"
"mov f39=f0\n"
 
"mov f40=f0\n"
"mov f41=f0\n"
"mov f42=f0\n"
"mov f43=f0\n"
"mov f44=f0\n"
"mov f45=f0\n"
"mov f46=f0\n"
"mov f47=f0\n"
"mov f48=f0\n"
"mov f49=f0\n"
 
"mov f50=f0\n"
"mov f51=f0\n"
"mov f52=f0\n"
"mov f53=f0\n"
"mov f54=f0\n"
"mov f55=f0\n"
"mov f56=f0\n"
"mov f57=f0\n"
"mov f58=f0\n"
"mov f59=f0\n"
 
"mov f60=f0\n"
"mov f61=f0\n"
"mov f62=f0\n"
"mov f63=f0\n"
"mov f64=f0\n"
"mov f65=f0\n"
"mov f66=f0\n"
"mov f67=f0\n"
"mov f68=f0\n"
"mov f69=f0\n"
 
"mov f70=f0\n"
"mov f71=f0\n"
"mov f72=f0\n"
"mov f73=f0\n"
"mov f74=f0\n"
"mov f75=f0\n"
"mov f76=f0\n"
"mov f77=f0\n"
"mov f78=f0\n"
"mov f79=f0\n"
 
"mov f80=f0\n"
"mov f81=f0\n"
"mov f82=f0\n"
"mov f83=f0\n"
"mov f84=f0\n"
"mov f85=f0\n"
"mov f86=f0\n"
"mov f87=f0\n"
"mov f88=f0\n"
"mov f89=f0\n"
 
"mov f90=f0\n"
"mov f91=f0\n"
"mov f92=f0\n"
"mov f93=f0\n"
"mov f94=f0\n"
"mov f95=f0\n"
"mov f96=f0\n"
"mov f97=f0\n"
"mov f98=f0\n"
"mov f99=f0\n"
 
"mov f100=f0\n"
"mov f101=f0\n"
"mov f102=f0\n"
"mov f103=f0\n"
"mov f104=f0\n"
"mov f105=f0\n"
"mov f106=f0\n"
"mov f107=f0\n"
"mov f108=f0\n"
"mov f109=f0\n"
 
"mov f110=f0\n"
"mov f111=f0\n"
"mov f112=f0\n"
"mov f113=f0\n"
"mov f114=f0\n"
"mov f115=f0\n"
"mov f116=f0\n"
"mov f117=f0\n"
"mov f118=f0\n"
"mov f119=f0\n"
 
"mov f120=f0\n"
"mov f121=f0\n"
"mov f122=f0\n"
"mov f123=f0\n"
"mov f124=f0\n"
"mov f125=f0\n"
"mov f126=f0\n"
"mov f127=f0\n"
 
);
 
}
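/*
 * A note on the magic constant, assuming the standard ia64 FPSR layout: the
 * value 0x38 that fpu_enable(), fpu_disable() and fpu_init() all or into
 * ar.fpsr sets the traps.od, traps.ud and traps.id bits, i.e. it disables
 * the floating-point overflow, underflow and inexact traps while leaving
 * the invalid-operation, denormal and zero-divide traps untouched.
 */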
 
/tags/0.1.1/kernel/arch/ia64/src/dummy.s
0,0 → 1,42
#
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
.text
 
.global calibrate_delay_loop
.global asm_delay_loop
.global cpu_sleep
.global dummy
 
calibrate_delay_loop:
asm_delay_loop:
cpu_sleep:
 
dummy:
br.ret.sptk.many b0
 
/tags/0.1.1/kernel/arch/ia64/src/ski/ski.c
0,0 → 1,157
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/ski/ski.h>
#include <console/console.h>
#include <console/chardev.h>
 
static chardev_t ski_console;
static bool kb_disable;
 
static void ski_putchar(chardev_t *d, const char ch);
static __s32 ski_getchar(void);
 
/** Display character on debug console
*
* Use SSC (Simulator System Call) to
* display character on debug console.
*
* @param d Character device.
* @param ch Character to be printed.
*/
void ski_putchar(chardev_t *d, const char ch)
{
__asm__ volatile (
"mov r15=%0\n"
"mov r32=%1\n" /* r32 is in0 */
"break 0x80000\n" /* modifies r8 */
:
: "i" (SKI_PUTCHAR), "r" (ch)
: "r15", "in0", "r8"
);
if (ch == '\n')
ski_putchar(d, '\r');
}
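/*
 * A minimal sketch of the Ski SSC calling convention used throughout this
 * file, assuming a hypothetical helper name (ski_ssc is not part of these
 * sources): the SSC number goes to r15, the first argument to in0 (r32),
 * "break 0x80000" traps into the simulator and the result comes back in r8.
 */
#if 0
static __u64 ski_ssc(__u64 ssc, __u64 arg)
{
	__u64 ret;

	__asm__ volatile (
		"mov r15 = %1\n"
		"mov r32 = %2\n"	/* r32 is in0 */
		"break 0x80000;;\n"	/* modifies r8 */
		"mov %0 = r8;;\n"
		: "=r" (ret)
		: "r" (ssc), "r" (arg)
		: "r15", "in0", "r8"
	);

	return ret;
}
#endif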
 
/** Ask debug console if a key was pressed.
*
* Use SSC (Simulator System Call) to
* get character from debug console.
*
* This call is non-blocking.
*
* @return ASCII code of pressed key or 0 if no key pressed.
*/
__s32 ski_getchar(void)
{
__u64 ch;
__asm__ volatile (
"mov r15=%1\n"
"break 0x80000;;\n" /* modifies r8 */
"mov %0=r8;;\n"
 
: "=r" (ch)
: "i" (SKI_GETCHAR)
: "r15", "r8"
);
 
return (__s32) ch;
}
 
/**
* This is a blocking wrapper for ski_getchar().
* To be used when the kernel crashes.
*/
static char ski_getchar_blocking(chardev_t *d)
{
int ch;
 
while(!(ch=ski_getchar()))
;
if(ch == '\r')
ch = '\n';
return (char) ch;
}
 
/** Ask keyboard if a key was pressed. */
void poll_keyboard(void)
{
char ch;
 
if (kb_disable)
return;
 
ch = ski_getchar();
if(ch == '\r')
ch = '\n';
if (ch)
chardev_push_character(&ski_console, ch);
}
 
/* Called from getc(). */
static void ski_kb_enable(chardev_t *d)
{
kb_disable = false;
}
 
/* Called from getc(). */
static void ski_kb_disable(chardev_t *d)
{
kb_disable = true;
}
 
 
static chardev_operations_t ski_ops = {
.resume = ski_kb_enable,
.suspend = ski_kb_disable,
.write = ski_putchar,
.read = ski_getchar_blocking
};
 
 
/** Initialize debug console
*
 * Issue SSC (Simulator System Call)
 * to open the debug console.
*/
void ski_init_console(void)
{
__asm__ volatile (
"mov r15=%0\n"
"break 0x80000\n"
:
: "i" (SKI_INIT_CONSOLE)
: "r15", "r8"
);
 
chardev_initialize("ski_console", &ski_console, &ski_ops);
stdin = &ski_console;
stdout = &ski_console;
}
/tags/0.1.1/kernel/arch/ia64/src/putchar.c
0,0 → 1,35
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <putchar.h>
#include <arch/ski/ski.h>
 
void putchar(const char ch)
{
ski_write(ch);
}
/tags/0.1.1/kernel/arch/ia64/src/drivers/it.c
0,0 → 1,69
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** Interval Timer driver. */
#include <arch/drivers/it.h>
#include <arch/ski/ski.h>
#include <arch/interrupt.h>
#include <arch/register.h>
#include <arch/asm.h>
#include <arch/barrier.h>
#include <time/clock.h>
 
 
/** Initialize Interval Timer. */
void it_init(void)
{
cr_itv_t itv;
 
/* initialize Interval Timer external interrupt vector */
itv.value = itv_read();
itv.vector = INTERRUPT_TIMER;
itv.m = 0;
itv_write(itv.value);
 
/* set Interval Timer Counter to zero */
itc_write(0);
/* generate first Interval Timer interrupt in IT_DELTA ticks */
itm_write(IT_DELTA);
 
/* propagate changes */
srlz_d();
}
 
/** Process Interval Timer interrupt. */
void it_interrupt(void)
{
eoi_write(EOI);
itm_write(itc_read() + IT_DELTA); /* program next interruption */
srlz_d(); /* propagate changes */
clock();
poll_keyboard();
}
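/*
 * For illustration: the timer is re-armed relatively -- each interrupt
 * programs the match register to the current ITC value plus IT_DELTA, so
 * the next interrupt fires IT_DELTA ITC ticks later. IT_DELTA (see
 * arch/drivers/it.h) is presumably chosen to match the kernel's clock tick
 * rate.
 */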
/tags/0.1.1/kernel/arch/ia64/src/cpu/cpu.c
0,0 → 1,67
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <cpu.h>
#include <arch.h>
#include <arch/register.h>
#include <print.h>
 
void cpu_arch_init(void)
{
}
 
void cpu_identify(void)
{
CPU->arch.cpuid0 = cpuid_read(0);
CPU->arch.cpuid1 = cpuid_read(1);
CPU->arch.cpuid3.value = cpuid_read(3);
}
 
void cpu_print_report(cpu_t *m)
{
char *family_str;
char vendor[2*sizeof(__u64)+1];
*((__u64 *) &vendor[0*sizeof(__u64)]) = CPU->arch.cpuid0;
*((__u64 *) &vendor[1*sizeof(__u64)]) = CPU->arch.cpuid1;
vendor[sizeof(vendor)-1] = '\0';
switch(m->arch.cpuid3.family) {
case FAMILY_ITANIUM:
family_str = "Itanium";
break;
case FAMILY_ITANIUM2:
family_str = "Itanium 2";
break;
default:
family_str = "Unknown";
break;
}
printf("cpu%d: %s (%s), archrev=%d, model=%d, revision=%d\n", CPU->id, family_str, vendor, CPU->arch.cpuid3.archrev, CPU->arch.cpuid3.model, CPU->arch.cpuid3.revision);
}
/tags/0.1.1/kernel/arch/ia64/Makefile.inc
0,0 → 1,90
#
# Copyright (C) 2005 Martin Decky
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
## Toolchain configuration
#
 
BFD_NAME = elf64-little
BFD_ARCH = ia64-elf64
BFD = elf64-ia64-little
TARGET = ia64-pc-linux-gnu
TOOLCHAIN_DIR = /usr/local/ia64/bin
 
## Make some default assumptions
#
 
INIT0_ADDRESS = 0xe000000000400000
INIT0_SIZE = 0x100000
 
INIT1_ADDRESS = 0xe000000000800000
INIT1_SIZE = 0x100000
 
CFLAGS += -mconstant-gp -fno-unwind-tables -mfixed-range=f32-f127
LFLAGS += -EL
AFLAGS += -mconstant-gp
 
DEFS += -D__64_BITS__ -DINIT0_ADDRESS=$(INIT0_ADDRESS) -DINIT0_SIZE=$(INIT0_SIZE) \
-DINIT1_ADDRESS=$(INIT1_ADDRESS) -DINIT1_SIZE=$(INIT1_SIZE)
 
## Compile with page hash table support.
#
 
CONFIG_PAGE_HT = y
DEFS += -DCONFIG_PAGE_HT
 
## Compile with support for address space identifiers.
#
 
CONFIG_ASID = y
CONFIG_ASID_FIFO = y
 
 
## Compile with support for software integer division.
#
 
CONFIG_SOFTINT = y
 
ARCH_SOURCES = \
arch/$(ARCH)/src/start.S \
arch/$(ARCH)/src/asm.S \
arch/$(ARCH)/src/dummy.s \
arch/$(ARCH)/src/ia64.c \
arch/$(ARCH)/src/fpu_context.c \
arch/$(ARCH)/src/context.S \
arch/$(ARCH)/src/ski/ski.c \
arch/$(ARCH)/src/cpu/cpu.c \
arch/$(ARCH)/src/ivt.S \
arch/$(ARCH)/src/interrupt.c \
arch/$(ARCH)/src/mm/as.c \
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/mm/tlb.c \
arch/$(ARCH)/src/mm/vhpt.c \
arch/$(ARCH)/src/proc/scheduler.c \
arch/$(ARCH)/src/ddi/ddi.c \
arch/$(ARCH)/src/drivers/it.c
/tags/0.1.1/kernel/arch/ia64/_link.ld.in
0,0 → 1,42
/** IA-64 linker script
*
 * The output is in ELF format, but it contains only a single section,
 * which is laid out as follows:
 *   kernel text
 *   kernel data
*
*/
 
#define __ASM__
 
ENTRY(kernel_image_start)
 
SECTIONS {
.image 0xe000000000100000: AT (0x0000000000100000) {
ktext_start = .;
*(K_TEXT_START);
*(.text)
ktext_end = .;
kdata_start = .;
*(K_DATA_START)
*(.rodata .rodata.*)
*(.opd)
*(.data)
*(.got .got.*)
*(.sdata)
*(.sbss)
*(.scommon)
*(.bss)
*(COMMON);
 
symbol_table = .;
		*(symtab.*);		/* symbol table, must be the LAST symbol! */
 
kdata_end = .;
}
 
_hardcoded_ktext_size = ktext_end - ktext_start;
_hardcoded_kdata_size = kdata_end - kdata_start;
_hardcoded_load_address = 0xe000000000100000;
 
}
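/*
 * The three _hardcoded_* symbols defined above are read by start.S, which
 * copies them into the hardcoded_ktext_size, hardcoded_kdata_size and
 * hardcoded_load_address kernel variables before calling arch_pre_main().
 */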