Subversion Repositories HelenOS-historic

Compare Revisions

Rev 868 → Rev 869

/kernel/trunk/contrib/arch/ia64/vmaxlma.c
36,8 → 36,9
exit(2);
}
 
#define ELF_VMA (0x88/sizeof(unsigned long long))
#define ELF_LMA (0x90/sizeof(unsigned long long))
#define ELF_VMA (0x50/sizeof(unsigned long long))
#define ELF_LMA (0x58/sizeof(unsigned long long))
#define ELF_ENTRY (0x18/sizeof(unsigned long long))
 
#define LENGTH 0x98
 
44,7 → 45,7
int main(int argc, char *argv[])
{
int fd;
unsigned long long vma, lma;
unsigned long long vma, lma,entry;
unsigned long long *elf;
 
if (argc != 2)
58,10 → 59,14
if ((void *) elf == (void *) -1)
error("map failed");
vma = elf[ELF_VMA];
/*vma = elf[ELF_VMA];*/
lma = elf[ELF_LMA];
elf[ELF_VMA] = lma;
elf[ELF_LMA] = vma;
entry = elf[ELF_ENTRY];
entry &= ((~0LL)>>3);
elf[ELF_ENTRY] = entry;
elf[ELF_ENTRY] = 0x100000;
/*elf[ELF_LMA] = vma;*/
if (munmap(elf, LENGTH) == -1)
error("munmap failed");
/kernel/trunk/generic/include/mm/asid.h
34,21 → 34,27
#ifndef __ASID_H__
#define __ASID_H__
 
#ifndef __ASM__
 
#include <arch/mm/asid.h>
#include <typedefs.h>
 
#endif
 
#define ASID_KERNEL 0
#define ASID_INVALID 1
#define ASID_START 2
#define ASID_MAX ASID_MAX_ARCH
 
#ifndef __ASM__
 
 
#define ASIDS_ALLOCABLE ((ASID_MAX+1)-ASID_START)
 
extern spinlock_t asidlock;
extern link_t as_with_asid_head;
 
#ifndef asid_get
extern asid_t asid_get(void);
#endif /* !def asid_get */
extern void asid_put(asid_t asid);
 
#ifndef asid_install
64,3 → 70,6
#endif /* !def asid_put_arch */
 
#endif
 
#endif
 
/kernel/trunk/arch/ia64/_link.ld.in
11,7 → 11,7
ENTRY(kernel_image_start)
 
SECTIONS {
.image 0x0000000000100000: AT (0x0000000000100000) {
.image 0xe000000000100000: AT (0x0000000000100000) {
ktext_start = .;
*(K_TEXT_START);
*(.text)
37,6 → 37,6
 
_hardcoded_ktext_size = ktext_end - ktext_start;
_hardcoded_kdata_size = kdata_end - kdata_start;
_hardcoded_load_address = 0x0000000000100000;
_hardcoded_load_address = 0xe000000000100000;
 
}
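
The new link address is the old physical load address lifted into kernel region 7: with VRN_SHIFT = 61 and VRN_KERNEL = 7 (see page.h below), the region base is 7LL << 61 = 0xe000000000000000, so PA2KA(0x0000000000100000) = 0xe000000000100000 becomes the virtual link address, while the physical load address in the AT() clause stays at 0x0000000000100000.
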
/kernel/trunk/arch/ia64/include/faddr.h
45,11 → 45,30
__address faddr;
__asm__(
"nop 0;;"
"nop 0;;"
"nop 0;;"
"nop 0;;"
"nop 0;;"
"nop 0;;"
"nop 0;;"
"nop 0;;"
"ld8 %0 = [%1]\n\t"
"nop 0;;"
"nop 0;;"
"nop 0;;"
"nop 0;;"
"nop 0;;"
"nop 0;;"
"nop 0;;"
"nop 0;;"
 
: "=r" (faddr)
: "r" (fptr)
);
 
/*faddr = *((__address *)(fptr));;*/
return faddr;
}
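
What FADDR computes: on ia64 a C function pointer addresses a function descriptor, and the ld8 fetches its first doubleword, which is the actual code address; the commented-out C assignment in the function body is the same computation. A sketch without the inline assembly, assuming the same __address typedef (illustration only, not repository code):

typedef unsigned long long __address;

static inline __address faddr_sketch(__address fptr)
{
	/* fptr points at a function descriptor; word 0 is the entry
	   address, word 1 is the gp value of the target. */
	return *((__address *) fptr);
}
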
 
/kernel/trunk/arch/ia64/include/mm/page.h
30,7 → 30,11
#ifndef __ia64_PAGE_H__
#define __ia64_PAGE_H__
 
#ifndef __ASM__
 
 
#include <arch/mm/frame.h>
#include <arch/barrier.h>
#include <genarch/mm/page_ht.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
37,12 → 41,14
#include <typedefs.h>
#include <debug.h>
 
#endif
 
#define PAGE_SIZE FRAME_SIZE
#define PAGE_WIDTH FRAME_WIDTH
#define KERNEL_PAGE_WIDTH 26
 
#define KA2PA(x) ((__address) (x))
#define PA2KA(x) ((__address) (x))
 
 
#define SET_PTL0_ADDRESS_ARCH(x) /**< To be removed as situation permits. */
 
#define PPN_SHIFT 12
49,9 → 55,19
 
#define VRN_SHIFT 61
#define VRN_MASK (7LL << VRN_SHIFT)
#define VRN_KERNEL 0
 
#ifdef __ASM__
#define VRN_KERNEL 7
#else
#define VRN_KERNEL 7LL
#endif
 
#define REGION_REGISTERS 8
 
#define KA2PA(x) ((__address) (x-(VRN_KERNEL<<VRN_SHIFT)))
#define PA2KA(x) ((__address) (x+(VRN_KERNEL<<VRN_SHIFT)))
 
 
#define VHPT_WIDTH 20 /* 1M */
#define VHPT_SIZE (1 << VHPT_WIDTH)
#define VHPT_BASE 0 /* Must be aligned to VHPT_SIZE */
76,8 → 92,8
 
#define VA_REGION(va) (va>>VA_REGION_INDEX)
 
#ifndef __ASM__
 
 
struct vhpt_tag_info {
unsigned long long tag : 63;
unsigned ti : 1;
263,4 → 279,19
extern bool vhpt_compare(__address page, asid_t asid, vhpt_entry_t *v);
extern void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags);
 
 
static inline void pokus(void)
{
region_register rr;
rr.word=rr_read(0);
srlz_d();
rr_write(0,rr.word);
srlz_d();
 
}
 
#endif
 
#endif
 
 
/kernel/trunk/arch/ia64/include/mm/asid.h
29,6 → 29,8
#ifndef __ia64_ASID_H__
#define __ia64_ASID_H__
 
#ifndef __ASM__
 
#include <arch/types.h>
 
typedef __u16 asid_t;
38,6 → 40,8
* Note that some architectures may support more bits,
* but those extra bits are not used by the kernel.
*/
#endif
#define RIDS_PER_ASID 7
#define RID_MAX 262143 /* 2^18 - 1 */
 
44,8 → 48,13
#define ASID2RID(asid, vrn) (((asid)*RIDS_PER_ASID)+(vrn))
#define RID2ASID(rid) ((rid)/RIDS_PER_ASID)
 
#ifndef __ASM__
 
 
typedef __u32 rid_t;
 
#endif
 
#define ASID_MAX_ARCH (RID_MAX/RIDS_PER_ASID)
 
#endif
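
Worked values for the kernel mapping set up in start.S below: ASID2RID(ASID_KERNEL, VRN_KERNEL) = 0 * 7 + 7 = 7, and ASID_MAX_ARCH = RID_MAX / RIDS_PER_ASID = 262143 / 7 = 37449, which is what bounds ASID_MAX in the generic asid.h above.
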
/kernel/trunk/arch/ia64/include/register.h
38,6 → 38,12
#define PSR_I_MASK 0x4000
#define PSR_PK_MASK 0x8000
 
#define PSR_DT_MASK (1<<17)
#define PSR_RT_MASK (1<<27)
#define PSR_IT_MASK 0x0000001000000000
 
 
 
/** Application registers. */
#define AR_KR0 0
#define AR_KR1 1
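
The new masks name the translation-enable bits of the processor status register: psr.dt is bit 17 (1 << 17), psr.rt is bit 27 (1 << 27), and psr.it is bit 36, i.e. 0x0000001000000000 = 1 << 36. start.S below ORs them, together with PSR_IC_MASK, into cr.ipsr so that the rfi lands in paging_start with instruction, data and register-stack translation enabled.
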
/kernel/trunk/arch/ia64/src/mm/page.c
55,10 → 55,12
/** Initialize VHPT and region registers. */
void set_environment(void)
{
 
//#ifdef NEVERDEFINED
region_register rr;
pta_register pta;
int i;
 
/*
* First set up kernel region register.
*/
97,6 → 99,11
pta_write(pta.word);
srlz_i();
srlz_d();
//#endif
 
return ;
}
 
/** Calculate address of collision chain from VPN and ASID.
/kernel/trunk/arch/ia64/src/start.S
26,8 → 26,22
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
 
#include <arch/register.h>
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <mm/asid.h>
 
 
#define RR_MASK (0xFFFFFFFF00000002)
#define RID_SHIFT 8
#define PS_SHIFT 2
 
 
#define KERNEL_TRANSLATION_I 0x0010000000000661
#define KERNEL_TRANSLATION_D 0x0010000000000661
 
 
.section K_TEXT_START
 
.global kernel_image_start
36,12 → 50,82
kernel_image_start:
.auto
 
#Fill TR.i and TR.d and enable paging
 
mov r9=rr[r0]
movl r10=(RR_MASK)
and r9=r10,r9
movl r10=((ASID2RID(ASID_KERNEL,VRN_KERNEL)<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
or r9=r10,r9
mov rr[r0]=r9
 
 
movl r8=(VRN_KERNEL<<VRN_SHIFT)
mov r9=rr[r8]
movl r10=(RR_MASK)
and r9=r10,r9
movl r10=((ASID2RID(ASID_KERNEL,VRN_KERNEL)<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
or r9=r10,r9
mov rr[r8]=r9
 
 
movl r8=(VRN_KERNEL<<VRN_SHIFT)
mov cr.ifa=r8
movl r10=(KERNEL_PAGE_WIDTH<<PS_SHIFT)
mov cr.itir=r10
movl r10=(KERNEL_TRANSLATION_I)
itr.i itr[r0]=r10
 
# mov cr.ifa=r0
# movl r10=(KERNEL_PAGE_WIDTH<<PS_SHIFT)
# mov cr.itir=r10
movl r10=(KERNEL_TRANSLATION_D)
itr.d dtr[r0]=r10
 
 
 
 
 
 
 
# initialize PSR
mov psr.l = r0
srlz.i
srlz.d
ssm PSR_IC_MASK
movl r10=(PSR_DT_MASK|PSR_RT_MASK|PSR_IT_MASK|PSR_IC_MASK) /*Enable paging*/
mov r9=psr
or r10=r10,r9
mov cr.ipsr=r10
mov cr.ifs=r0
# movl r8=(paging_start+VRN_KERNEL<<VRN_SHIFT)
movl r8=paging_start
mov cr.iip=r8
srlz.d
srlz.i
.explicit
{rfi;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
{nop 0;;}
 
.global paging_start
paging_start:
 
.auto
# switch to register bank 1
bsw.1
48,18 → 132,23
# initialize register stack
mov ar.rsc = r0
mov ar.bspstore = r0
movl r8=(VRN_KERNEL<<VRN_SHIFT)
mov ar.bspstore = r8
# mov ar.bspstore = r0
loadrs
 
.explicit
# initialize memory stack to some sane value
movl r12 = stack0 ;;
# movl r12 = stack0 ;;
movl r12 = stack0 + (VRN_KERNEL<<VRN_SHIFT);;
add r12 = - 16, r12 /* allocate a scratch area on the stack */
 
# initialize gp (Global Pointer) register
movl r1 = _hardcoded_load_address
movl r1 = _hardcoded_load_address ;;
 
# movl r1 = _hardcoded_load_address + (VRN_KERNEL<<VRN_SHIFT) ;;
;;
 
#
# Initialize hardcoded_* variables.
71,11 → 160,18
addl r18 = @gprel(hardcoded_kdata_size), gp
addl r19 = @gprel(hardcoded_load_address), gp
;;
st8 [r17] = r14
st8 [r18] = r15
st4 [r17] = r14
st4 [r18] = r15
st8 [r19] = r16
 
 
.auto
br.call.sptk.many b0=main_bsp
movl r18=main_bsp
mov b1=r18
br.call.sptk.many b0=b1
 
# br.call.sptk.many b0=main_bsp
 
0:
br 0b
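
For reference, the region-register value that the new start.S code composes for rr[r0] and rr[VRN_KERNEL << VRN_SHIFT], written as a C sketch (constants copied from the diff; the field layout, ve in bit 0, ps in bits 2..7, rid in bits 8..31, is the architectural one; illustration only, not repository code):

#define RR_MASK             0xFFFFFFFF00000002ULL
#define RID_SHIFT           8
#define PS_SHIFT            2
#define ASID_KERNEL         0
#define VRN_KERNEL          7
#define RIDS_PER_ASID       7
#define KERNEL_PAGE_WIDTH   26
#define ASID2RID(asid, vrn) (((asid) * RIDS_PER_ASID) + (vrn))

static unsigned long long kernel_rr(unsigned long long old_rr)
{
	/* Keep the bits selected by RR_MASK, install RID 7 and a 2^26 (64M)
	   preferred page size; this is the value both mov rr[]= stores write
	   before itr.i/itr.d pin the kernel translation. */
	return (old_rr & RR_MASK)
	    | ((unsigned long long) ASID2RID(ASID_KERNEL, VRN_KERNEL) << RID_SHIFT)
	    | ((unsigned long long) KERNEL_PAGE_WIDTH << PS_SHIFT);
}
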