Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1767 → Rev 1780

/kernel/trunk/arch/ppc64/include/exception.h
42,47 → 42,47
#include <typedefs.h>
 
struct istate {
__u64 r0;
__u64 r2;
__u64 r3;
__u64 r4;
__u64 r5;
__u64 r6;
__u64 r7;
__u64 r8;
__u64 r9;
__u64 r10;
__u64 r11;
__u64 r13;
__u64 r14;
__u64 r15;
__u64 r16;
__u64 r17;
__u64 r18;
__u64 r19;
__u64 r20;
__u64 r21;
__u64 r22;
__u64 r23;
__u64 r24;
__u64 r25;
__u64 r26;
__u64 r27;
__u64 r28;
__u64 r29;
__u64 r30;
__u64 r31;
__u64 cr;
__u64 pc;
__u64 srr1;
__u64 lr;
__u64 ctr;
__u64 xer;
__u64 r12;
__u64 sp;
uint64_t r0;
uint64_t r2;
uint64_t r3;
uint64_t r4;
uint64_t r5;
uint64_t r6;
uint64_t r7;
uint64_t r8;
uint64_t r9;
uint64_t r10;
uint64_t r11;
uint64_t r13;
uint64_t r14;
uint64_t r15;
uint64_t r16;
uint64_t r17;
uint64_t r18;
uint64_t r19;
uint64_t r20;
uint64_t r21;
uint64_t r22;
uint64_t r23;
uint64_t r24;
uint64_t r25;
uint64_t r26;
uint64_t r27;
uint64_t r28;
uint64_t r29;
uint64_t r30;
uint64_t r31;
uint64_t cr;
uint64_t pc;
uint64_t srr1;
uint64_t lr;
uint64_t ctr;
uint64_t xer;
uint64_t r12;
uint64_t sp;
};
 
static inline void istate_set_retaddr(istate_t *istate, __address retaddr)
static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr)
{
istate->pc = retaddr;
}
93,7 → 93,7
panic("istate_from_uspace not yet implemented");
return 0;
}
static inline __native istate_get_pc(istate_t *istate)
static inline unative_t istate_get_pc(istate_t *istate)
{
return istate->pc;
}
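The istate_t structure above captures the register context of an interrupted thread, and istate_set_retaddr() simply rewrites its pc field so that the exception return resumes at a different address. A minimal, hypothetical sketch of how a handler might use it (trap_redirect_target is an assumed symbol used only for illustration, not part of this revision):

/* Sketch only: make the exception return resume in trap_redirect_target()
 * instead of at the originally interrupted instruction. */
extern void trap_redirect_target(void);

static void redirect_istate(istate_t *istate)
{
	istate_set_retaddr(istate, (uintptr_t) trap_redirect_target);
}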
/kernel/trunk/arch/ppc64/include/fpu_context.h
40,25 → 40,25
#endif
 
struct fpu_context {
__u64 fr14;
__u64 fr15;
__u64 fr16;
__u64 fr17;
__u64 fr18;
__u64 fr19;
__u64 fr20;
__u64 fr21;
__u64 fr22;
__u64 fr23;
__u64 fr24;
__u64 fr25;
__u64 fr26;
__u64 fr27;
__u64 fr28;
__u64 fr29;
__u64 fr30;
__u64 fr31;
__u32 fpscr;
uint64_t fr14;
uint64_t fr15;
uint64_t fr16;
uint64_t fr17;
uint64_t fr18;
uint64_t fr19;
uint64_t fr20;
uint64_t fr21;
uint64_t fr22;
uint64_t fr23;
uint64_t fr24;
uint64_t fr25;
uint64_t fr26;
uint64_t fr27;
uint64_t fr28;
uint64_t fr29;
uint64_t fr30;
uint64_t fr31;
uint32_t fpscr;
} __attribute__ ((packed));
 
#endif
/kernel/trunk/arch/ppc64/include/byteorder.h
40,24 → 40,24
 
#define BIG_ENDIAN
 
static inline __u64 __u64_le2host(__u64 n)
static inline uint64_t uint64_t_le2host(uint64_t n)
{
return __u64_byteorder_swap(n);
return uint64_t_byteorder_swap(n);
}
 
 
/** Convert little-endian __native to host __native
/** Convert little-endian unative_t to host unative_t
*
* Convert little-endian __native parameter to host endianness.
* Convert little-endian unative_t parameter to host endianness.
*
* @param n Little-endian __native argument.
* @param n Little-endian unative_t argument.
*
* @return Result in host endianness.
*
*/
static inline __native __native_le2host(__native n)
static inline unative_t unative_t_le2host(unative_t n)
{
__address v;
uintptr_t v;
asm volatile (
"lwbrx %0, %1, %2\n"
/kernel/trunk/arch/ppc64/include/cpuid.h
38,8 → 38,8
#include <arch/types.h>
 
struct cpu_info {
__u16 version;
__u16 revision;
uint16_t version;
uint16_t revision;
} __attribute__ ((packed));
 
static inline void cpu_version(struct cpu_info *info)
/kernel/trunk/arch/ppc64/include/types.h
37,22 → 37,22
 
#define NULL 0
 
typedef signed char __s8;
typedef signed short __s16;
typedef signed int __s32;
typedef signed long __s64;
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef signed long int64_t;
 
typedef unsigned char __u8;
typedef unsigned short __u16;
typedef unsigned int __u32;
typedef unsigned long __u64;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned long uint64_t;
 
typedef __u64 __address;
typedef __u64 pfn_t;
typedef uint64_t uintptr_t;
typedef uint64_t pfn_t;
 
typedef __u64 ipl_t;
typedef uint64_t ipl_t;
 
typedef __u64 __native;
typedef uint64_t unative_t;
 
/** Page Table Entry. */
typedef struct {
/kernel/trunk/arch/ppc64/include/memstr.h
37,10 → 37,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(__address dst, size_t cnt, __u16 x);
extern void memsetb(__address dst, size_t cnt, __u8 x);
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
 
extern int memcmp(__address src, __address dst, int cnt);
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
 
#endif
 
/kernel/trunk/arch/ppc64/include/boot/boot.h
48,28 → 48,28
#include <arch/types.h>
 
typedef struct {
__address addr;
__u64 size;
uintptr_t addr;
uint64_t size;
} utask_t;
 
typedef struct {
__u32 count;
uint32_t count;
utask_t tasks[TASKMAP_MAX_RECORDS];
} taskmap_t;
 
typedef struct {
__address start;
__u64 size;
uintptr_t start;
uint64_t size;
} memzone_t;
 
typedef struct {
__u64 total;
__u32 count;
uint64_t total;
uint32_t count;
memzone_t zones[MEMMAP_MAX_RECORDS];
} memmap_t;
 
typedef struct {
__address addr;
uintptr_t addr;
unsigned int width;
unsigned int height;
unsigned int bpp;
/kernel/trunk/arch/ppc64/include/faddr.h
37,7 → 37,7
 
#include <arch/types.h>
 
#define FADDR(fptr) ((__address) (fptr))
#define FADDR(fptr) ((uintptr_t) (fptr))
 
#endif
 
/kernel/trunk/arch/ppc64/include/asm.h
128,9 → 128,9
* The stack is assumed to be STACK_SIZE bytes long.
* The stack must start on page boundary.
*/
static inline __address get_stack_base(void)
static inline uintptr_t get_stack_base(void)
{
__address v;
uintptr_t v;
asm volatile (
"and %0, %%sp, %1\n"
151,9 → 151,9
);
}
 
void asm_delay_loop(__u32 t);
void asm_delay_loop(uint32_t t);
 
extern void userspace_asm(__address uspace_uarg, __address stack, __address entry);
extern void userspace_asm(uintptr_t uspace_uarg, uintptr_t stack, uintptr_t entry);
 
#endif
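get_stack_base() in the hunk above relies on the kernel stack being STACK_SIZE bytes long and aligned to STACK_SIZE, so the base can be recovered by masking the stack pointer. A sketch of the same computation in plain C, assuming current_sp stands in for the value of the sp register that the inline assembly reads directly:

/* Sketch only, assuming a STACK_SIZE-aligned stack of STACK_SIZE bytes:
 * clearing the low-order bits of the stack pointer yields the stack base. */
static inline uintptr_t stack_base_of(uintptr_t current_sp)
{
	return current_sp & ~((uintptr_t) STACK_SIZE - 1);
}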
 
/kernel/trunk/arch/ppc64/include/mm/frame.h
43,7 → 43,7
 
#include <arch/types.h>
 
extern __address last_frame;
extern uintptr_t last_frame;
 
extern void frame_arch_init(void);
 
/kernel/trunk/arch/ppc64/include/mm/page.h
43,8 → 43,8
#ifdef KERNEL
 
#ifndef __ASM__
# define KA2PA(x) (((__address) (x)) - 0x80000000)
# define PA2KA(x) (((__address) (x)) + 0x80000000)
# define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
# define PA2KA(x) (((uintptr_t) (x)) + 0x80000000)
#else
# define KA2PA(x) ((x) - 0x80000000)
# define PA2KA(x) ((x) + 0x80000000)
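With these macros the kernel's virtual addresses are simply the physical addresses offset by 0x80000000 (2 GiB). As a worked example, a frame at physical address 0x00100000 is visible to the kernel at PA2KA(0x00100000) == 0x80100000, and KA2PA(0x80100000) gives back 0x00100000.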
94,9 → 94,9
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))
 
#define PTE_VALID_ARCH(pte) (*((__u32 *) (pte)) != 0)
#define PTE_VALID_ARCH(pte) (*((uint32_t *) (pte)) != 0)
#define PTE_PRESENT_ARCH(pte) ((pte)->p != 0)
#define PTE_GET_FRAME_ARCH(pte) ((__address) ((pte)->pfn << 12))
#define PTE_GET_FRAME_ARCH(pte) ((uintptr_t) ((pte)->pfn << 12))
#define PTE_WRITABLE_ARCH(pte) 1
#define PTE_EXECUTABLE_ARCH(pte) 1
 
/kernel/trunk/arch/ppc64/include/context.h
42,31 → 42,31
#define SP_DELTA 16
 
struct context {
__address sp;
__address pc;
uintptr_t sp;
uintptr_t pc;
__u64 r2;
__u64 r13;
__u64 r14;
__u64 r15;
__u64 r16;
__u64 r17;
__u64 r18;
__u64 r19;
__u64 r20;
__u64 r21;
__u64 r22;
__u64 r23;
__u64 r24;
__u64 r25;
__u64 r26;
__u64 r27;
__u64 r28;
__u64 r29;
__u64 r30;
__u64 r31;
uint64_t r2;
uint64_t r13;
uint64_t r14;
uint64_t r15;
uint64_t r16;
uint64_t r17;
uint64_t r18;
uint64_t r19;
uint64_t r20;
uint64_t r21;
uint64_t r22;
uint64_t r23;
uint64_t r24;
uint64_t r25;
uint64_t r26;
uint64_t r27;
uint64_t r28;
uint64_t r29;
uint64_t r30;
uint64_t r31;
__u64 cr;
uint64_t cr;
ipl_t ipl;
} __attribute__ ((packed));
/kernel/trunk/arch/ppc64/src/ddi/ddi.c
47,7 → 47,7
*
* @return 0 on success or an error code from errno.h.
*/
int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size)
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
return 0;
}
/kernel/trunk/arch/ppc64/src/mm/tlb.c
70,7 → 70,7
* @param page Address of the first page whose entry is to be invalidated.
* @param cnt Number of entries to invalidate.
*/
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
tlb_invalidate_all();
}
/kernel/trunk/arch/ppc64/src/mm/frame.c
39,7 → 39,7
#include <align.h>
#include <macros.h>
 
__address last_frame = 0;
uintptr_t last_frame = 0;
 
void frame_arch_init(void)
{
/kernel/trunk/arch/ppc64/src/mm/page.c
65,7 → 65,7
* @return PTE on success, NULL otherwise.
*
*/
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access,
static pte_t *find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr, int access,
istate_t *istate, int *pfrc)
{
/*
113,7 → 113,7
}
 
 
static void pht_refill_fail(__address badvaddr, istate_t *istate)
static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate)
{
char *symbol = "";
char *sym2 = "";
128,11 → 128,11
}
 
 
static void pht_insert(const __address vaddr, const pfn_t pfn)
static void pht_insert(const uintptr_t vaddr, const pfn_t pfn)
{
__u32 page = (vaddr >> 12) & 0xffff;
__u32 api = (vaddr >> 22) & 0x3f;
__u32 vsid;
uint32_t page = (vaddr >> 12) & 0xffff;
uint32_t api = (vaddr >> 22) & 0x3f;
uint32_t vsid;
asm volatile (
"mfsrin %0, %1\n"
141,10 → 141,10
);
/* Primary hash (xor) */
__u32 h = 0;
__u32 hash = vsid ^ page;
__u32 base = (hash & 0x3ff) << 3;
__u32 i;
uint32_t h = 0;
uint32_t hash = vsid ^ page;
uint32_t base = (hash & 0x3ff) << 3;
uint32_t i;
bool found = false;
/* Find unused or colliding
158,7 → 158,7
if (!found) {
/* Secondary hash (not) */
__u32 base2 = (~hash & 0x3ff) << 3;
uint32_t base2 = (~hash & 0x3ff) << 3;
/* Find unused or colliding
PTE in PTEG */
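pht_insert() above derives a page hash table slot from the virtual address: the primary hash XORs the segment's VSID with the page index, the secondary hash uses its complement, and each selects an 8-entry PTE group via (hash & 0x3ff) << 3. A self-contained sketch of just the group-base computation, with the vsid and vaddr treated as assumed example inputs rather than values taken from this revision:

/* Illustration only: the primary/secondary PTE-group base computation
 * performed by pht_insert(), for an arbitrary vsid/vaddr pair. */
static void pht_hash_example(uint32_t vsid, uintptr_t vaddr)
{
	uint32_t page = (vaddr >> 12) & 0xffff;
	uint32_t hash = vsid ^ page;
	uint32_t primary_base = (hash & 0x3ff) << 3;    /* 8 PTEs per group */
	uint32_t secondary_base = (~hash & 0x3ff) << 3; /* complemented hash */

	(void) primary_base;
	(void) secondary_base;
}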
196,7 → 196,7
*/
void pht_refill(bool data, istate_t *istate)
{
__address badvaddr;
uintptr_t badvaddr;
pte_t *pte;
int pfrc;
as_t *as;
252,7 → 252,7
 
void pht_init(void)
{
memsetb((__address) phte, 1 << PHT_BITS, 0);
memsetb((uintptr_t) phte, 1 << PHT_BITS, 0);
}
 
 
261,7 → 261,7
if (config.cpu_active == 1) {
page_mapping_operations = &pt_mapping_operations;
__address cur;
uintptr_t cur;
int flags;
/* Frames below 128 MB are mapped using BAT,
276,24 → 276,24
/* Allocate page hash table */
phte_t *physical_phte = (phte_t *) frame_alloc(PHT_ORDER, FRAME_KA | FRAME_ATOMIC);
ASSERT((__address) physical_phte % (1 << PHT_BITS) == 0);
ASSERT((uintptr_t) physical_phte % (1 << PHT_BITS) == 0);
pht_init();
asm volatile (
"mtsdr1 %0\n"
:
: "r" ((__address) physical_phte)
: "r" ((uintptr_t) physical_phte)
);
}
}
 
 
__address hw_map(__address physaddr, size_t size)
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
panic("Unable to map physical memory %p (%d bytes)", physaddr, size)
__address virtaddr = PA2KA(last_frame);
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
/kernel/trunk/arch/ppc64/src/interrupt.c
65,7 → 65,7
 
 
/* Reregister irq to be IPC-ready */
void irq_ipc_bind_arch(__native irq)
void irq_ipc_bind_arch(unative_t irq)
{
panic("not implemented\n");
/* TODO */
/kernel/trunk/arch/ppc64/src/ppc64.c
48,7 → 48,7
/* Setup usermode */
init.cnt = bootinfo.taskmap.count;
__u32 i;
uint32_t i;
for (i = 0; i < bootinfo.taskmap.count; i++) {
init.tasks[i].addr = PA2KA(bootinfo.taskmap.tasks[i].addr);
90,7 → 90,7
 
void userspace(uspace_arg_t *kernel_uarg)
{
userspace_asm((__address) kernel_uarg->uspace_uarg, (__address) kernel_uarg->uspace_stack + THREAD_STACK_SIZE - SP_DELTA, (__address) kernel_uarg->uspace_entry);
userspace_asm((uintptr_t) kernel_uarg->uspace_uarg, (uintptr_t) kernel_uarg->uspace_stack + THREAD_STACK_SIZE - SP_DELTA, (uintptr_t) kernel_uarg->uspace_entry);
/* Unreachable */
for (;;)