/kernel/trunk/arch/ppc64/include/console.h |
---|
File deleted |
/kernel/trunk/arch/ppc64/include/asm/regname.h |
---|
194,6 → 194,8 |
/* MSR bits */ |
#define msr_ir (1 << 4) |
#define msr_dr (1 << 5) |
#define msr_pr (1 << 14) |
#define msr_ee (1 << 15) |
/* HID0 bits */ |
#define hid0_ice (1 << 15) |
/kernel/trunk/arch/ppc64/include/exception.h |
---|
0,0 → 1,84 |
/* |
* Copyright (C) 2006 Martin Decky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#ifndef __ppc64_EXCEPTION_H__ |
#define __ppc64_EXCEPTION_H__ |
#ifndef __ppc64_TYPES_H__ |
# include <arch/types.h> |
#endif |
#include <typedefs.h> |
struct istate { |
__u64 r0; |
__u64 r2; |
__u64 r3; |
__u64 r4; |
__u64 r5; |
__u64 r6; |
__u64 r7; |
__u64 r8; |
__u64 r9; |
__u64 r10; |
__u64 r11; |
__u64 r13; |
__u64 r14; |
__u64 r15; |
__u64 r16; |
__u64 r17; |
__u64 r18; |
__u64 r19; |
__u64 r20; |
__u64 r21; |
__u64 r22; |
__u64 r23; |
__u64 r24; |
__u64 r25; |
__u64 r26; |
__u64 r27; |
__u64 r28; |
__u64 r29; |
__u64 r30; |
__u64 r31; |
__u64 cr; |
__u64 pc; |
__u64 srr1; |
__u64 lr; |
__u64 ctr; |
__u64 xer; |
__u64 r12; |
__u64 sp; |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
{ |
istate->pc = retaddr; |
} |
#endif |
/kernel/trunk/arch/ppc64/include/byteorder.h |
---|
53,8 → 53,12 |
{ |
__address v; |
__asm__ volatile ("lwbrx %0, %1, %2\n" : "=r" (v) : "i" (0) , "r" (&n)); |
asm volatile ( |
"lwbrx %0, %1, %2\n" |
: "=r" (v) |
: "i" (0), "r" (&n) |
); |
return v; |
} |
#endif |
/kernel/trunk/arch/ppc64/include/cpuid.h |
---|
38,8 → 38,8 |
static inline void cpu_version(struct cpu_info *info) |
{ |
__asm__ volatile ( |
"mfspr %0, 287\n" |
asm volatile ( |
"mfpvr %0\n" |
: "=r" (*info) |
); |
} |
/kernel/trunk/arch/ppc64/include/types.h |
---|
33,8 → 33,8 |
typedef signed char __s8; |
typedef signed short __s16; |
typedef signed long __s32; |
typedef signed long long __s64; |
typedef signed int __s32; |
typedef signed long __s64; |
typedef unsigned char __u8; |
typedef unsigned short __u16; |
42,12 → 42,19 |
typedef unsigned long __u64; |
typedef __u64 __address; |
typedef __u32 pfn_t; |
typedef __u64 pfn_t; |
typedef __u64 ipl_t; |
typedef __u64 __native; |
typedef __u32 __native; |
typedef __u32 pte_t; |
/** Page Table Entry. */ |
typedef struct { |
unsigned p : 1; /**< Present bit. */ |
unsigned a : 1; /**< Accessed bit. */ |
unsigned g : 1; /**< Global bit. */ |
unsigned valid : 1; /**< Valid content even if not present. */ |
unsigned pfn : 20; /**< Physical frame number. */ |
} pte_t; |
#endif |
/kernel/trunk/arch/ppc64/include/elf.h |
---|
29,7 → 29,7 |
#ifndef __ppc64_ELF_H__ |
#define __ppc64_ELF_H__ |
#define ELF_MACHINE EM_PPC |
#define ELF_MACHINE EM_PPC64 |
#define ELF_DATA_ENCODING ELFDATA2MSB |
#define ELF_CLASS ELFCLASS32 |
/kernel/trunk/arch/ppc64/include/atomic.h |
---|
33,7 → 33,7 |
{ |
long tmp; |
asm __volatile__ ( |
asm volatile ( |
"1:\n" |
"lwarx %0, 0, %2\n" |
"addic %0, %0, 1\n" |
41,7 → 41,8 |
"bne- 1b" |
: "=&r" (tmp), "=m" (val->count) |
: "r" (&val->count), "m" (val->count) |
: "cc"); |
: "cc" |
); |
} |
static inline void atomic_dec(atomic_t *val) |
48,7 → 49,7 |
{ |
long tmp; |
asm __volatile__( |
asm volatile ( |
"1:\n" |
"lwarx %0, 0, %2\n" |
"addic %0, %0, -1\n" |
56,7 → 57,8 |
"bne- 1b" |
: "=&r" (tmp), "=m" (val->count) |
: "r" (&val->count), "m" (val->count) |
: "cc"); |
: "cc" |
); |
} |
static inline long atomic_postinc(atomic_t *val) |
/kernel/trunk/arch/ppc64/include/boot/boot.h |
---|
34,6 → 34,7 |
/* Temporary stack size for boot process */ |
#define TEMP_STACK_SIZE 0x100 |
#define TASKMAP_MAX_RECORDS 32 |
#define MEMMAP_MAX_RECORDS 32 |
#ifndef __ASM__ |
41,12 → 42,22 |
#include <arch/types.h> |
typedef struct { |
__address addr; |
__u64 size; |
} utask_t; |
typedef struct { |
__u32 count; |
utask_t tasks[TASKMAP_MAX_RECORDS]; |
} taskmap_t; |
typedef struct { |
__address start; |
__u32 size; |
__u64 size; |
} memzone_t; |
typedef struct { |
__u32 total; |
__u64 total; |
__u32 count; |
memzone_t zones[MEMMAP_MAX_RECORDS]; |
} memmap_t; |
60,6 → 71,7 |
} screen_t; |
typedef struct { |
taskmap_t taskmap; |
memmap_t memmap; |
screen_t screen; |
} bootinfo_t; |
/kernel/trunk/arch/ppc64/include/asm.h |
---|
39,11 → 39,12 |
* |
* @return Old interrupt priority level. |
*/ |
static inline ipl_t interrupts_enable(void) { |
static inline ipl_t interrupts_enable(void) |
{ |
ipl_t v; |
ipl_t tmp; |
__asm__ volatile ( |
asm volatile ( |
"mfmsr %0\n" |
"mfmsr %1\n" |
"ori %1, %1, 1 << 15\n" |
60,11 → 61,12 |
* |
* @return Old interrupt priority level. |
*/ |
static inline ipl_t interrupts_disable(void) { |
static inline ipl_t interrupts_disable(void) |
{ |
ipl_t v; |
ipl_t tmp; |
__asm__ volatile ( |
asm volatile ( |
"mfmsr %0\n" |
"mfmsr %1\n" |
"rlwinm %1, %1, 0, 17, 15\n" |
80,10 → 82,11 |
* |
* @param ipl Saved interrupt priority level. |
*/ |
static inline void interrupts_restore(ipl_t ipl) { |
static inline void interrupts_restore(ipl_t ipl) |
{ |
ipl_t tmp; |
__asm__ volatile ( |
asm volatile ( |
"mfmsr %1\n" |
"rlwimi %0, %1, 0, 17, 15\n" |
"cmpw 0, %0, %1\n" |
92,6 → 95,7 |
"0:\n" |
: "=r" (ipl), "=r" (tmp) |
: "0" (ipl) |
: "cr0" |
); |
} |
101,9 → 105,11 |
* |
* @return Current interrupt priority level. |
*/ |
static inline ipl_t interrupts_read(void) { |
static inline ipl_t interrupts_read(void) |
{ |
ipl_t v; |
__asm__ volatile ( |
asm volatile ( |
"mfmsr %0\n" |
: "=r" (v) |
); |
120,8 → 126,11 |
{ |
__address v; |
__asm__ volatile ("and %0, %%sp, %1\n" : "=r" (v) : "r" (~(STACK_SIZE-1))); |
asm volatile ( |
"and %0, %%sp, %1\n" |
: "=r" (v) |
: "r" (~(STACK_SIZE - 1)) |
); |
return v; |
} |
132,4 → 141,6 |
void cpu_halt(void); |
void asm_delay_loop(__u32 t); |
extern void userspace_asm(__address uspace_uarg, __address stack, __address entry); |
#endif |
/kernel/trunk/arch/ppc64/include/mm/frame.h |
---|
30,11 → 30,15 |
#define __ppc64_FRAME_H__ |
#define FRAME_WIDTH 12 /* 4K */ |
#define FRAME_SIZE (1<<FRAME_WIDTH) |
#define FRAME_SIZE (1 << FRAME_WIDTH) |
#ifdef KERNEL |
#ifndef __ASM__ |
#include <arch/types.h> |
extern __address last_frame; |
extern void frame_arch_init(void); |
#endif /* __ASM__ */ |
/kernel/trunk/arch/ppc64/include/mm/page.h |
---|
44,41 → 44,53 |
# define PA2KA(x) ((x) + 0x80000000) |
#endif |
#define PTL0_ENTRIES_ARCH 0 |
#define PTL1_ENTRIES_ARCH 0 |
#define PTL2_ENTRIES_ARCH 0 |
#define PTL3_ENTRIES_ARCH 0 |
/* |
* Implementation of generic 4-level page table interface, |
* the hardware Page Hash Table is used as cache. |
* |
* Page table layout: |
 * - 32-bit virtual addresses |
* - Offset is 12 bits => pages are 4K long |
* - PTL0 has 1024 entries (10 bits) |
* - PTL1 is not used |
* - PTL2 is not used |
 * - PTL3 has 1024 entries (10 bits) |
*/ |
#define PTL0_INDEX_ARCH(vaddr) 0 |
#define PTL1_INDEX_ARCH(vaddr) 0 |
#define PTL2_INDEX_ARCH(vaddr) 0 |
#define PTL3_INDEX_ARCH(vaddr) 0 |
#define PTL0_ENTRIES_ARCH 1024 |
#define PTL1_ENTRIES_ARCH 0 |
#define PTL2_ENTRIES_ARCH 0 |
#define PTL3_ENTRIES_ARCH 1024 |
#define SET_PTL0_ADDRESS_ARCH(ptl0) |
#define PTL0_INDEX_ARCH(vaddr) (((vaddr) >> 22) & 0x3ff) |
#define PTL1_INDEX_ARCH(vaddr) 0 |
#define PTL2_INDEX_ARCH(vaddr) 0 |
#define PTL3_INDEX_ARCH(vaddr) (((vaddr) >> 12) & 0x3ff) |
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *) 0) |
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) ((pte_t *) 0) |
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) ((pte_t *) 0) |
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((pte_t *) 0) |
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) (((pte_t *) (ptl0))[(i)].pfn << 12) |
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) (ptl1) |
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) (ptl2) |
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) (((pte_t *) (ptl3))[(i)].pfn << 12) |
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) |
#define SET_PTL0_ADDRESS_ARCH(ptl0) |
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) (((pte_t *) (ptl0))[(i)].pfn = (a) >> 12) |
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a) |
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a) |
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) |
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) (((pte_t *) (ptl3))[(i)].pfn = (a) >> 12) |
#define GET_PTL1_FLAGS_ARCH(ptl0, i) 0 |
#define GET_PTL2_FLAGS_ARCH(ptl1, i) 0 |
#define GET_PTL3_FLAGS_ARCH(ptl2, i) 0 |
#define GET_FRAME_FLAGS_ARCH(ptl3, i) 0 |
#define GET_PTL1_FLAGS_ARCH(ptl0, i) get_pt_flags((pte_t *) (ptl0), (index_t) (i)) |
#define GET_PTL2_FLAGS_ARCH(ptl1, i) PAGE_PRESENT |
#define GET_PTL3_FLAGS_ARCH(ptl2, i) PAGE_PRESENT |
#define GET_FRAME_FLAGS_ARCH(ptl3, i) get_pt_flags((pte_t *) (ptl3), (index_t) (i)) |
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) |
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x)) |
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x) |
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) |
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) |
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x)) |
#define PTE_VALID_ARCH(p) 1 |
#define PTE_PRESENT_ARCH(p) 1 |
#define PTE_GET_FRAME_ARCH(p) 0 |
#define PTE_VALID_ARCH(pte) (*((__u32 *) (pte)) != 0) |
#define PTE_PRESENT_ARCH(pte) ((pte)->p != 0) |
#define PTE_GET_FRAME_ARCH(pte) ((pte)->pfn << 12) |
#ifndef __ASM__ |
86,8 → 98,52 |
#include <arch/mm/frame.h> |
#include <arch/types.h> |
static inline int get_pt_flags(pte_t *pt, index_t i) |
{ |
pte_t *p = &pt[i]; |
return ( |
(1 << PAGE_CACHEABLE_SHIFT) | |
((!p->p) << PAGE_PRESENT_SHIFT) | |
(1 << PAGE_USER_SHIFT) | |
(1 << PAGE_READ_SHIFT) | |
(1 << PAGE_WRITE_SHIFT) | |
(1 << PAGE_EXEC_SHIFT) | |
(p->g << PAGE_GLOBAL_SHIFT) |
); |
} |
static inline void set_pt_flags(pte_t *pt, index_t i, int flags) |
{ |
pte_t *p = &pt[i]; |
p->p = !(flags & PAGE_NOT_PRESENT); |
p->g = (flags & PAGE_GLOBAL) != 0; |
p->valid = 1; |
} |
extern void page_arch_init(void); |
#define PHT_BITS 16 |
#define PHT_ORDER 4 |
typedef struct { |
unsigned v : 1; /**< Valid */ |
unsigned vsid : 24; /**< Virtual Segment ID */ |
unsigned h : 1; /**< Primary/secondary hash */ |
unsigned api : 6; /**< Abbreviated Page Index */ |
unsigned rpn : 20; /**< Real Page Number */ |
unsigned reserved0 : 3; |
unsigned r : 1; /**< Reference */ |
unsigned c : 1; /**< Change */ |
unsigned wimg : 4; /**< Access control */ |
unsigned reserved1 : 1; |
unsigned pp : 2; /**< Page protection */ |
} phte_t; |
extern void pht_refill(bool data, istate_t *istate); |
extern void pht_init(void); |
#endif /* __ASM__ */ |
#endif /* KERNEL */ |
/kernel/trunk/arch/ppc64/include/mm/asid.h |
---|
31,8 → 31,8 |
typedef int asid_t; |
#define ASID_MAX_ARCH 3 |
#define ASID_MAX_ARCH 3 |
#define asid_get() (ASID_START+1) |
#define asid_get() (ASID_START+1) |
#endif |
/kernel/trunk/arch/ppc64/include/mm/tlb.h |
---|
29,7 → 29,5 |
#ifndef __ppc64_TLB_H__ |
#define __ppc64_TLB_H__ |
#define tlb_arch_init() |
#define tlb_print() |
#endif |
/kernel/trunk/arch/ppc64/include/barrier.h |
---|
29,11 → 29,11 |
#ifndef __ppc64_BARRIER_H__ |
#define __ppc64_BARRIER_H__ |
#define CS_ENTER_BARRIER() __asm__ volatile ("" ::: "memory") |
#define CS_LEAVE_BARRIER() __asm__ volatile ("" ::: "memory") |
#define CS_ENTER_BARRIER() asm volatile ("" ::: "memory") |
#define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory") |
#define memory_barrier() __asm__ volatile ("sync" ::: "memory") |
#define read_barrier() __asm__ volatile ("sync" ::: "memory") |
#define write_barrier() __asm__ volatile ("eieio" ::: "memory") |
#define memory_barrier() asm volatile ("sync" ::: "memory") |
#define read_barrier() asm volatile ("sync" ::: "memory") |
#define write_barrier() asm volatile ("eieio" ::: "memory") |
#endif |