Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1397 → Rev 1398

/kernel/trunk/arch/ppc32/include/asm/regname.h
219,8 → 219,4
#define hid0_icfi (1 << 11)
#define hid0_dci (1 << 10)
 
/* Cache sizes */
#define L1_CACHE_LINES (128 * 8)
#define L1_CACHE_BYTES 5
 
#endif
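 
Taken together, the removed constants describe a 32 KB L1 cache, if L1_CACHE_BYTES is read as the log2 of the line size (an assumption; the header does not say so explicitly, and L1_CACHE_SIZE below is an illustrative name):

/* Editorial sketch: 1024 lines of 2^5 = 32 bytes each. */
#define L1_CACHE_SIZE (L1_CACHE_LINES << L1_CACHE_BYTES)  /* 128 * 8 * 32 = 32 KB */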
/kernel/trunk/arch/ppc32/include/types.h
33,7 → 33,7
 
typedef signed char __s8;
typedef signed short __s16;
typedef signed long __s32;
typedef signed int __s32;
typedef signed long long __s64;
 
typedef unsigned char __u8;
/kernel/trunk/arch/ppc32/include/mm/asid.h
26,12 → 26,6
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/*
* ia32 has no hardware support for address space identifiers.
* This file is provided to do nop-implementation of mm/asid.h
* interface.
*/
 
#ifndef __ppc32_ASID_H__
#define __ppc32_ASID_H__
 
/kernel/trunk/arch/ppc64/include/console.h
File deleted
/kernel/trunk/arch/ppc64/include/asm/regname.h
194,6 → 194,8
/* MSR bits */
#define msr_ir (1 << 4)
#define msr_dr (1 << 5)
#define msr_pr (1 << 14)
#define msr_ee (1 << 15)
 
/* HID0 bits */
#define hid0_ice (1 << 15)
/kernel/trunk/arch/ppc64/include/exception.h
0,0 → 1,84
/*
* Copyright (C) 2006 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ppc64_EXCEPTION_H__
#define __ppc64_EXCEPTION_H__
 
#ifndef __ppc64_TYPES_H__
# include <arch/types.h>
#endif
 
#include <typedefs.h>
 
struct istate {
__u64 r0;
__u64 r2;
__u64 r3;
__u64 r4;
__u64 r5;
__u64 r6;
__u64 r7;
__u64 r8;
__u64 r9;
__u64 r10;
__u64 r11;
__u64 r13;
__u64 r14;
__u64 r15;
__u64 r16;
__u64 r17;
__u64 r18;
__u64 r19;
__u64 r20;
__u64 r21;
__u64 r22;
__u64 r23;
__u64 r24;
__u64 r25;
__u64 r26;
__u64 r27;
__u64 r28;
__u64 r29;
__u64 r30;
__u64 r31;
__u64 cr;
__u64 pc;
__u64 srr1;
__u64 lr;
__u64 ctr;
__u64 xer;
__u64 r12;
__u64 sp;
};
 
static inline void istate_set_retaddr(istate_t *istate, __address retaddr)
{
istate->pc = retaddr;
}
 
#endif
/kernel/trunk/arch/ppc64/include/byteorder.h
53,8 → 53,12
{
__address v;
__asm__ volatile ("lwbrx %0, %1, %2\n" : "=r" (v) : "i" (0) , "r" (&n));
asm volatile (
"lwbrx %0, %1, %2\n"
: "=r" (v)
: "i" (0), "r" (&n)
);
return v;
}
 
#endif
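 
For reference, a hedged C equivalent of what the lwbrx instruction ("load word byte-reverse indexed") computes in the routine above (byteswap32 is an illustrative name, not part of the header):

/* Editorial sketch of a 32-bit byte reversal. */
static inline __u32 byteswap32(__u32 n)
{
	return ((n & 0x000000ff) << 24)
	     | ((n & 0x0000ff00) << 8)
	     | ((n & 0x00ff0000) >> 8)
	     | ((n & 0xff000000) >> 24);
}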
/kernel/trunk/arch/ppc64/include/cpuid.h
38,8 → 38,8
 
static inline void cpu_version(struct cpu_info *info)
{
__asm__ volatile (
"mfspr %0, 287\n"
asm volatile (
"mfpvr %0\n"
: "=r" (*info)
);
}
/kernel/trunk/arch/ppc64/include/types.h
33,8 → 33,8
 
typedef signed char __s8;
typedef signed short __s16;
typedef signed long __s32;
typedef signed long long __s64;
typedef signed int __s32;
typedef signed long __s64;
 
typedef unsigned char __u8;
typedef unsigned short __u16;
42,12 → 42,19
typedef unsigned long __u64;
 
typedef __u64 __address;
typedef __u32 pfn_t;
typedef __u64 pfn_t;
 
typedef __u64 ipl_t;
 
typedef __u64 __native;
typedef __u32 __native;
 
typedef __u32 pte_t;
/** Page Table Entry. */
typedef struct {
unsigned p : 1; /**< Present bit. */
unsigned a : 1; /**< Accessed bit. */
unsigned g : 1; /**< Global bit. */
unsigned valid : 1; /**< Valid content even if not present. */
unsigned pfn : 20; /**< Physical frame number. */
} pte_t;
 
#endif
/kernel/trunk/arch/ppc64/include/elf.h
29,7 → 29,7
#ifndef __ppc64_ELF_H__
#define __ppc64_ELF_H__
 
#define ELF_MACHINE EM_PPC
#define ELF_MACHINE EM_PPC64
#define ELF_DATA_ENCODING ELFDATA2MSB
#define ELF_CLASS ELFCLASS32
 
/kernel/trunk/arch/ppc64/include/atomic.h
33,7 → 33,7
{
long tmp;
 
asm __volatile__ (
asm volatile (
"1:\n"
"lwarx %0, 0, %2\n"
"addic %0, %0, 1\n"
41,7 → 41,8
"bne- 1b"
: "=&r" (tmp), "=m" (val->count)
: "r" (&val->count), "m" (val->count)
: "cc");
: "cc"
);
}
 
static inline void atomic_dec(atomic_t *val)
48,7 → 49,7
{
long tmp;
 
asm __volatile__(
asm volatile (
"1:\n"
"lwarx %0, 0, %2\n"
"addic %0, %0, -1\n"
56,7 → 57,8
"bne- 1b"
: "=&r" (tmp), "=m" (val->count)
: "r" (&val->count), "m" (val->count)
: "cc");
: "cc"
);
}
 
static inline long atomic_postinc(atomic_t *val)
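 
For readers less familiar with PowerPC load-and-reserve/store-conditional, a rough C11 sketch of the retry loop that lwarx/addic/stwcx. implements (not the kernel's actual code; atomic_inc_c11 is an illustrative name):

#include <stdatomic.h>

static inline void atomic_inc_c11(atomic_long *count)
{
	long old = atomic_load(count);
	/* stwcx. fails when another CPU touched the word since lwarx;
	   the weak compare-exchange models exactly that retry. */
	while (!atomic_compare_exchange_weak(count, &old, old + 1))
		;
}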
/kernel/trunk/arch/ppc64/include/boot/boot.h
34,6 → 34,7
/* Temporary stack size for boot process */
#define TEMP_STACK_SIZE 0x100
 
#define TASKMAP_MAX_RECORDS 32
#define MEMMAP_MAX_RECORDS 32
 
#ifndef __ASM__
41,12 → 42,22
#include <arch/types.h>
 
typedef struct {
__address addr;
__u64 size;
} utask_t;
 
typedef struct {
__u32 count;
utask_t tasks[TASKMAP_MAX_RECORDS];
} taskmap_t;
 
typedef struct {
__address start;
__u32 size;
__u64 size;
} memzone_t;
 
typedef struct {
__u32 total;
__u64 total;
__u32 count;
memzone_t zones[MEMMAP_MAX_RECORDS];
} memmap_t;
60,6 → 71,7
} screen_t;
 
typedef struct {
taskmap_t taskmap;
memmap_t memmap;
screen_t screen;
} bootinfo_t;
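 
The new taskmap_t is consumed later in this changeset by arch_pre_main() in src/ppc64.c, roughly as follows (abridged from the hunk below):

init.cnt = bootinfo.taskmap.count;
for (i = 0; i < bootinfo.taskmap.count; i++) {
	init.tasks[i].addr = PA2KA(bootinfo.taskmap.tasks[i].addr);
	init.tasks[i].size = bootinfo.taskmap.tasks[i].size;
}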
/kernel/trunk/arch/ppc64/include/asm.h
39,11 → 39,12
*
* @return Old interrupt priority level.
*/
static inline ipl_t interrupts_enable(void) {
static inline ipl_t interrupts_enable(void)
{
ipl_t v;
ipl_t tmp;
__asm__ volatile (
asm volatile (
"mfmsr %0\n"
"mfmsr %1\n"
"ori %1, %1, 1 << 15\n"
60,11 → 61,12
*
* @return Old interrupt priority level.
*/
static inline ipl_t interrupts_disable(void) {
static inline ipl_t interrupts_disable(void)
{
ipl_t v;
ipl_t tmp;
__asm__ volatile (
asm volatile (
"mfmsr %0\n"
"mfmsr %1\n"
"rlwinm %1, %1, 0, 17, 15\n"
80,10 → 82,11
*
* @param ipl Saved interrupt priority level.
*/
static inline void interrupts_restore(ipl_t ipl) {
static inline void interrupts_restore(ipl_t ipl)
{
ipl_t tmp;
__asm__ volatile (
asm volatile (
"mfmsr %1\n"
"rlwimi %0, %1, 0, 17, 15\n"
"cmpw 0, %0, %1\n"
92,6 → 95,7
"0:\n"
: "=r" (ipl), "=r" (tmp)
: "0" (ipl)
: "cr0"
);
}
 
101,9 → 105,11
*
* @return Current interrupt priority level.
*/
static inline ipl_t interrupts_read(void) {
static inline ipl_t interrupts_read(void)
{
ipl_t v;
__asm__ volatile (
asm volatile (
"mfmsr %0\n"
: "=r" (v)
);
120,8 → 126,11
{
__address v;
__asm__ volatile ("and %0, %%sp, %1\n" : "=r" (v) : "r" (~(STACK_SIZE-1)));
asm volatile (
"and %0, %%sp, %1\n"
: "=r" (v)
: "r" (~(STACK_SIZE - 1))
);
return v;
}
 
132,4 → 141,6
void cpu_halt(void);
void asm_delay_loop(__u32 t);
 
extern void userspace_asm(__address uspace_uarg, __address stack, __address entry);
 
#endif
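 
The interrupt routines above all toggle the same MSR bit. A hedged C sketch of the bit arithmetic (msr_ee is defined as (1 << 15) in asm/regname.h; the rlwinm mask 17..15 keeps every bit except IBM bit 16, which is that same bit; the helper names are illustrative):

#define MSR_EE (1 << 15)

static inline __u32 msr_with_ee(__u32 msr)
{
	return msr | MSR_EE;    /* ori    rT, rT, 1 << 15    */
}

static inline __u32 msr_without_ee(__u32 msr)
{
	return msr & ~MSR_EE;   /* rlwinm rT, rT, 0, 17, 15  */
}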
/kernel/trunk/arch/ppc64/include/mm/frame.h
30,11 → 30,15
#define __ppc64_FRAME_H__
 
#define FRAME_WIDTH 12 /* 4K */
#define FRAME_SIZE (1<<FRAME_WIDTH)
#define FRAME_SIZE (1 << FRAME_WIDTH)
 
#ifdef KERNEL
#ifndef __ASM__
 
#include <arch/types.h>
 
extern __address last_frame;
 
extern void frame_arch_init(void);
 
#endif /* __ASM__ */
/kernel/trunk/arch/ppc64/include/mm/page.h
44,41 → 44,53
# define PA2KA(x) ((x) + 0x80000000)
#endif
 
#define PTL0_ENTRIES_ARCH 0
#define PTL1_ENTRIES_ARCH 0
#define PTL2_ENTRIES_ARCH 0
#define PTL3_ENTRIES_ARCH 0
/*
* Implementation of the generic 4-level page table interface;
* the hardware Page Hash Table is used as a cache.
*
* Page table layout:
* - 32-bit virtual addresses
* - Offset is 12 bits => pages are 4K long
* - PTL0 has 1024 entries (10 bits)
* - PTL1 is not used
* - PTL2 is not used
* - PTL3 has 1024 entries (10 bits)
*/
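 
As a quick illustration of this layout (editorial; the macro names and the address value are made up, but the shifts and masks match the definitions below):

#define EXAMPLE_PTL0_IDX(va)  (((va) >> 22) & 0x3ff)  /* 0x1234abcd -> 0x048 */
#define EXAMPLE_PTL3_IDX(va)  (((va) >> 12) & 0x3ff)  /* 0x1234abcd -> 0x34a */
#define EXAMPLE_PG_OFF(va)    ((va) & 0xfff)          /* 0x1234abcd -> 0xbcd */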
 
#define PTL0_INDEX_ARCH(vaddr) 0
#define PTL1_INDEX_ARCH(vaddr) 0
#define PTL2_INDEX_ARCH(vaddr) 0
#define PTL3_INDEX_ARCH(vaddr) 0
#define PTL0_ENTRIES_ARCH 1024
#define PTL1_ENTRIES_ARCH 0
#define PTL2_ENTRIES_ARCH 0
#define PTL3_ENTRIES_ARCH 1024
 
#define SET_PTL0_ADDRESS_ARCH(ptl0)
#define PTL0_INDEX_ARCH(vaddr) (((vaddr) >> 22) & 0x3ff)
#define PTL1_INDEX_ARCH(vaddr) 0
#define PTL2_INDEX_ARCH(vaddr) 0
#define PTL3_INDEX_ARCH(vaddr) (((vaddr) >> 12) & 0x3ff)
 
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *) 0)
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) ((pte_t *) 0)
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) ((pte_t *) 0)
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((pte_t *) 0)
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) (((pte_t *) (ptl0))[(i)].pfn << 12)
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) (ptl1)
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) (ptl2)
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) (((pte_t *) (ptl3))[(i)].pfn << 12)
 
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a)
#define SET_PTL0_ADDRESS_ARCH(ptl0)
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) (((pte_t *) (ptl0))[(i)].pfn = (a) >> 12)
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a)
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) (((pte_t *) (ptl3))[(i)].pfn = (a) >> 12)
 
#define GET_PTL1_FLAGS_ARCH(ptl0, i) 0
#define GET_PTL2_FLAGS_ARCH(ptl1, i) 0
#define GET_PTL3_FLAGS_ARCH(ptl2, i) 0
#define GET_FRAME_FLAGS_ARCH(ptl3, i) 0
#define GET_PTL1_FLAGS_ARCH(ptl0, i) get_pt_flags((pte_t *) (ptl0), (index_t) (i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) PAGE_PRESENT
#define GET_PTL3_FLAGS_ARCH(ptl2, i) PAGE_PRESENT
#define GET_FRAME_FLAGS_ARCH(ptl3, i) get_pt_flags((pte_t *) (ptl3), (index_t) (i))
 
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x)
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))
 
#define PTE_VALID_ARCH(p) 1
#define PTE_PRESENT_ARCH(p) 1
#define PTE_GET_FRAME_ARCH(p) 0
#define PTE_VALID_ARCH(pte) (*((__u32 *) (pte)) != 0)
#define PTE_PRESENT_ARCH(pte) ((pte)->p != 0)
#define PTE_GET_FRAME_ARCH(pte) ((pte)->pfn << 12)
 
#ifndef __ASM__
 
86,8 → 98,52
#include <arch/mm/frame.h>
#include <arch/types.h>
 
static inline int get_pt_flags(pte_t *pt, index_t i)
{
pte_t *p = &pt[i];
return (
(1 << PAGE_CACHEABLE_SHIFT) |
((!p->p) << PAGE_PRESENT_SHIFT) |
(1 << PAGE_USER_SHIFT) |
(1 << PAGE_READ_SHIFT) |
(1 << PAGE_WRITE_SHIFT) |
(1 << PAGE_EXEC_SHIFT) |
(p->g << PAGE_GLOBAL_SHIFT)
);
}
 
static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
{
pte_t *p = &pt[i];
p->p = !(flags & PAGE_NOT_PRESENT);
p->g = (flags & PAGE_GLOBAL) != 0;
p->valid = 1;
}
 
extern void page_arch_init(void);
 
#define PHT_BITS 16
#define PHT_ORDER 4
 
typedef struct {
unsigned v : 1; /**< Valid */
unsigned vsid : 24; /**< Virtual Segment ID */
unsigned h : 1; /**< Primary/secondary hash */
unsigned api : 6; /**< Abbreviated Page Index */
unsigned rpn : 20; /**< Real Page Number */
unsigned reserved0 : 3;
unsigned r : 1; /**< Reference */
unsigned c : 1; /**< Change */
unsigned wimg : 4; /**< Access control */
unsigned reserved1 : 1;
unsigned pp : 2; /**< Page protection */
} phte_t;
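 
These constants are mutually consistent: each phte_t is 64 bits, a PTEG holds 8 entries (64 bytes), so a 2^PHT_BITS = 64 KB table has 1024 PTEGs, matching the (hash & 0x3ff) masking in pht_insert() below. A C11-style check (editorial sketch, using FRAME_SIZE from arch/mm/frame.h):

static_assert((1 << PHT_ORDER) * FRAME_SIZE == (1 << PHT_BITS),
              "2^PHT_ORDER frames of 4 KB cover the whole 64 KB table");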
 
extern void pht_refill(bool data, istate_t *istate);
extern void pht_init(void);
 
#endif /* __ASM__ */
 
#endif /* KERNEL */
/kernel/trunk/arch/ppc64/include/mm/asid.h
31,8 → 31,8
 
typedef int asid_t;
 
#define ASID_MAX_ARCH 3
#define ASID_MAX_ARCH 3
 
#define asid_get() (ASID_START+1)
#define asid_get() (ASID_START+1)
 
#endif
/kernel/trunk/arch/ppc64/include/mm/tlb.h
29,7 → 29,5
#ifndef __ppc64_TLB_H__
#define __ppc64_TLB_H__
 
#define tlb_arch_init()
#define tlb_print()
 
#endif
/kernel/trunk/arch/ppc64/include/barrier.h
29,11 → 29,11
#ifndef __ppc64_BARRIER_H__
#define __ppc64_BARRIER_H__
 
#define CS_ENTER_BARRIER() __asm__ volatile ("" ::: "memory")
#define CS_LEAVE_BARRIER() __asm__ volatile ("" ::: "memory")
#define CS_ENTER_BARRIER() asm volatile ("" ::: "memory")
#define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory")
 
#define memory_barrier() __asm__ volatile ("sync" ::: "memory")
#define read_barrier() __asm__ volatile ("sync" ::: "memory")
#define write_barrier() __asm__ volatile ("eieio" ::: "memory")
#define memory_barrier() asm volatile ("sync" ::: "memory")
#define read_barrier() asm volatile ("sync" ::: "memory")
#define write_barrier() asm volatile ("eieio" ::: "memory")
 
#endif
/kernel/trunk/arch/ppc64/Makefile.inc
38,7 → 38,7
## Make some default assumptions
#
 
CFLAGS += -mcpu=powerpc64 -m64
CFLAGS += -mcpu=powerpc64 -msoft-float -m64
AFLAGS += -a64
LFLAGS += -no-check-sections -N
 
56,7 → 56,6
DEFS += -DCONFIG_PAGE_PT
 
ARCH_SOURCES = \
arch/$(ARCH)/src/console.c \
arch/$(ARCH)/src/context.S \
arch/$(ARCH)/src/debug/panic.s \
arch/$(ARCH)/src/fpu_context.S \
67,9 → 66,10
arch/$(ARCH)/src/interrupt.c \
arch/$(ARCH)/src/asm.S \
arch/$(ARCH)/src/cpu/cpu.c \
arch/$(ARCH)/src/proc/scheduler.c \
arch/$(ARCH)/src/ddi/ddi.c \
arch/$(ARCH)/src/proc/scheduler.c \
arch/$(ARCH)/src/mm/as.c \
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/memory_init.c \
arch/$(ARCH)/src/mm/page.c
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/mm/tlb.c
/kernel/trunk/arch/ppc64/src/console.c
File deleted
/kernel/trunk/arch/ppc64/src/exception.S
31,6 → 31,123
 
.section K_UNMAPPED_TEXT_START, "ax"
 
.macro CONTEXT_STORE
# save R12 in SPRG1, back up CR in R12
# save SP in SPRG2
 
mtsprg1 r12
mfcr r12
mtsprg2 sp
# check whether SP is in kernel
andis. sp, sp, 0x8000
bne 1f
# stack is in user-space
mfsprg0 sp
b 2f
1:
# stack is in kernel
mfsprg2 sp
subis sp, sp, 0x8000
2:
subi sp, sp, 160
stw r0, 8(sp)
stw r2, 12(sp)
stw r3, 16(sp)
stw r4, 20(sp)
stw r5, 24(sp)
stw r6, 28(sp)
stw r7, 32(sp)
stw r8, 36(sp)
stw r9, 40(sp)
stw r10, 44(sp)
stw r11, 48(sp)
stw r13, 52(sp)
stw r14, 56(sp)
stw r15, 60(sp)
stw r16, 64(sp)
stw r17, 68(sp)
stw r18, 72(sp)
stw r19, 76(sp)
stw r20, 80(sp)
stw r21, 84(sp)
stw r22, 88(sp)
stw r23, 92(sp)
stw r24, 96(sp)
stw r25, 100(sp)
stw r26, 104(sp)
stw r27, 108(sp)
stw r28, 112(sp)
stw r29, 116(sp)
stw r30, 120(sp)
stw r31, 124(sp)
stw r12, 128(sp)
mfsrr0 r12
stw r12, 132(sp)
mfsrr1 r12
stw r12, 136(sp)
mflr r12
stw r12, 140(sp)
mfctr r12
stw r12, 144(sp)
mfxer r12
stw r12, 148(sp)
mfsprg1 r12
stw r12, 152(sp)
mfsprg2 r12
stw r12, 156(sp)
.endm
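 
# (editorial note) The 38 words stored at 8(sp)..156(sp) above follow the
# field order of struct istate in include/exception.h; jump_to_kernel below
# passes sp + 8 in r4, so C-level handlers receive the saved frame as an
# istate_t pointer.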
 
.org 0x060
jump_to_kernel:
lis r12, iret@ha
addi r12, r12, iret@l
mtlr r12
 
mfmsr r12
ori r12, r12, (msr_ir | msr_dr)@l
mtsrr1 r12
addis sp, sp, 0x8000
mr r4, sp
addi r4, r4, 8
rfi
 
jump_to_kernel_syscall:
lis r12, syscall_handler@ha
addi r12, r12, syscall_handler@l
mtsrr0 r12
lis r12, iret_syscall@ha
addi r12, r12, iret_syscall@l
mtlr r12
 
mfmsr r12
ori r12, r12, (msr_ir | msr_dr)@l
mtsrr1 r12
addis sp, sp, 0x8000
rfi
 
.org 0x100
.global exc_system_reset
exc_system_reset:
44,23 → 161,27
.org 0x300
.global exc_data_storage
exc_data_storage:
b exc_data_storage
CONTEXT_STORE
lis r12, pht_refill@ha
addi r12, r12, pht_refill@l
mtsrr0 r12
li r3, 1
b jump_to_kernel
 
.org 0x380
.global exc_data_segment
exc_data_segment:
b exc_data_segment
 
.org 0x400
.global exc_instruction_storage
exc_instruction_storage:
b exc_instruction_storage
CONTEXT_STORE
lis r12, pht_refill@ha
addi r12, r12, pht_refill@l
mtsrr0 r12
li r3, 0
b jump_to_kernel
 
.org 0x480
.global exc_instruction_segment
exc_instruction_segment:
b exc_instruction_segment
 
.org 0x500
.global exc_external
exc_external:
84,76 → 205,14
.org 0x900
.global exc_decrementer
exc_decrementer:
mtspr sprg1, sp
subis sp, sp, 0x8000
subi sp, sp, 144
stw r0, 0(sp)
stw r2, 4(sp)
stw r3, 8(sp)
stw r4, 12(sp)
stw r5, 16(sp)
stw r6, 20(sp)
stw r7, 24(sp)
stw r8, 28(sp)
stw r9, 32(sp)
stw r10, 36(sp)
stw r11, 40(sp)
stw r12, 44(sp)
stw r13, 48(sp)
stw r14, 52(sp)
stw r15, 56(sp)
stw r16, 60(sp)
stw r17, 64(sp)
stw r18, 68(sp)
stw r19, 72(sp)
stw r20, 76(sp)
stw r21, 80(sp)
stw r22, 84(sp)
stw r23, 88(sp)
stw r24, 92(sp)
stw r25, 96(sp)
stw r26, 100(sp)
stw r27, 104(sp)
stw r28, 108(sp)
stw r29, 112(sp)
stw r30, 116(sp)
stw r31, 120(sp)
mfspr r3, srr0
stw r3, 124(sp)
mfspr r3, srr1
stw r3, 128(sp)
mflr r3
stw r3, 132(sp)
mfcr r3
stw r3, 136(sp)
mfctr r3
stw r3, 140(sp)
mfxer r3
stw r3, 144(sp)
CONTEXT_STORE
 
lis r3, exc_dispatch@ha
addi r3, r3, exc_dispatch@l
mtspr srr0, r3
lis r12, exc_dispatch@ha
addi r12, r12, exc_dispatch@l
mtsrr0 r12
mfmsr r3
ori r3, r3, (msr_ir | msr_dr)@l
mtspr srr1, r3
lis r3, iret@ha
addi r3, r3, iret@l
mtlr r3
addis sp, sp, 0x8000
li r3, 10
rfid
b jump_to_kernel
 
.org 0xa00
.global exc_reserved0
168,7 → 227,9
.org 0xc00
.global exc_syscall
exc_syscall:
b exc_syscall
CONTEXT_STORE
b jump_to_kernel_syscall
 
.org 0xd00
.global exc_trace
/kernel/trunk/arch/ppc64/src/asm.S
30,65 → 30,174
 
.text
 
.global userspace_asm
.global iret
.global iret_syscall
.global memsetb
.global memcpy
.global memcpy_from_uspace
.global memcpy_to_uspace
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address
 
userspace_asm:
 
# r3 = uspace_uarg
# r4 = stack
# r5 = entry
# disable interrupts
 
mfmsr r31
rlwinm r31, r31, 0, 17, 15
mtmsr r31
# set entry point
mtsrr0 r5
# set problem state, enable interrupts
ori r31, r31, msr_pr
ori r31, r31, msr_ee
mtsrr1 r31
# set stack
mr sp, r4
# jump to userspace
rfi
 
iret:
lwz r3, 144(sp)
mtxer r3
lwz r3, 140(sp)
mtctr r3
# disable interrupts
lwz r3, 136(sp)
mtcr r3
mfmsr r31
rlwinm r31, r31, 0, 17, 15
mtmsr r31
lwz r3, 132(sp)
mtlr r3
lwz r0, 8(sp)
lwz r2, 12(sp)
lwz r3, 16(sp)
lwz r4, 20(sp)
lwz r5, 24(sp)
lwz r6, 28(sp)
lwz r7, 32(sp)
lwz r8, 36(sp)
lwz r9, 40(sp)
lwz r10, 44(sp)
lwz r11, 48(sp)
lwz r13, 52(sp)
lwz r14, 56(sp)
lwz r15, 60(sp)
lwz r16, 64(sp)
lwz r17, 68(sp)
lwz r18, 72(sp)
lwz r19, 76(sp)
lwz r20, 80(sp)
lwz r21, 84(sp)
lwz r22, 88(sp)
lwz r23, 92(sp)
lwz r24, 96(sp)
lwz r25, 100(sp)
lwz r26, 104(sp)
lwz r27, 108(sp)
lwz r28, 112(sp)
lwz r29, 116(sp)
lwz r30, 120(sp)
lwz r31, 124(sp)
lwz r3, 128(sp)
mtspr srr1, r3
lwz r12, 128(sp)
mtcr r12
lwz r3, 124(sp)
mtspr srr0, r3
lwz r12, 132(sp)
mtsrr0 r12
lwz r0, 0(sp)
lwz r2, 4(sp)
lwz r3, 8(sp)
lwz r4, 12(sp)
lwz r5, 16(sp)
lwz r6, 20(sp)
lwz r7, 24(sp)
lwz r8, 28(sp)
lwz r9, 32(sp)
lwz r10, 36(sp)
lwz r11, 40(sp)
lwz r12, 44(sp)
lwz r13, 48(sp)
lwz r14, 52(sp)
lwz r15, 56(sp)
lwz r16, 60(sp)
lwz r17, 64(sp)
lwz r18, 68(sp)
lwz r19, 72(sp)
lwz r20, 76(sp)
lwz r21, 80(sp)
lwz r22, 84(sp)
lwz r23, 88(sp)
lwz r24, 92(sp)
lwz r25, 96(sp)
lwz r26, 100(sp)
lwz r27, 104(sp)
lwz r28, 108(sp)
lwz r29, 112(sp)
lwz r30, 116(sp)
lwz r31, 120(sp)
lwz r12, 136(sp)
mtsrr1 r12
mfspr sp, sprg1
lwz r12, 140(sp)
mtlr r12
lwz r12, 144(sp)
mtctr r12
lwz r12, 148(sp)
mtxer r12
lwz r12, 152(sp)
lwz sp, 156(sp)
rfi
 
iret_syscall:
# reset decrementer
 
li r31, 1000
mtdec r31
# disable interrupts
mfmsr r31
rlwinm r31, r31, 0, 17, 15
mtmsr r31
lwz r0, 8(sp)
lwz r2, 12(sp)
lwz r4, 20(sp)
lwz r5, 24(sp)
lwz r6, 28(sp)
lwz r7, 32(sp)
lwz r8, 36(sp)
lwz r9, 40(sp)
lwz r10, 44(sp)
lwz r11, 48(sp)
lwz r13, 52(sp)
lwz r14, 56(sp)
lwz r15, 60(sp)
lwz r16, 64(sp)
lwz r17, 68(sp)
lwz r18, 72(sp)
lwz r19, 76(sp)
lwz r20, 80(sp)
lwz r21, 84(sp)
lwz r22, 88(sp)
lwz r23, 92(sp)
lwz r24, 96(sp)
lwz r25, 100(sp)
lwz r26, 104(sp)
lwz r27, 108(sp)
lwz r28, 112(sp)
lwz r29, 116(sp)
lwz r30, 120(sp)
lwz r31, 124(sp)
lwz r12, 128(sp)
mtcr r12
lwz r12, 132(sp)
mtsrr0 r12
lwz r12, 136(sp)
mtsrr1 r12
lwz r12, 140(sp)
mtlr r12
lwz r12, 144(sp)
mtctr r12
lwz r12, 148(sp)
mtxer r12
lwz r12, 152(sp)
lwz sp, 156(sp)
 
rfi
memsetb:
rlwimi r5, r5, 8, 16, 23
rlwimi r5, r5, 16, 0, 15
133,6 → 242,9
blr
 
memcpy:
memcpy_from_uspace:
memcpy_to_uspace:
 
srwi. r7, r5, 3
addi r6, r3, -4
addi r4, r4, -4
193,3 → 305,7
beq 2b
mtctr r7
b 1b
 
memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
b memcpy_from_uspace_failover_address
/kernel/trunk/arch/ppc64/src/boot/boot.S
34,18 → 34,21
.global kernel_image_start
kernel_image_start:
 
# load temporary stack
# load temporary kernel stack
lis sp, end_stack@ha
addi sp, sp, end_stack@l
lis sp, kernel_stack@ha
addi sp, sp, kernel_stack@l
# set kernel stack for interrupt handling
mr r31, sp
subis r31, r31, 0x8000
mtsprg0 r31
# r3 contains physical address of bootinfo_t
# r4 contains size of bootinfo_t
lis r31, 0x80000000@ha
addi r31, r31, 0x80000000@l
add r3, r3, r31
addis r3, r3, 0x8000
 
lis r31, bootinfo@ha
addi r31, r31, bootinfo@l # r31 = bootinfo
67,9 → 70,12
bootinfo_end:
bl arch_pre_main
b main_bsp
 
.section K_DATA_START, "aw", @progbits
 
.align 12
kernel_stack_bottom:
.space TEMP_STACK_SIZE
end_stack:
kernel_stack:
/kernel/trunk/arch/ppc64/src/proc/scheduler.c
27,13 → 27,11
*/
 
#include <arch/mm/page.h>
#include <arch/boot/boot.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <arch.h>
 
__address supervisor_sp;
__address supervisor_sp_physical;
 
/** Perform ppc64 specific tasks needed before the new task is run. */
void before_task_runs_arch(void)
{
42,8 → 40,13
/** Perform ppc64 specific tasks needed before the new thread is scheduled. */
void before_thread_runs_arch(void)
{
supervisor_sp = (__address) &THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA];
supervisor_sp_physical = KA2PA(supervisor_sp_physical);
pht_init();
tlb_invalidate_all();
asm volatile (
"mtsprg0 %0\n"
:
: "r" (KA2PA(&THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA]))
);
}
 
void after_thread_ran_arch(void)
/kernel/trunk/arch/ppc64/src/mm/tlb.c
0,0 → 1,77
/*
* Copyright (C) 2006 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <mm/tlb.h>
 
 
/** Initialize Page Hash Table.
*
* Set up the Page Hash Table with no entries.
*
*/
void tlb_arch_init(void)
{
tlb_invalidate_all();
}
 
 
void tlb_invalidate_all(void)
{
asm volatile (
"tlbia\n"
"tlbsync\n"
);
}
 
 
/** Invalidate all entries in TLB that belong to specified address space.
*
* @param asid This parameter is ignored as the architecture doesn't support it.
*/
void tlb_invalidate_asid(asid_t asid)
{
tlb_invalidate_all();
}
 
/** Invalidate TLB entries for specified page range belonging to specified address space.
*
* @param asid This parameter is ignored as the architecture doesn't support it.
* @param page Address of the first page whose entry is to be invalidated.
* @param cnt Number of entries to invalidate.
*/
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
tlb_invalidate_all();
}
 
 
 
/** Print contents of Page Hash Table. */
void tlb_print(void)
{
}
/kernel/trunk/arch/ppc64/src/mm/memory_init.c
43,5 → 43,5
count_t i;
for (i = 0; i < bootinfo.memmap.count; i++)
printf("base: %p size: %#x\n", bootinfo.memmap.zones[i].start, bootinfo.memmap.zones[i].size);
printf("base: %#x size: %#x\n", bootinfo.memmap.zones[i].start, bootinfo.memmap.zones[i].size);
}
/kernel/trunk/arch/ppc64/src/mm/page.c
29,11 → 29,268
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch.h>
#include <arch/types.h>
#include <arch/exception.h>
#include <align.h>
#include <config.h>
#include <print.h>
#include <symtab.h>
 
static phte_t *phte;
 
 
/** Try to find PTE for faulting address
*
* Try to find PTE for faulting address.
* The as->lock must be held on entry to this function
* if lock is true.
*
* @param as Address space.
* @param lock Lock/unlock the address space.
* @param badvaddr Faulting virtual address.
* @param istate Pointer to interrupted state.
* @param pfrc Pointer to variable where as_page_fault() return code will be stored.
* @return PTE on success, NULL otherwise.
*
*/
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr)
{
/*
* Check if the mapping exists in page tables.
*/
pte_t *pte = page_mapping_find(as, badvaddr);
if ((pte) && (pte->p)) {
/*
* Mapping found in page tables.
* Immediately succeed.
*/
return pte;
} else {
int rc;
/*
* Mapping not found in page tables.
* Resort to higher-level page fault handler.
*/
page_table_unlock(as, lock);
switch (rc = as_page_fault(badvaddr, istate)) {
case AS_PF_OK:
/*
* The higher-level page fault handler succeeded,
* The mapping ought to be in place.
*/
page_table_lock(as, lock);
pte = page_mapping_find(as, badvaddr);
ASSERT((pte) && (pte->p));
return pte;
case AS_PF_DEFER:
page_table_lock(as, lock);
*pfcr = rc;
return NULL;
case AS_PF_FAULT:
page_table_lock(as, lock);
printf("Page fault.\n");
*pfcr = rc;
return NULL;
default:
panic("unexpected rc (%d)\n", rc);
}
}
}
 
 
static void pht_refill_fail(__address badvaddr, istate_t *istate)
{
char *symbol = "";
char *sym2 = "";
 
char *s = get_symtab_entry(istate->pc);
if (s)
symbol = s;
s = get_symtab_entry(istate->lr);
if (s)
sym2 = s;
panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, symbol, sym2);
}
 
 
static void pht_insert(const __address vaddr, const pfn_t pfn)
{
__u32 page = (vaddr >> 12) & 0xffff;
__u32 api = (vaddr >> 22) & 0x3f;
__u32 vsid;
asm volatile (
"mfsrin %0, %1\n"
: "=r" (vsid)
: "r" (vaddr)
);
/* Primary hash (xor) */
__u32 h = 0;
__u32 hash = vsid ^ page;
__u32 base = (hash & 0x3ff) << 3;
__u32 i;
bool found = false;
/* Find unused or colliding PTE in PTEG */
for (i = 0; i < 8; i++) {
if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) {
found = true;
break;
}
}
if (!found) {
/* Secondary hash (not) */
__u32 base2 = (~hash & 0x3ff) << 3;
/* Find unused or colliding PTE in PTEG */
for (i = 0; i < 8; i++) {
if ((!phte[base2 + i].v) || ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) {
found = true;
base = base2;
h = 1;
break;
}
}
if (!found) {
// TODO: A/C precedence groups
i = page % 8;
}
}
phte[base + i].v = 1;
phte[base + i].vsid = vsid;
phte[base + i].h = h;
phte[base + i].api = api;
phte[base + i].rpn = pfn;
phte[base + i].r = 0;
phte[base + i].c = 0;
phte[base + i].pp = 2; // FIXME
}
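 
A hedged worked example of the two hash computations above (all register values are made up):

/* vsid = 0x000abc, page = (vaddr >> 12) & 0xffff = 0x1234        */
/* hash  = vsid ^ page          = 0x1888                          */
/* base  = (hash & 0x3ff) << 3  = 0x088 << 3 = 0x440  (primary)   */
/* base2 = (~hash & 0x3ff) << 3 = 0x377 << 3 = 0x1bb8 (secondary) */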
 
 
/** Process Instruction/Data Storage Interrupt
*
* @param data True if Data Storage Interrupt.
* @param istate Interrupted register context.
*
*/
void pht_refill(bool data, istate_t *istate)
{
__address badvaddr;
pte_t *pte;
int pfcr;
as_t *as;
bool lock;
if (AS == NULL) {
as = AS_KERNEL;
lock = false;
} else {
as = AS;
lock = true;
}
if (data) {
asm volatile (
"mfdar %0\n"
: "=r" (badvaddr)
);
} else
badvaddr = istate->pc;
page_table_lock(as, lock);
pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr);
if (!pte) {
switch (pfcr) {
case AS_PF_FAULT:
goto fail;
break;
case AS_PF_DEFER:
/*
* The page fault came during copy_from_uspace()
* or copy_to_uspace().
*/
page_table_unlock(as, lock);
return;
default:
panic("Unexpected pfrc (%d)\n", pfcr);
}
}
pte->a = 1; /* Record access to PTE */
pht_insert(badvaddr, pte->pfn);
page_table_unlock(as, lock);
return;
fail:
page_table_unlock(as, lock);
pht_refill_fail(badvaddr, istate);
}
 
 
void pht_init(void)
{
memsetb((__address) phte, 1 << PHT_BITS, 0);
}
 
 
void page_arch_init(void)
{
page_mapping_operations = &pt_mapping_operations;
if (config.cpu_active == 1) {
page_mapping_operations = &pt_mapping_operations;
__address cur;
int flags;
/* Frames below 128 MB are mapped using BAT,
map the rest of the physical memory */
for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
flags = PAGE_CACHEABLE;
if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
flags |= PAGE_GLOBAL;
page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
}
/* Allocate page hash table */
phte_t *physical_phte = (phte_t *) PFN2ADDR(frame_alloc(PHT_ORDER, FRAME_KA | FRAME_PANIC));
phte = (phte_t *) PA2KA((__address) physical_phte);
ASSERT((__address) physical_phte % (1 << PHT_BITS) == 0);
pht_init();
asm volatile (
"mtsdr1 %0\n"
:
: "r" ((__address) physical_phte)
);
}
}
 
 
__address hw_map(__address physaddr, size_t size)
{
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
panic("Unable to map physical memory %p (%d bytes)", physaddr, size)
__address virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
return virtaddr;
}
/kernel/trunk/arch/ppc64/src/dummy.s
28,29 → 28,9
 
.text
 
.global cpu_halt
.global asm_delay_loop
.global userspace
.global sys_tls_set
.global tlb_invalidate_all
.global tlb_invalidate_asid
.global tlb_invalidate_pages
 
cpu_halt:
b cpu_halt
 
tlb_invalidate_all:
b tlb_invalidate_all
 
tlb_invalidate_asid:
b tlb_invalidate_asid
 
tlb_invalidate_pages:
b tlb_invalidate_pages
 
userspace:
b userspace
 
sys_tls_set:
b sys_tls_set
 
/kernel/trunk/arch/ppc64/src/interrupt.c
31,7 → 31,7
#include <arch/types.h>
#include <arch.h>
#include <time/clock.h>
#include <print.h>
#include <ipc/sysipc.h>
 
 
void start_decrementer(void)
38,7 → 38,8
{
asm volatile (
"mtdec %0\n"
:: "r" (1000)
:
: "r" (1000)
);
}
 
55,3 → 56,11
{
exc_register(VECTOR_DECREMENTER, "timer", exception_decrementer);
}
 
 
/* Reregister irq to be IPC-ready */
void irq_ipc_bind_arch(__native irq)
{
panic("not implemented\n");
/* TODO */
}
/kernel/trunk/arch/ppc64/src/ppc64.c
28,13 → 28,27
 
#include <arch.h>
#include <arch/boot/boot.h>
#include <arch/console.h>
#include <arch/mm/memory_init.h>
#include <arch/interrupt.h>
#include <mm/frame.h>
#include <genarch/fb/fb.h>
#include <userspace.h>
#include <proc/uarg.h>
 
bootinfo_t bootinfo;
 
void arch_pre_main(void)
{
/* Set up usermode */
init.cnt = bootinfo.taskmap.count;
__u32 i;
for (i = 0; i < bootinfo.taskmap.count; i++) {
init.tasks[i].addr = PA2KA(bootinfo.taskmap.tasks[i].addr);
init.tasks[i].size = bootinfo.taskmap.tasks[i].size;
}
}
 
void arch_pre_mm_init(void)
{
/* Initialize dispatch table */
42,13 → 56,13
/* Start decrementer */
start_decrementer();
 
ppc64_console_init();
}
 
void arch_post_mm_init(void)
{
if (config.cpu_active == 1) {
fb_init(bootinfo.screen.addr, bootinfo.screen.width, bootinfo.screen.height, bootinfo.screen.bpp, bootinfo.screen.scanline);
/* Merge all zones to 1 big zone */
zone_merge_all();
}
67,3 → 81,11
{
}
 
void userspace(uspace_arg_t *kernel_uarg)
{
userspace_asm((__address) kernel_uarg->uspace_uarg, (__address) kernel_uarg->uspace_stack + THREAD_STACK_SIZE - SP_DELTA, (__address) kernel_uarg->uspace_entry);
/* Unreachable */
for (;;)
;
}