Subversion Repositories HelenOS

Compare Revisions

Rev 3381 → Rev 3386

/branches/network/kernel/arch/ia32/include/atomic.h
41,17 → 41,17
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock incl %0\n" : "=m" (val->count));
asm volatile ("lock incl %0\n" : "+m" (val->count));
#else
asm volatile ("incl %0\n" : "=m" (val->count));
asm volatile ("incl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock decl %0\n" : "=m" (val->count));
asm volatile ("lock decl %0\n" : "+m" (val->count));
#else
asm volatile ("decl %0\n" : "=m" (val->count));
asm volatile ("decl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
61,7 → 61,7
 
asm volatile (
"lock xaddl %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
 
return r;
73,14 → 73,14
asm volatile (
"lock xaddl %1, %0\n"
: "=m" (val->count), "+r"(r)
: "+m" (val->count), "+r"(r)
);
return r;
}
 
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
 
static inline uint32_t test_and_set(atomic_t *val) {
uint32_t v;
88,7 → 88,7
asm volatile (
"movl $1, %0\n"
"xchgl %0, %1\n"
: "=r" (v),"=m" (val->count)
: "=r" (v),"+m" (val->count)
);
return v;
101,20 → 101,20
 
preemption_disable();
asm volatile (
"0:;"
"0:\n"
#ifdef CONFIG_HT
"pause;" /* Pentium 4's HT love this instruction */
"pause\n" /* Pentium 4's HT love this instruction */
#endif
"mov %0, %1;"
"testl %1, %1;"
"jnz 0b;" /* Lightweight looping on locked spinlock */
"mov %0, %1\n"
"testl %1, %1\n"
"jnz 0b\n" /* lightweight looping on locked spinlock */
"incl %1;" /* now use the atomic operation */
"xchgl %0, %1;"
"testl %1, %1;"
"jnz 0b;"
: "=m"(val->count),"=r"(tmp)
);
"incl %1\n" /* now use the atomic operation */
"xchgl %0, %1\n"
"testl %1, %1\n"
"jnz 0b\n"
: "+m" (val->count), "=&r"(tmp)
);
/*
* Prevent critical section code from bleeding out this way up.
*/
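
The substantive change in the hunks above is the switch of the inline-assembly memory operand from the write-only "=m" constraint to the read-write "+m" constraint. A minimal sketch of why that matters, assuming the atomic_t type from this header (the helper name is illustrative, not part of the revision):

    static inline void counter_demo(atomic_t *val)
    {
        val->count = 0;                /* under the old "=m" (write-only) constraint the
                                          compiler may treat this store as dead and drop it */
        asm volatile ("lock incl %0" : "+m" (val->count));   /* "+m": incl both reads and writes count */
    }

The same reasoning applies to the spinlock loop below it, where the scratch register additionally becomes an early-clobber output ("=&r") so it cannot be allocated on top of another operand.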
/branches/network/kernel/arch/ia32/include/mm/page.h
40,8 → 40,6
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 0 /* dummy */
 
#ifdef KERNEL
 
#ifndef __ASM__
128,6 → 126,8
 
#include <mm/mm.h>
#include <arch/interrupt.h>
#include <arch/types.h>
#include <typedefs.h>
 
/* Page fault error codes. */
 
/branches/network/kernel/arch/ia32/include/barrier.h
84,6 → 84,15
# endif
#endif
 
/*
* On ia32, the hardware takes care about instruction and data cache coherence,
* even on SMP systems. We issue a write barrier to be sure that writes
* queueing in the store buffer drain to the memory (even though it would be
* sufficient for them to drain to the D-cache).
*/
#define smc_coherence(a) write_barrier()
#define smc_coherence_block(a, l) write_barrier()
 
#endif
 
/** @}
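
The smc_coherence() and smc_coherence_block() macros introduced above are intended to be invoked after the kernel stores instructions into memory and before it executes them. A hypothetical usage sketch; patch_byte, instr_addr and opcode are made-up names used only for illustration:

    #include <arch/barrier.h>

    static void patch_byte(uint8_t *instr_addr, uint8_t opcode)
    {
        *instr_addr = opcode;         /* write new code into memory */
        smc_coherence(instr_addr);    /* on ia32 this expands to write_barrier() */
    }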
/branches/network/kernel/arch/ia32/include/memstr.h
35,116 → 35,13
#ifndef KERN_ia32_MEMSTR_H_
#define KERN_ia32_MEMSTR_H_
 
/** Copy memory
*
* Copy a given number of bytes (3rd argument)
* from the memory location defined by 2nd argument
* to the memory location defined by 1st argument.
* The memory areas cannot overlap.
*
* @param dst Destination
* @param src Source
* @param cnt Number of bytes
* @return Destination
*/
static inline void * memcpy(void * dst, const void * src, size_t cnt)
{
unative_t d0, d1, d2;
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
asm volatile(
/* copy all full dwords */
"rep movsl\n\t"
/* load count again */
"movl %4, %%ecx\n\t"
/* ecx = ecx mod 4 */
"andl $3, %%ecx\n\t"
/* are there last <=3 bytes? */
"jz 1f\n\t"
/* copy last <=3 bytes */
"rep movsb\n\t"
/* exit from asm block */
"1:\n"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" ((unative_t) (cnt / 4)), "g" ((unative_t) cnt), "1" ((unative_t) dst), "2" ((unative_t) src)
: "memory");
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
return dst;
}
extern int memcmp(const void *a, const void *b, size_t cnt);
 
 
/** Compare memory regions for equality
*
* Compare a given number of bytes (3rd argument)
* at memory locations defined by 1st and 2nd argument
* for equality. If bytes are equal function returns 0.
*
* @param src Region 1
* @param dst Region 2
* @param cnt Number of bytes
* @return Zero if bytes are equal, non-zero otherwise
*/
static inline int memcmp(const void * src, const void * dst, size_t cnt)
{
uint32_t d0, d1, d2;
int ret;
asm (
"repe cmpsb\n\t"
"je 1f\n\t"
"movl %3, %0\n\t"
"addl $1, %0\n\t"
"1:\n"
: "=a" (ret), "=%S" (d0), "=&D" (d1), "=&c" (d2)
: "0" (0), "1" ((unative_t) src), "2" ((unative_t) dst), "3" ((unative_t) cnt)
);
return ret;
}
 
/** Fill memory with words
* Fill a given number of words (2nd argument)
* at memory defined by 1st argument with the
* word value defined by 3rd argument.
*
* @param dst Destination
* @param cnt Number of words
* @param x Value to fill
*/
static inline void memsetw(uintptr_t dst, size_t cnt, uint16_t x)
{
uint32_t d0, d1;
asm volatile (
"rep stosw\n\t"
: "=&D" (d0), "=&c" (d1), "=a" (x)
: "0" (dst), "1" (cnt), "2" (x)
: "memory"
);
 
}
 
/** Fill memory with bytes
* Fill a given number of bytes (2nd argument)
* at memory defined by 1st argument with the
* word value defined by 3rd argument.
*
* @param dst Destination
* @param cnt Number of bytes
* @param x Value to fill
*/
static inline void memsetb(uintptr_t dst, size_t cnt, uint8_t x)
{
uint32_t d0, d1;
asm volatile (
"rep stosb\n\t"
: "=&D" (d0), "=&c" (d1), "=a" (x)
: "0" (dst), "1" (cnt), "2" (x)
: "memory"
);
 
}
 
#endif
 
/** @}
/branches/network/kernel/arch/ia32/include/types.h
35,10 → 35,6
#ifndef KERN_ia32_TYPES_H_
#define KERN_ia32_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed long int32_t;
61,14 → 57,29
typedef uint32_t unative_t;
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
#define PRIp "x" /**< Format for uintptr_t. */
#define PRIs "u" /**< Format for size_t. */
#define PRIc "u" /**< Format for count_t. */
#define PRIi "u" /**< Format for index_t. */
 
typedef int32_t inr_t;
typedef int32_t devno_t;
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
#define PRId32 "d" /**< Format for int32_t. */
#define PRId64 "lld" /**< Format for int64_t. */
#define PRIdn "d" /**< Format for native_t. */
 
#define PRIu8 "u" /**< Format for uint8_t. */
#define PRIu16 "u" /**< Format for uint16_t. */
#define PRIu32 "u" /**< Format for uint32_t. */
#define PRIu64 "llu" /**< Format for uint64_t. */
#define PRIun "u" /**< Format for unative_t. */
 
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */
#define PRIx32 "x" /**< Format for hexadecimal (u)uint32_t. */
#define PRIx64 "llx" /**< Format for hexadecimal (u)int64_t. */
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */
 
/** Page Table Entry. */
typedef struct {
unsigned present : 1;
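
The PRI* macros defined above are printf() format fragments in the spirit of C99 <inttypes.h>: they are spliced into the format string by string-literal concatenation. An illustrative call with made-up variable names:

    uintptr_t base = 0xc0000000;
    uint64_t ticks = 42;
    printf("mapped at %#" PRIp ", %" PRIu64 " ticks\n", base, ticks);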
/branches/network/kernel/arch/ia32/include/smp/apic.h
105,8 → 105,8
#define MODEL_CLUSTER 0x0
 
/** Interrupt Command Register. */
#define ICRlo (0x300/sizeof(uint32_t))
#define ICRhi (0x310/sizeof(uint32_t))
#define ICRlo (0x300 / sizeof(uint32_t))
#define ICRhi (0x310 / sizeof(uint32_t))
typedef struct {
union {
uint32_t lo;
133,10 → 133,10
} __attribute__ ((packed)) icr_t;
 
/* End Of Interrupt. */
#define EOI (0x0b0/sizeof(uint32_t))
#define EOI (0x0b0 / sizeof(uint32_t))
 
/** Error Status Register. */
#define ESR (0x280/sizeof(uint32_t))
#define ESR (0x280 / sizeof(uint32_t))
typedef union {
uint32_t value;
uint8_t err_bitmap;
154,7 → 154,7
} esr_t;
 
/* Task Priority Register */
#define TPR (0x080/sizeof(uint32_t))
#define TPR (0x080 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
164,7 → 164,7
} tpr_t;
 
/** Spurious-Interrupt Vector Register. */
#define SVR (0x0f0/sizeof(uint32_t))
#define SVR (0x0f0 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
176,7 → 176,7
} svr_t;
 
/** Time Divide Configuration Register. */
#define TDCR (0x3e0/sizeof(uint32_t))
#define TDCR (0x3e0 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
186,13 → 186,13
} tdcr_t;
 
/* Initial Count Register for Timer */
#define ICRT (0x380/sizeof(uint32_t))
#define ICRT (0x380 / sizeof(uint32_t))
 
/* Current Count Register for Timer */
#define CCRT (0x390/sizeof(uint32_t))
#define CCRT (0x390 / sizeof(uint32_t))
 
/** LVT Timer register. */
#define LVT_Tm (0x320/sizeof(uint32_t))
#define LVT_Tm (0x320 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
207,8 → 207,8
} lvt_tm_t;
 
/** LVT LINT registers. */
#define LVT_LINT0 (0x350/sizeof(uint32_t))
#define LVT_LINT1 (0x360/sizeof(uint32_t))
#define LVT_LINT0 (0x350 / sizeof(uint32_t))
#define LVT_LINT1 (0x360 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
225,7 → 225,7
} lvt_lint_t;
 
/** LVT Error register. */
#define LVT_Err (0x370/sizeof(uint32_t))
#define LVT_Err (0x370 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
239,7 → 239,7
} lvt_error_t;
 
/** Local APIC ID Register. */
#define L_APIC_ID (0x020/sizeof(uint32_t))
#define L_APIC_ID (0x020 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
249,14 → 249,14
} l_apic_id_t;
 
/** Local APIC Version Register */
#define LAVR (0x030/sizeof(uint32_t))
#define LAVR (0x030 / sizeof(uint32_t))
#define LAVR_Mask 0xff
#define is_local_apic(x) (((x)&LAVR_Mask&0xf0)==0x1)
#define is_82489DX_apic(x) ((((x)&LAVR_Mask&0xf0)==0x0))
#define is_local_xapic(x) (((x)&LAVR_Mask)==0x14)
#define is_local_apic(x) (((x) & LAVR_Mask & 0xf0) == 0x1)
#define is_82489DX_apic(x) ((((x) & LAVR_Mask & 0xf0) == 0x0))
#define is_local_xapic(x) (((x) & LAVR_Mask) == 0x14)
 
/** Logical Destination Register. */
#define LDR (0x0d0/sizeof(uint32_t))
#define LDR (0x0d0 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
266,7 → 266,7
} ldr_t;
 
/** Destination Format Register. */
#define DFR (0x0e0/sizeof(uint32_t))
#define DFR (0x0e0 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
276,8 → 276,8
} dfr_t;
 
/* IO APIC */
#define IOREGSEL (0x00/sizeof(uint32_t))
#define IOWIN (0x10/sizeof(uint32_t))
#define IOREGSEL (0x00 / sizeof(uint32_t))
#define IOWIN (0x10 / sizeof(uint32_t))
 
#define IOAPICID 0x00
#define IOAPICVER 0x01
/branches/network/kernel/arch/ia32/include/byteorder.h
35,15 → 35,9
#ifndef KERN_ia32_BYTEORDER_H_
#define KERN_ia32_BYTEORDER_H_
 
#include <byteorder.h>
 
/* IA-32 is little-endian */
#define uint32_t_le2host(n) (n)
#define uint64_t_le2host(n) (n)
#define ARCH_IS_LITTLE_ENDIAN
 
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n)
 
#endif
 
/** @}
/branches/network/kernel/arch/ia32/include/context_offset.h
0,0 → 1,83
/*
* Copyright (c) 2008 Josef Cejka
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32
* @{
*/
/** @file
*/
 
#ifndef KERN_ia32_CONTEXT_OFFSET_H_
#define KERN_ia32_CONTEXT_OFFSET_H_
 
#define OFFSET_SP 0x0
#define OFFSET_PC 0x4
#define OFFSET_EBX 0x8
#define OFFSET_ESI 0xC
#define OFFSET_EDI 0x10
#define OFFSET_EBP 0x14
 
#ifdef KERNEL
# define OFFSET_IPL 0x18
#else
# define OFFSET_TLS 0x18
#endif
 
 
#ifdef __ASM__
 
# ctx: address of the structure with saved context
# pc: return address
 
.macro CONTEXT_SAVE_ARCH_CORE ctx:req pc:req
movl %esp,OFFSET_SP(\ctx) # %esp -> ctx->sp
movl \pc,OFFSET_PC(\ctx) # %eip -> ctx->pc
movl %ebx,OFFSET_EBX(\ctx) # %ebx -> ctx->ebx
movl %esi,OFFSET_ESI(\ctx) # %esi -> ctx->esi
movl %edi,OFFSET_EDI(\ctx) # %edi -> ctx->edi
movl %ebp,OFFSET_EBP(\ctx) # %ebp -> ctx->ebp
.endm
 
# ctx: address of the structure with saved context
 
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req pc:req
movl OFFSET_SP(\ctx),%esp # ctx->sp -> %esp
movl OFFSET_PC(\ctx),\pc # ctx->pc -> \pc
movl OFFSET_EBX(\ctx),%ebx # ctx->ebx -> %ebx
movl OFFSET_ESI(\ctx),%esi # ctx->esi -> %esi
movl OFFSET_EDI(\ctx),%edi # ctx->edi -> %edi
movl OFFSET_EBP(\ctx),%ebp # ctx->ebp -> %ebp
.endm
 
#endif /* __ASM__ */
 
#endif
 
/** @}
*/
 
/branches/network/kernel/arch/ia32/include/context.h
35,6 → 35,7
#ifndef KERN_ia32_CONTEXT_H_
#define KERN_ia32_CONTEXT_H_
 
#ifdef KERNEL
#include <arch/types.h>
 
#define STACK_ITEM_SIZE 4
47,6 → 48,8
*/
#define SP_DELTA (8 + STACK_ITEM_SIZE)
 
#endif /* KERNEL */
 
/*
* Only save registers that must be preserved across
* function calls.
/branches/network/kernel/arch/ia32/Makefile.inc
29,11 → 29,15
## Toolchain configuration
#
 
ifndef CROSS_PREFIX
CROSS_PREFIX = /usr/local
endif
 
BFD_NAME = elf32-i386
BFD_ARCH = i386
BFD = binary
TARGET = i686-pc-linux-gnu
TOOLCHAIN_DIR = /usr/local/i686
TOOLCHAIN_DIR = $(CROSS_PREFIX)/i686
 
DEFS += -DMACHINE=$(MACHINE) -D__32_BITS__
 
46,7 → 50,8
#
 
ifeq ($(MACHINE),athlon-xp)
CMN2 = -march=athlon-xp -mmmx -msse -m3dnow
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-3dnow
CMN2 = -march=athlon-xp
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=ssea
55,7 → 60,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),athlon-mp)
CMN2 = -march=athlon-mp -mmmx -msse -m3dnow
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-3dnow
CMN2 = -march=athlon-mp
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += xarch=ssea
63,7 → 69,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),pentium3)
CMN2 = -march=pentium3 -mmmx -msse
FPU_NO_CFLAGS = -mno-mmx -mno-sse
CMN2 = -march=pentium3
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=sse
71,7 → 78,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),core)
CMN2 = -march=prescott -mfpmath=sse -mmmx -msse -msse2 -msse3
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-sse2 -mno-sse3
CMN2 = -march=prescott
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=sse3
78,7 → 86,8
DEFS += -DCONFIG_FENCES_P4
endif
ifeq ($(MACHINE),pentium4)
GCC_CFLAGS += -march=pentium4 -mfpmath=sse -mmmx -msse -msse2
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-sse2
GCC_CFLAGS += -march=pentium4
ICC_CFLAGS += -march=pentium4
SUNCC_CFLAGS += -xarch=sse2
DEFS += -DCONFIG_FENCES_P4
120,7 → 129,7
CONFIG_SOFTINT = y
 
ARCH_SOURCES = \
arch/$(ARCH)/src/context.s \
arch/$(ARCH)/src/context.S \
arch/$(ARCH)/src/debug/panic.s \
arch/$(ARCH)/src/delay.s \
arch/$(ARCH)/src/asm.S \
/branches/network/kernel/arch/ia32/src/context.s
File deleted
/branches/network/kernel/arch/ia32/src/asm.S
37,6 → 37,8
.global paging_on
.global enable_l_apic_in_msr
.global interrupt_handlers
.global memsetb
.global memsetw
.global memcpy
.global memcpy_from_uspace
.global memcpy_from_uspace_failover_address
44,6 → 46,15
.global memcpy_to_uspace_failover_address
 
 
# Wrapper for generic memsetb
memsetb:
jmp _memsetb
 
# Wrapper for generic memsetw
memsetw:
jmp _memsetw
 
 
#define MEMCPY_DST 4
#define MEMCPY_SRC 8
#define MEMCPY_SIZE 12
60,7 → 71,7
* @param MEMCPY_SRC(%esp) Source address.
* @param MEMCPY_SIZE(%esp) Size.
*
* @return MEMCPY_SRC(%esp) on success and 0 on failure.
* @return MEMCPY_DST(%esp) on success and 0 on failure.
*/
memcpy:
memcpy_from_uspace:
85,7 → 96,7
0:
movl %edx, %edi
movl %eax, %esi
movl MEMCPY_SRC(%esp), %eax /* MEMCPY_SRC(%esp), success */
movl MEMCPY_DST(%esp), %eax /* MEMCPY_DST(%esp), success */
ret
/*
173,6 → 184,7
movw %ax, %ds
movw %ax, %es
cld
sti
# syscall_handler(edx, ecx, ebx, esi, edi, ebp, eax)
call syscall_handler
233,6 → 245,8
movw %ax, %ds
movw %ax, %es
 
cld
 
pushl %esp # *istate
pushl $(\i) # intnum
call exc_dispatch # excdispatch(intnum, *istate)
/branches/network/kernel/arch/ia32/src/mm/as.c
39,9 → 39,7
/** Architecture dependent address space init. */
void as_arch_init(void)
{
#ifndef __OBJC__
as_operations = &as_pt_operations;
#endif
}
 
/** @}
/branches/network/kernel/arch/ia32/src/drivers/ega.c
93,12 → 93,6
sysinfo_set_item_val("fb.width", NULL, ROW);
sysinfo_set_item_val("fb.height", NULL, ROWS);
sysinfo_set_item_val("fb.address.physical", NULL, VIDEORAM);
sysinfo_set_item_val("fb.address.color", NULL, PAGE_COLOR((uintptr_t)
videoram));
#ifndef CONFIG_FB
putchar('\n');
#endif
}
 
static void ega_display_char(char ch)
115,7 → 109,7
return;
 
memcpy((void *) videoram, (void *) (videoram + ROW * 2), (SCREEN - ROW) * 2);
memsetw((uintptr_t) (videoram + (SCREEN - ROW) * 2), ROW, 0x0720);
memsetw(videoram + (SCREEN - ROW) * 2, ROW, 0x0720);
ega_cursor = ega_cursor - ROW;
}
 
/branches/network/kernel/arch/ia32/src/userspace.c
70,6 → 70,10
"pushl %3\n"
"pushl %4\n"
"movl %5, %%eax\n"
 
/* %ebx is defined to hold pcb_ptr - set it to 0 */
"xorl %%ebx, %%ebx\n"
 
"iret\n"
:
: "i" (selector(UDATA_DES) | PL_USER),
/branches/network/kernel/arch/ia32/src/smp/smp.c
155,13 → 155,12
* Prepare new GDT for CPU in question.
*/
gdt_new = (struct descriptor *) malloc(GDT_ITEMS *
sizeof(struct descriptor), FRAME_ATOMIC);
sizeof(struct descriptor), FRAME_ATOMIC | FRAME_LOW_4_GiB);
if (!gdt_new)
panic("couldn't allocate memory for GDT\n");
 
memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor));
memsetb((uintptr_t)(&gdt_new[TSS_DES]),
sizeof(struct descriptor), 0);
memsetb(&gdt_new[TSS_DES], sizeof(struct descriptor), 0);
protected_ap_gdtr.limit = GDT_ITEMS * sizeof(struct descriptor);
protected_ap_gdtr.base = KA2PA((uintptr_t) gdt_new);
gdtr.base = (uintptr_t) gdt_new;
/branches/network/kernel/arch/ia32/src/smp/ap.S
45,7 → 45,7
KTEXT=8
KDATA=16
 
# This piece of code is real-mode and is meant to be alligned at 4K boundary.
# This piece of code is real-mode and is meant to be aligned at 4K boundary.
# The requirement for such an alignment comes from MP Specification's STARTUP IPI
# requirements.
 
/branches/network/kernel/arch/ia32/src/pm.c
112,7 → 112,7
 
void tss_initialize(tss_t *t)
{
memsetb((uintptr_t) t, sizeof(struct tss), 0);
memsetb(t, sizeof(struct tss), 0);
}
 
/*
240,7 → 240,7
preemption_disable();
ipl_t ipl = interrupts_disable();
memsetb((uintptr_t) idt, sizeof(idt), 0);
memsetb(idt, sizeof(idt), 0);
ptr_16_32_t idtr;
idtr.limit = sizeof(idt);
/branches/network/kernel/arch/ia32/src/debug/panic.s
30,5 → 30,5
.global panic_printf
 
panic_printf:
movl $halt,(%esp) # fake stack to make printf return to halt
movl $halt, (%esp) # fake stack to make printf return to halt
jmp printf
/branches/network/kernel/arch/ia32/src/boot/boot.S
50,6 → 50,7
.long multiboot_image_start
multiboot_image_start:
cld
movl $START_STACK, %esp # initialize stack pointer
lgdt KA2PA(bootstrap_gdtr) # initialize Global Descriptor Table register
 
85,7 → 86,6
mov $vesa_init, %esi
mov $VESA_INIT_SEGMENT << 4, %edi
mov $e_vesa_init - vesa_init, %ecx
cld
rep movsb
 
mov $VESA_INIT_SEGMENT << 4, %edi
206,7 → 206,6
movl $BOOT_OFFSET, %esi
movl $AP_BOOT_OFFSET, %edi
movl $_hardcoded_unmapped_size, %ecx
cld
rep movsb
#endif
279,7 → 278,6
addl %eax, %edi
movw $0x0c00, %ax # black background, light red foreground
cld
ploop:
lodsb
/branches/network/kernel/arch/ia32/src/context.S
0,0 → 1,67
#
# Copyright (c) 2001-2004 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
#include <arch/context_offset.h>
 
.text
 
.global context_save_arch
.global context_restore_arch
 
 
## Save current CPU context
#
# Save CPU context to the context_t variable
# pointed by the 1st argument. Returns 1 in EAX.
#
context_save_arch:
movl 0(%esp),%eax # save pc value into eax
movl 4(%esp),%edx # address of the context variable to save context to
 
# save registers to given structure
CONTEXT_SAVE_ARCH_CORE %edx %eax
 
xorl %eax,%eax # context_save returns 1
incl %eax
ret
 
 
## Restore saved CPU context
#
# Restore CPU context from context_t variable
# pointed by the 1st argument. Returns 0 in EAX.
#
context_restore_arch:
movl 4(%esp),%eax # address of the context variable to restore context from
 
# restore registers from given structure
CONTEXT_RESTORE_ARCH_CORE %eax %edx
 
movl %edx,0(%esp) # put saved pc on stack
xorl %eax,%eax # context_restore returns 0
ret
/branches/network/kernel/arch/amd64/include/atomic.h
41,17 → 41,17
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock incq %0\n" : "=m" (val->count));
asm volatile ("lock incq %0\n" : "+m" (val->count));
#else
asm volatile ("incq %0\n" : "=m" (val->count));
asm volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock decq %0\n" : "=m" (val->count));
asm volatile ("lock decq %0\n" : "+m" (val->count));
#else
asm volatile ("decq %0\n" : "=m" (val->count));
asm volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
61,7 → 61,7
 
asm volatile (
"lock xaddq %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
 
return r;
73,14 → 73,14
asm volatile (
"lock xaddq %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
return r;
}
 
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
 
static inline uint64_t test_and_set(atomic_t *val) {
uint64_t v;
88,7 → 88,7
asm volatile (
"movq $1, %0\n"
"xchgq %0, %1\n"
: "=r" (v),"=m" (val->count)
: "=r" (v), "+m" (val->count)
);
return v;
102,20 → 102,20
 
preemption_disable();
asm volatile (
"0:;"
"0:\n"
#ifdef CONFIG_HT
"pause;"
"pause\n"
#endif
"mov %0, %1;"
"testq %1, %1;"
"jnz 0b;" /* Lightweight looping on locked spinlock */
"mov %0, %1\n"
"testq %1, %1\n"
"jnz 0b\n" /* lightweight looping on locked spinlock */
"incq %1;" /* now use the atomic operation */
"xchgq %0, %1;"
"testq %1, %1;"
"jnz 0b;"
: "=m"(val->count),"=r"(tmp)
);
"incq %1\n" /* now use the atomic operation */
"xchgq %0, %1\n"
"testq %1, %1\n"
"jnz 0b\n"
: "+m" (val->count), "=&r" (tmp)
);
/*
* Prevent critical section code from bleeding out this way up.
*/
/branches/network/kernel/arch/amd64/include/mm/page.h
52,8 → 52,6
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 0 /* dummy */
 
#ifdef KERNEL
 
#ifndef __ASM__
/branches/network/kernel/arch/amd64/include/memstr.h
35,110 → 35,13
#ifndef KERN_amd64_MEMSTR_H_
#define KERN_amd64_MEMSTR_H_
 
/** Copy memory
*
* Copy a given number of bytes (3rd argument)
* from the memory location defined by 2nd argument
* to the memory location defined by 1st argument.
* The memory areas cannot overlap.
*
* @param dst Destination
* @param src Source
* @param cnt Number of bytes
* @return Destination
*/
static inline void * memcpy(void * dst, const void * src, size_t cnt)
{
unative_t d0, d1, d2;
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
asm volatile(
"rep movsq\n\t"
"movq %4, %%rcx\n\t"
"andq $7, %%rcx\n\t"
"jz 1f\n\t"
"rep movsb\n\t"
"1:\n"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" ((unative_t)(cnt / 8)), "g" ((unative_t)cnt), "1" ((unative_t) dst), "2" ((unative_t) src)
: "memory");
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
return dst;
}
extern int memcmp(const void *a, const void *b, size_t cnt);
 
 
/** Compare memory regions for equality
*
* Compare a given number of bytes (3rd argument)
* at memory locations defined by 1st and 2nd argument
* for equality. If bytes are equal function returns 0.
*
* @param src Region 1
* @param dst Region 2
* @param cnt Number of bytes
* @return Zero if bytes are equal, non-zero otherwise
*/
static inline int memcmp(const void * src, const void * dst, size_t cnt)
{
unative_t d0, d1, d2;
unative_t ret;
asm (
"repe cmpsb\n\t"
"je 1f\n\t"
"movq %3, %0\n\t"
"addq $1, %0\n\t"
"1:\n"
: "=a" (ret), "=%S" (d0), "=&D" (d1), "=&c" (d2)
: "0" (0), "1" (src), "2" (dst), "3" ((unative_t)cnt)
);
return ret;
}
 
/** Fill memory with words
* Fill a given number of words (2nd argument)
* at memory defined by 1st argument with the
* word value defined by 3rd argument.
*
* @param dst Destination
* @param cnt Number of words
* @param x Value to fill
*/
static inline void memsetw(uintptr_t dst, size_t cnt, uint16_t x)
{
unative_t d0, d1;
asm volatile (
"rep stosw\n\t"
: "=&D" (d0), "=&c" (d1), "=a" (x)
: "0" (dst), "1" ((unative_t)cnt), "2" (x)
: "memory"
);
 
}
 
/** Fill memory with bytes
* Fill a given number of bytes (2nd argument)
* at memory defined by 1st argument with the
* word value defined by 3rd argument.
*
* @param dst Destination
* @param cnt Number of bytes
* @param x Value to fill
*/
static inline void memsetb(uintptr_t dst, size_t cnt, uint8_t x)
{
unative_t d0, d1;
asm volatile (
"rep stosb\n\t"
: "=&D" (d0), "=&c" (d1), "=a" (x)
: "0" (dst), "1" ((unative_t)cnt), "2" (x)
: "memory"
);
 
}
 
#endif
 
/** @}
/branches/network/kernel/arch/amd64/include/types.h
35,10 → 35,6
#ifndef KERN_amd64_TYPES_H_
#define KERN_amd64_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
61,14 → 57,31
typedef uint64_t unative_t;
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
/**< Formats for uintptr_t, size_t, count_t and index_t */
#define PRIp "llx"
#define PRIs "llu"
#define PRIc "llu"
#define PRIi "llu"
 
typedef int32_t inr_t;
typedef int32_t devno_t;
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */
#define PRId8 "d"
#define PRId16 "d"
#define PRId32 "d"
#define PRId64 "lld"
#define PRIdn "lld"
 
#define PRIu8 "u"
#define PRIu16 "u"
#define PRIu32 "u"
#define PRIu64 "llu"
#define PRIun "llu"
 
#define PRIx8 "x"
#define PRIx16 "x"
#define PRIx32 "x"
#define PRIx64 "llx"
#define PRIxn "llx"
 
/** Page Table Entry. */
typedef struct {
unsigned present : 1;
/branches/network/kernel/arch/amd64/include/byteorder.h
35,15 → 35,9
#ifndef KERN_amd64_BYTEORDER_H_
#define KERN_amd64_BYTEORDER_H_
 
#include <byteorder.h>
 
/* AMD64 is little-endian */
#define uint32_t_le2host(n) (n)
#define uint64_t_le2host(n) (n)
#define ARCH_IS_LITTLE_ENDIAN
 
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n)
 
#endif
 
/** @}
/branches/network/kernel/arch/amd64/include/cpu.h
35,8 → 35,9
#ifndef KERN_amd64_CPU_H_
#define KERN_amd64_CPU_H_
 
#define RFLAGS_IF (1 << 9)
#define RFLAGS_RF (1 << 16)
#define RFLAGS_IF (1 << 9)
#define RFLAGS_DF (1 << 10)
#define RFLAGS_RF (1 << 16)
 
#define EFER_MSR_NUM 0xc0000080
#define AMD_SCE_FLAG 0
/branches/network/kernel/arch/amd64/include/context_offset.h
37,6 → 37,43
#define OFFSET_R13 0x28
#define OFFSET_R14 0x30
#define OFFSET_R15 0x38
#define OFFSET_IPL 0x40
 
#ifdef KERNEL
# define OFFSET_IPL 0x40
#else
# define OFFSET_TLS 0x40
#endif
 
#ifdef __ASM__
 
# ctx: address of the structure with saved context
# pc: return address
.macro CONTEXT_SAVE_ARCH_CORE ctx:req pc:req
movq \pc, OFFSET_PC(\ctx)
movq %rsp, OFFSET_SP(\ctx)
movq %rbx, OFFSET_RBX(\ctx)
movq %rbp, OFFSET_RBP(\ctx)
movq %r12, OFFSET_R12(\ctx)
movq %r13, OFFSET_R13(\ctx)
movq %r14, OFFSET_R14(\ctx)
movq %r15, OFFSET_R15(\ctx)
.endm
 
# ctx: address of the structure with saved context
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req pc:req
movq OFFSET_R15(\ctx), %r15
movq OFFSET_R14(\ctx), %r14
movq OFFSET_R13(\ctx), %r13
movq OFFSET_R12(\ctx), %r12
movq OFFSET_RBP(\ctx), %rbp
movq OFFSET_RBX(\ctx), %rbx
movq OFFSET_SP(\ctx), %rsp # ctx->sp -> %rsp
movq OFFSET_PC(\ctx), \pc
.endm
 
#endif
 
#endif
/branches/network/kernel/arch/amd64/include/context.h
35,6 → 35,8
#ifndef KERN_amd64_CONTEXT_H_
#define KERN_amd64_CONTEXT_H_
 
#ifdef KERNEL
 
#include <arch/types.h>
 
/* According to ABI the stack MUST be aligned on
43,6 → 45,8
*/
#define SP_DELTA 16
 
#endif /* KERNEL */
 
/* We include only registers that must be preserved
* during function call
*/
/branches/network/kernel/arch/amd64/Makefile.inc
29,12 → 29,17
## Toolchain configuration
#
 
ifndef CROSS_PREFIX
CROSS_PREFIX = /usr/local
endif
 
BFD_NAME = elf64-x86-64
BFD_ARCH = i386:x86-64
BFD = binary
TARGET = amd64-linux-gnu
TOOLCHAIN_DIR = /usr/local/amd64
TOOLCHAIN_DIR = $(CROSS_PREFIX)/amd64
 
FPU_NO_CFLAGS = -mno-sse -mno-sse2
CMN1 = -m64 -mcmodel=kernel -mno-red-zone -fno-unwind-tables
GCC_CFLAGS += $(CMN1)
ICC_CFLAGS += $(CMN1)
/branches/network/kernel/arch/amd64/src/asm_utils.S
65,6 → 65,8
.global get_cycle
.global read_efer_flag
.global set_efer_flag
.global memsetb
.global memsetw
.global memcpy
.global memcpy_from_uspace
.global memcpy_to_uspace
71,6 → 73,14
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address
 
# Wrapper for generic memsetb
memsetb:
jmp _memsetb
 
# Wrapper for generic memsetw
memsetw:
jmp _memsetw
 
#define MEMCPY_DST %rdi
#define MEMCPY_SRC %rsi
#define MEMCPY_SIZE %rdx
88,12 → 98,12
* @param MEMCPY_SRC Source address.
* @param MEMCPY_SIZE Number of bytes to copy.
*
* @return MEMCPY_SRC on success, 0 on failure.
* @return MEMCPY_DST on success, 0 on failure.
*/
memcpy:
memcpy_from_uspace:
memcpy_to_uspace:
movq MEMCPY_SRC, %rax
movq MEMCPY_DST, %rax
 
movq MEMCPY_SIZE, %rcx
shrq $3, %rcx /* size / 8 */
248,6 → 258,7
.endif
 
save_all_gpr
cld
 
movq $(\i), %rdi # %rdi - first parameter
movq %rsp, %rsi # %rsi - pointer to istate
/branches/network/kernel/arch/amd64/src/userspace.c
61,6 → 61,8
"pushq %3\n"
"pushq %4\n"
"movq %5, %%rax\n"
/* %rdi is defined to hold pcb_ptr - set it to 0 */
"xorq %%rdi, %%rdi\n"
"iretq\n"
: :
"i" (gdtselector(UDATA_DES) | PL_USER),
/branches/network/kernel/arch/amd64/src/debugger.c
106,25 → 106,33
{
unsigned int i;
char *symbol;
 
#ifdef __32_BITS__
printf("# Count Address In symbol\n");
printf("-- ----- ---------- ---------\n");
#endif
 
#ifdef __64_BITS__
printf("# Count Address In symbol\n");
printf("-- ----- ------------------ ---------\n");
#endif
if (sizeof(void *) == 4) {
printf("# Count Address In symbol\n");
printf("-- ----- ---------- ---------\n");
} else {
printf("# Count Address In symbol\n");
printf("-- ----- ------------------ ---------\n");
}
for (i = 0; i < BKPOINTS_MAX; i++)
if (breakpoints[i].address) {
symbol = get_symtab_entry(breakpoints[i].address);
if (sizeof(void *) == 4)
printf("%-2u %-5d %#10zx %s\n", i, breakpoints[i].counter,
breakpoints[i].address, symbol);
else
printf("%-2u %-5d %#18zx %s\n", i, breakpoints[i].counter,
breakpoints[i].address, symbol);
 
#ifdef __32_BITS__
printf("%-2u %-5d %#10zx %s\n", i,
breakpoints[i].counter, breakpoints[i].address,
symbol);
#endif
 
#ifdef __64_BITS__
printf("%-2u %-5d %#18zx %s\n", i,
breakpoints[i].counter, breakpoints[i].address,
symbol);
#endif
 
}
return 1;
}
162,19 → 170,23
if ((flags & BKPOINT_INSTR)) {
;
} else {
if (sizeof(int) == 4)
dr7 |= ((unative_t) 0x3) << (18 + 4*curidx);
else /* 8 */
dr7 |= ((unative_t) 0x2) << (18 + 4*curidx);
#ifdef __32_BITS__
dr7 |= ((unative_t) 0x3) << (18 + 4 * curidx);
#endif
 
#ifdef __64_BITS__
dr7 |= ((unative_t) 0x2) << (18 + 4 * curidx);
#endif
if ((flags & BKPOINT_WRITE))
dr7 |= ((unative_t) 0x1) << (16 + 4*curidx);
dr7 |= ((unative_t) 0x1) << (16 + 4 * curidx);
else if ((flags & BKPOINT_READ_WRITE))
dr7 |= ((unative_t) 0x3) << (16 + 4*curidx);
dr7 |= ((unative_t) 0x3) << (16 + 4 * curidx);
}
 
/* Enable global breakpoint */
dr7 |= 0x2 << (curidx*2);
dr7 |= 0x2 << (curidx * 2);
 
write_dr7(dr7);
246,15 → 258,15
if ((breakpoints[slot].flags & BKPOINT_CHECK_ZERO)) {
if (*((unative_t *) breakpoints[slot].address) != 0)
return;
printf("**** Found ZERO on address %lx (slot %d) ****\n",
breakpoints[slot].address, slot);
printf("*** Found ZERO on address %lx (slot %d) ***\n",
breakpoints[slot].address, slot);
} else {
printf("Data watchpoint - new data: %lx\n",
*((unative_t *) breakpoints[slot].address));
*((unative_t *) breakpoints[slot].address));
}
}
printf("Reached breakpoint %d:%lx(%s)\n", slot, getip(istate),
get_symtab_entry(getip(istate)));
get_symtab_entry(getip(istate)));
printf("***Type 'exit' to exit kconsole.\n");
atomic_set(&haltstate,1);
kconsole((void *) "debug");
292,7 → 304,8
/** Remove breakpoint from table */
int cmd_del_breakpoint(cmd_arg_t *argv)
{
if (argv->intval < 0 || argv->intval > BKPOINTS_MAX) {
unative_t bpno = argv->intval;
if (bpno > BKPOINTS_MAX) {
printf("Invalid breakpoint number.\n");
return 0;
}
346,7 → 359,9
}
 
#ifdef CONFIG_SMP
static void debug_ipi(int n __attribute__((unused)), istate_t *istate __attribute__((unused)))
static void
debug_ipi(int n __attribute__((unused)),
istate_t *istate __attribute__((unused)))
{
int i;
 
362,7 → 377,7
{
int i;
 
for (i=0; i<BKPOINTS_MAX; i++)
for (i = 0; i < BKPOINTS_MAX; i++)
breakpoints[i].address = NULL;
cmd_initialize(&bkpts_info);
383,11 → 398,9
panic("could not register command %s\n", addwatchp_info.name);
#endif
exc_register(VECTOR_DEBUG, "debugger",
debug_exception);
exc_register(VECTOR_DEBUG, "debugger", debug_exception);
#ifdef CONFIG_SMP
exc_register(VECTOR_DEBUG_IPI, "debugger_smp",
debug_ipi);
exc_register(VECTOR_DEBUG_IPI, "debugger_smp", debug_ipi);
#endif
}
 
/branches/network/kernel/arch/amd64/src/pm.c
155,7 → 155,7
 
void tss_initialize(tss_t *t)
{
memsetb((uintptr_t) t, sizeof(tss_t), 0);
memsetb(t, sizeof(tss_t), 0);
}
 
/*
239,7 → 239,7
preemption_disable();
ipl_t ipl = interrupts_disable();
memsetb((uintptr_t) idt, sizeof(idt), 0);
memsetb(idt, sizeof(idt), 0);
idtr_load(&idtr);
interrupts_restore(ipl);
/branches/network/kernel/arch/amd64/src/proc/thread.c
46,7 → 46,7
* Kernel RSP can be precalculated at thread creation time.
*/
t->arch.syscall_rsp[SYSCALL_KSTACK_RSP] =
(uintptr_t)&t->kstack[PAGE_SIZE - sizeof(uint64_t)];
(uintptr_t) &t->kstack[PAGE_SIZE - sizeof(uint64_t)];
}
 
/** @}
/branches/network/kernel/arch/amd64/src/syscall.c
62,9 → 62,11
write_msr(AMD_MSR_LSTAR, (uint64_t)syscall_entry);
/* Mask RFLAGS on syscall
* - disable interrupts, until we exchange the stack register
* (mask the IE bit)
* (mask the IF bit)
* - clear DF so that the string instructions operate in
* the right direction
*/
write_msr(AMD_MSR_SFMASK, 0x200);
write_msr(AMD_MSR_SFMASK, RFLAGS_IF | RFLAGS_DF);
}
 
/** @}
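
With the RFLAGS_IF and RFLAGS_DF definitions added to cpu.h earlier in this revision, the new SFMASK value evaluates to:

    RFLAGS_IF | RFLAGS_DF == (1 << 9) | (1 << 10) == 0x600

i.e. the mask now clears DF in addition to IF on syscall entry, whereas the old hard-coded 0x200 only covered IF.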
/branches/network/kernel/arch/amd64/src/boot/boot.S
54,6 → 54,7
.long multiboot_image_start
 
multiboot_image_start:
cld
movl $START_STACK, %esp # initialize stack pointer
lgdtl bootstrap_gdtr # initialize Global Descriptor Table register
 
126,7 → 127,6
mov $vesa_init, %esi
mov $VESA_INIT_SEGMENT << 4, %edi
mov $e_vesa_init - vesa_init, %ecx
cld
rep movsb
 
mov $VESA_INIT_SEGMENT << 4, %edi
282,7 → 282,6
movq $BOOT_OFFSET, %rsi
movq $AP_BOOT_OFFSET, %rdi
movq $_hardcoded_unmapped_size, %rcx
cld
rep movsb
#endif
556,7 → 555,6
addl %eax, %edi
movw $0x0c00, %ax # black background, light red foreground
cld
ploop:
lodsb
/branches/network/kernel/arch/amd64/src/cpu/cpu.c
124,7 → 124,8
void cpu_arch_init(void)
{
CPU->arch.tss = tss_p;
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((uint8_t *) CPU->arch.tss);
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] -
((uint8_t *) CPU->arch.tss);
CPU->fpu_owner = NULL;
}
 
139,7 → 140,9
/*
* Check for AMD processor.
*/
if (info.cpuid_ebx==AMD_CPUID_EBX && info.cpuid_ecx==AMD_CPUID_ECX && info.cpuid_edx==AMD_CPUID_EDX) {
if (info.cpuid_ebx == AMD_CPUID_EBX &&
info.cpuid_ecx == AMD_CPUID_ECX &&
info.cpuid_edx == AMD_CPUID_EDX) {
CPU->arch.vendor = VendorAMD;
}
 
146,14 → 149,16
/*
* Check for Intel processor.
*/
if (info.cpuid_ebx==INTEL_CPUID_EBX && info.cpuid_ecx==INTEL_CPUID_ECX && info.cpuid_edx==INTEL_CPUID_EDX) {
if (info.cpuid_ebx == INTEL_CPUID_EBX &&
info.cpuid_ecx == INTEL_CPUID_ECX &&
info.cpuid_edx == INTEL_CPUID_EDX) {
CPU->arch.vendor = VendorIntel;
}
cpuid(1, &info);
CPU->arch.family = (info.cpuid_eax>>8)&0xf;
CPU->arch.model = (info.cpuid_eax>>4)&0xf;
CPU->arch.stepping = (info.cpuid_eax>>0)&0xf;
CPU->arch.family = (info.cpuid_eax >> 8) & 0xf;
CPU->arch.model = (info.cpuid_eax >> 4) & 0xf;
CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf;
}
}
 
160,8 → 165,8
void cpu_print_report(cpu_t* m)
{
printf("cpu%d: (%s family=%d model=%d stepping=%d) %dMHz\n",
m->id, vendor_str[m->arch.vendor], m->arch.family, m->arch.model, m->arch.stepping,
m->frequency_mhz);
m->id, vendor_str[m->arch.vendor], m->arch.family, m->arch.model,
m->arch.stepping, m->frequency_mhz);
}
 
/** @}
/branches/network/kernel/arch/amd64/src/context.S
40,17 → 40,10
#
context_save_arch:
movq (%rsp), %rdx # the caller's return %eip
 
# In %edi is passed 1st argument
movq %rdx, OFFSET_PC(%rdi)
movq %rsp, OFFSET_SP(%rdi)
CONTEXT_SAVE_ARCH_CORE %rdi %rdx
movq %rbx, OFFSET_RBX(%rdi)
movq %rbp, OFFSET_RBP(%rdi)
movq %r12, OFFSET_R12(%rdi)
movq %r13, OFFSET_R13(%rdi)
movq %r14, OFFSET_R14(%rdi)
movq %r15, OFFSET_R15(%rdi)
xorq %rax,%rax # context_save returns 1
incq %rax
ret
62,16 → 55,9
# pointed by the 1st argument. Returns 0 in EAX.
#
context_restore_arch:
movq OFFSET_R15(%rdi), %r15
movq OFFSET_R14(%rdi), %r14
movq OFFSET_R13(%rdi), %r13
movq OFFSET_R12(%rdi), %r12
movq OFFSET_RBP(%rdi), %rbp
movq OFFSET_RBX(%rdi), %rbx
movq OFFSET_SP(%rdi), %rsp # ctx->sp -> %rsp
movq OFFSET_PC(%rdi), %rdx
 
CONTEXT_RESTORE_ARCH_CORE %rdi %rdx
 
movq %rdx,(%rsp)
 
xorq %rax,%rax # context_restore returns 0
/branches/network/kernel/arch/amd64/src/mm/page.c
82,7 → 82,7
void page_arch_init(void)
{
uintptr_t cur;
int i;
unsigned int i;
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
 
if (config.cpu_active == 1) {
/branches/network/kernel/arch/sparc64/include/atomic.h
37,6 → 37,7
 
#include <arch/barrier.h>
#include <arch/types.h>
#include <preemption.h>
 
/** Atomic add operation.
*
56,7 → 57,8
 
a = *((uint64_t *) x);
b = a + i;
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a));
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)),
"+r" (b) : "r" (a));
} while (a != b);
 
return a;
97,7 → 99,8
uint64_t v = 1;
volatile uintptr_t x = (uint64_t) &val->count;
 
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (v) : "r" (0));
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)),
"+r" (v) : "r" (0));
 
return v;
}
109,6 → 112,8
 
volatile uintptr_t x = (uint64_t) &val->count;
 
preemption_disable();
 
asm volatile (
"0:\n"
"casx %0, %3, %1\n"
/branches/network/kernel/arch/sparc64/include/mm/page.h
53,11 → 53,6
 
#define MMU_PAGES_PER_PAGE (1 << (PAGE_WIDTH - MMU_PAGE_WIDTH))
 
/*
* With 16K pages, there is only one page color.
*/
#define PAGE_COLOR_BITS 0 /**< 14 - 14; 2^14 == 16K == alias boundary. */
 
#ifdef KERNEL
 
#ifndef __ASM__
/branches/network/kernel/arch/sparc64/include/mm/tlb.h
160,7 → 160,7
static inline void mmu_primary_context_write(uint64_t v)
{
asi_u64_write(ASI_DMMU, VA_PRIMARY_CONTEXT_REG, v);
flush();
flush_pipeline();
}
 
/** Read MMU Secondary Context Register.
179,7 → 179,7
static inline void mmu_secondary_context_write(uint64_t v)
{
asi_u64_write(ASI_DMMU, VA_SECONDARY_CONTEXT_REG, v);
flush();
flush_pipeline();
}
 
/** Read IMMU TLB Data Access Register.
209,7 → 209,7
reg.value = 0;
reg.tlb_entry = entry;
asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, reg.value, value);
flush();
flush_pipeline();
}
 
/** Read DMMU TLB Data Access Register.
279,7 → 279,7
static inline void itlb_tag_access_write(uint64_t v)
{
asi_u64_write(ASI_IMMU, VA_IMMU_TAG_ACCESS, v);
flush();
flush_pipeline();
}
 
/** Read IMMU TLB Tag Access Register.
318,7 → 318,7
static inline void itlb_data_in_write(uint64_t v)
{
asi_u64_write(ASI_ITLB_DATA_IN_REG, 0, v);
flush();
flush_pipeline();
}
 
/** Write DMMU TLB Data in Register.
347,7 → 347,7
static inline void itlb_sfsr_write(uint64_t v)
{
asi_u64_write(ASI_IMMU, VA_IMMU_SFSR, v);
flush();
flush_pipeline();
}
 
/** Read DTLB Synchronous Fault Status Register.
400,7 → 400,7
asi_u64_write(ASI_IMMU_DEMAP, da.value, 0); /* da.value is the
* address within the
* ASI */
flush();
flush_pipeline();
}
 
/** Perform DMMU TLB Demap Operation.
/branches/network/kernel/arch/sparc64/include/mm/cache_spec.h
0,0 → 1,57
/*
* Copyright (c) 2008 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64mm
* @{
*/
/** @file
*/
 
#ifndef KERN_sparc64_CACHE_SPEC_H_
#define KERN_sparc64_CACHE_SPEC_H_
 
/*
* The following macros are valid for the following processors:
*
* UltraSPARC, UltraSPARC II, UltraSPARC IIi
*
* Should we support other UltraSPARC processors, we need to make sure that
* the macros are defined correctly for them.
*/
 
#define DCACHE_SIZE (16 * 1024)
#define DCACHE_LINE_SIZE 32
 
#define ICACHE_SIZE (16 * 1024)
#define ICACHE_WAYS 2
#define ICACHE_LINE_SIZE 32
 
#endif
 
/** @}
*/
/branches/network/kernel/arch/sparc64/include/barrier.h
57,8 → 57,11
#define write_barrier() \
asm volatile ("membar #StoreStore\n" ::: "memory")
 
/** Flush Instruction Memory instruction. */
static inline void flush(void)
#define flush(a) \
asm volatile ("flush %0\n" :: "r" ((a)) : "memory")
 
/** Flush Instruction pipeline. */
static inline void flush_pipeline(void)
{
/*
* The FLUSH instruction takes address parameter.
79,6 → 82,21
asm volatile ("membar #Sync\n");
}
 
#define smc_coherence(a) \
{ \
write_barrier(); \
flush((a)); \
}
 
#define FLUSH_INVAL_MIN 4
#define smc_coherence_block(a, l) \
{ \
unsigned long i; \
write_barrier(); \
for (i = 0; i < (l); i += FLUSH_INVAL_MIN) \
flush((void *)(a) + i); \
}
 
#endif
 
/** @}
/branches/network/kernel/arch/sparc64/include/memstr.h
37,10 → 37,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/network/kernel/arch/sparc64/include/asm.h
37,6 → 37,7
 
#include <arch/arch.h>
#include <arch/types.h>
#include <typedefs.h>
#include <align.h>
#include <arch/register.h>
#include <config.h>
/branches/network/kernel/arch/sparc64/include/cpu.h
36,6 → 36,7
#define KERN_sparc64_CPU_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <arch/register.h>
#include <arch/asm.h>
 
/branches/network/kernel/arch/sparc64/include/types.h
35,10 → 35,6
#ifndef KERN_sparc64_TYPES_H_
#define KERN_sparc64_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
61,13 → 57,31
typedef uint64_t unative_t;
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
/**< Formats for uintptr_t, size_t, count_t and index_t */
#define PRIp "llx"
#define PRIs "llu"
#define PRIc "llu"
#define PRIi "llu"
 
typedef int32_t inr_t;
typedef int32_t devno_t;
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */
#define PRId8 "d"
#define PRId16 "d"
#define PRId32 "d"
#define PRId64 "lld"
#define PRIdn "lld"
 
#define PRIu8 "u"
#define PRIu16 "u"
#define PRIu32 "u"
#define PRIu64 "llu"
#define PRIun "llu"
 
#define PRIx8 "x"
#define PRIx16 "x"
#define PRIx32 "x"
#define PRIx64 "llx"
#define PRIxn "llx"
 
typedef uint8_t asi_t;
 
#endif
/branches/network/kernel/arch/sparc64/include/context_offset.h
48,4 → 48,60
#define OFFSET_L6 0x80
#define OFFSET_L7 0x88
 
#ifndef KERNEL
# define OFFSET_TP 0x90
#endif
 
#ifdef __ASM__
 
.macro CONTEXT_SAVE_ARCH_CORE ctx:req
stx %sp, [\ctx + OFFSET_SP]
stx %o7, [\ctx + OFFSET_PC]
stx %i0, [\ctx + OFFSET_I0]
stx %i1, [\ctx + OFFSET_I1]
stx %i2, [\ctx + OFFSET_I2]
stx %i3, [\ctx + OFFSET_I3]
stx %i4, [\ctx + OFFSET_I4]
stx %i5, [\ctx + OFFSET_I5]
stx %fp, [\ctx + OFFSET_FP]
stx %i7, [\ctx + OFFSET_I7]
stx %l0, [\ctx + OFFSET_L0]
stx %l1, [\ctx + OFFSET_L1]
stx %l2, [\ctx + OFFSET_L2]
stx %l3, [\ctx + OFFSET_L3]
stx %l4, [\ctx + OFFSET_L4]
stx %l5, [\ctx + OFFSET_L5]
stx %l6, [\ctx + OFFSET_L6]
stx %l7, [\ctx + OFFSET_L7]
#ifndef KERNEL
stx %g7, [\ctx + OFFSET_TP]
#endif
.endm
 
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req
ldx [\ctx + OFFSET_SP], %sp
ldx [\ctx + OFFSET_PC], %o7
ldx [\ctx + OFFSET_I0], %i0
ldx [\ctx + OFFSET_I1], %i1
ldx [\ctx + OFFSET_I2], %i2
ldx [\ctx + OFFSET_I3], %i3
ldx [\ctx + OFFSET_I4], %i4
ldx [\ctx + OFFSET_I5], %i5
ldx [\ctx + OFFSET_FP], %fp
ldx [\ctx + OFFSET_I7], %i7
ldx [\ctx + OFFSET_L0], %l0
ldx [\ctx + OFFSET_L1], %l1
ldx [\ctx + OFFSET_L2], %l2
ldx [\ctx + OFFSET_L3], %l3
ldx [\ctx + OFFSET_L4], %l4
ldx [\ctx + OFFSET_L5], %l5
ldx [\ctx + OFFSET_L6], %l6
ldx [\ctx + OFFSET_L7], %l7
#ifndef KERNEL
ldx [\ctx + OFFSET_TP], %g7
#endif
.endm
 
#endif /* __ASM__ */
 
#endif
/branches/network/kernel/arch/sparc64/include/byteorder.h
35,14 → 35,8
#ifndef KERN_sparc64_BYTEORDER_H_
#define KERN_sparc64_BYTEORDER_H_
 
#include <byteorder.h>
#define ARCH_IS_BIG_ENDIAN
 
#define uint32_t_le2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_le2host(n) uint64_t_byteorder_swap(n)
 
#define uint32_t_be2host(n) (n)
#define uint64_t_be2host(n) (n)
 
#endif
 
/** @}
/branches/network/kernel/arch/sparc64/Makefile.inc
29,11 → 29,15
## Toolchain configuration
#
 
ifndef CROSS_PREFIX
CROSS_PREFIX = /usr/local
endif
 
BFD_NAME = elf64-sparc
BFD_ARCH = sparc
BFD = binary
TARGET = sparc64-linux-gnu
TOOLCHAIN_DIR = /usr/local/sparc64
TOOLCHAIN_DIR = $(CROSS_PREFIX)/sparc64
 
GCC_CFLAGS += -m64 -mcpu=ultrasparc
SUNCC_CFLAGS += -m64 -xarch=sparc -xregs=appl,no%float
/branches/network/kernel/arch/sparc64/src/asm.S
41,6 → 41,7
*/
.global memcpy
memcpy:
mov %o0, %o3 ! save dst
add %o1, 7, %g1
and %g1, -8, %g1
cmp %o1, %g1
59,7 → 60,7
mov %g2, %g3
2:
jmp %o7 + 8 ! exit point
mov %o1, %o0
mov %o3, %o0
3:
and %g1, -8, %g1
cmp %o0, %g1
93,7 → 94,7
mov %g2, %g3
 
jmp %o7 + 8 ! exit point
mov %o1, %o0
mov %o3, %o0
 
/*
* Almost the same as memcpy() except the loads are from userspace.
100,6 → 101,7
*/
.global memcpy_from_uspace
memcpy_from_uspace:
mov %o0, %o3 ! save dst
add %o1, 7, %g1
and %g1, -8, %g1
cmp %o1, %g1
118,7 → 120,7
mov %g2, %g3
2:
jmp %o7 + 8 ! exit point
mov %o1, %o0
mov %o3, %o0
3:
and %g1, -8, %g1
cmp %o0, %g1
152,7 → 154,7
mov %g2, %g3
 
jmp %o7 + 8 ! exit point
mov %o1, %o0
mov %o3, %o0
 
/*
* Almost the same as memcpy() except the stores are to userspace.
159,6 → 161,7
*/
.global memcpy_to_uspace
memcpy_to_uspace:
mov %o0, %o3 ! save dst
add %o1, 7, %g1
and %g1, -8, %g1
cmp %o1, %g1
177,7 → 180,7
mov %g2, %g3
2:
jmp %o7 + 8 ! exit point
mov %o1, %o0
mov %o3, %o0
3:
and %g1, -8, %g1
cmp %o0, %g1
211,7 → 214,7
mov %g2, %g3
 
jmp %o7 + 8 ! exit point
mov %o1, %o0
mov %o3, %o0
 
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address
274,6 → 277,8
wrpr %g0, 0, %cleanwin ! avoid information leak
 
mov %i2, %o0 ! uarg
xor %o1, %o1, %o1 ! %o1 is defined to hold pcb_ptr
! set it to 0
 
clr %i2
clr %i3
/branches/network/kernel/arch/sparc64/src/mm/cache.S
27,10 → 27,8
*/
 
#include <arch/arch.h>
#include <arch/mm/cache_spec.h>
 
#define DCACHE_SIZE (16 * 1024)
#define DCACHE_LINE_SIZE 32
 
#define DCACHE_TAG_SHIFT 2
 
.register %g2, #scratch
/branches/network/kernel/arch/sparc64/src/mm/as.c
76,7 → 76,7
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
sizeof(tsb_entry_t));
 
memsetb((uintptr_t) as->arch.itsb,
memsetb(as->arch.itsb,
(ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
return 0;
/branches/network/kernel/arch/sparc64/src/mm/tlb.c
490,7 → 490,7
*/
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
int i;
unsigned int i;
tlb_context_reg_t pc_save, ctx;
/* switch to nucleus because we are mapped by the primary context */
/branches/network/kernel/arch/sparc64/src/mm/frame.c
48,7 → 48,7
*/
void frame_arch_init(void)
{
int i;
unsigned int i;
pfn_t confdata;
 
if (config.cpu_active == 1) {
/branches/network/kernel/arch/sparc64/src/mm/page.c
66,7 → 66,7
} else {
 
#ifdef CONFIG_SMP
int i;
unsigned int i;
 
/*
* Copy locked DTLB entries from the BSP.
98,7 → 98,7
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
unsigned int order;
int i;
unsigned int i;
 
ASSERT(config.cpu_active == 1);
 
/branches/network/kernel/arch/sparc64/src/smp/smp.c
99,7 → 99,7
waking_up_mid = mid;
if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT)
printf("%s: waiting for processor (mid = %d) timed out\n",
printf("%s: waiting for processor (mid = %" PRIu32 ") timed out\n",
__func__, mid);
}
}
/branches/network/kernel/arch/sparc64/src/smp/ipi.c
116,7 → 116,7
*/
void ipi_broadcast_arch(int ipi)
{
int i;
unsigned int i;
void (* func)(void);
/branches/network/kernel/arch/sparc64/src/trap/exception.c
45,9 → 45,9
 
void dump_istate(istate_t *istate)
{
printf("TSTATE=%#llx\n", istate->tstate);
printf("TPC=%#llx (%s)\n", istate->tpc, get_symtab_entry(istate->tpc));
printf("TNPC=%#llx (%s)\n", istate->tnpc, get_symtab_entry(istate->tnpc));
printf("TSTATE=%#" PRIx64 "\n", istate->tstate);
printf("TPC=%#" PRIx64 " (%s)\n", istate->tpc, get_symtab_entry(istate->tpc));
printf("TNPC=%#" PRIx64 " (%s)\n", istate->tnpc, get_symtab_entry(istate->tnpc));
}
 
/** Handle instruction_access_exception. (0x8) */
/branches/network/kernel/arch/sparc64/src/trap/interrupt.c
97,8 → 97,8
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (intrcv=%#llx, "
"data0=%#llx)\n", CPU->id, intrcv, data0);
printf("cpu%u: spurious interrupt (intrcv=%#" PRIx64
", data0=%#" PRIx64 ")\n", CPU->id, intrcv, data0);
#endif
}
 
/branches/network/kernel/arch/sparc64/src/cpu/cpu.c
135,7 → 135,7
break;
}
 
printf("cpu%d: manuf=%s, impl=%s, mask=%d (%dMHz)\n", m->id, manuf,
printf("cpu%d: manuf=%s, impl=%s, mask=%d (%d MHz)\n", m->id, manuf,
impl, m->arch.ver.mask, m->arch.clock_frequency / 1000000);
}
 
/branches/network/kernel/arch/sparc64/src/context.S
36,55 → 36,15
* functions.
*/
#include <arch/context_offset.h>
 
.text
 
.global context_save_arch
.global context_restore_arch
 
.macro CONTEXT_STORE r
stx %sp, [\r + OFFSET_SP]
stx %o7, [\r + OFFSET_PC]
stx %i0, [\r + OFFSET_I0]
stx %i1, [\r + OFFSET_I1]
stx %i2, [\r + OFFSET_I2]
stx %i3, [\r + OFFSET_I3]
stx %i4, [\r + OFFSET_I4]
stx %i5, [\r + OFFSET_I5]
stx %fp, [\r + OFFSET_FP]
stx %i7, [\r + OFFSET_I7]
stx %l0, [\r + OFFSET_L0]
stx %l1, [\r + OFFSET_L1]
stx %l2, [\r + OFFSET_L2]
stx %l3, [\r + OFFSET_L3]
stx %l4, [\r + OFFSET_L4]
stx %l5, [\r + OFFSET_L5]
stx %l6, [\r + OFFSET_L6]
stx %l7, [\r + OFFSET_L7]
.endm
 
.macro CONTEXT_LOAD r
ldx [\r + OFFSET_SP], %sp
ldx [\r + OFFSET_PC], %o7
ldx [\r + OFFSET_I0], %i0
ldx [\r + OFFSET_I1], %i1
ldx [\r + OFFSET_I2], %i2
ldx [\r + OFFSET_I3], %i3
ldx [\r + OFFSET_I4], %i4
ldx [\r + OFFSET_I5], %i5
ldx [\r + OFFSET_FP], %fp
ldx [\r + OFFSET_I7], %i7
ldx [\r + OFFSET_L0], %l0
ldx [\r + OFFSET_L1], %l1
ldx [\r + OFFSET_L2], %l2
ldx [\r + OFFSET_L3], %l3
ldx [\r + OFFSET_L4], %l4
ldx [\r + OFFSET_L5], %l5
ldx [\r + OFFSET_L6], %l6
ldx [\r + OFFSET_L7], %l7
.endm
 
context_save_arch:
CONTEXT_STORE %o0
CONTEXT_SAVE_ARCH_CORE %o0
retl
mov 1, %o0 ! context_save_arch returns 1
 
98,6 → 58,6
#
flushw
CONTEXT_LOAD %o0
CONTEXT_RESTORE_ARCH_CORE %o0
retl
xor %o0, %o0, %o0 ! context_restore_arch returns 0
/branches/network/kernel/arch/ia64/Makefile.inc
29,10 → 29,14
## Toolchain configuration
#
 
ifndef CROSS_PREFIX
CROSS_PREFIX = /usr/local
endif
 
BFD_NAME = elf64-little
BFD_ARCH = ia64-elf64
TARGET = ia64-pc-linux-gnu
TOOLCHAIN_DIR = /usr/local/ia64
TOOLCHAIN_DIR = $(CROSS_PREFIX)/ia64
 
INIT0_ADDRESS = 0xe000000004404000
INIT0_SIZE = 0x100000
/branches/network/kernel/arch/ia64/src/asm.S
51,7 → 51,7
 
adds r14 = 7, in1
mov r2 = ar.lc
mov r8 = in1 ;;
mov r8 = in0
and r14 = -8, r14 ;;
cmp.ne p6, p7 = r14, in1
(p7) br.cond.dpnt 3f ;;
63,7 → 63,7
(p6) mov r17 = r0 ;;
(p6) mov ar.lc = r14
1:
add r14 = r16, r8
add r14 = r16, in1
add r15 = r16, in0
adds r17 = 1, r17 ;;
ld1 r14 = [r14]
72,7 → 72,6
br.cloop.sptk.few 1b ;;
2:
mov ar.lc = r2
 
mov ar.pfs = loc0
br.ret.sptk.many rp
3:
90,7 → 89,7
4:
shladd r14 = r16, 3, r0
adds r16 = 1, r17 ;;
add r15 = r8, r14
add r15 = in1, r14
add r14 = in0, r14
mov r17 = r16 ;;
ld8 r15 = [r15] ;;
104,7 → 103,7
cmp.eq p6, p7 = 0, r15
add in0 = r14, in0
adds r15 = -1, r15
add r17 = r14, r8
add r17 = r14, in1
(p6) br.cond.dpnt 2b ;;
mov ar.lc = r15
6:
116,7 → 115,6
st1 [r15] = r14
br.cloop.sptk.few 6b ;;
mov ar.lc = r2
 
mov ar.pfs = loc0
br.ret.sptk.many rp
163,6 → 161,9
 
xor r1 = r1, r1
/* r2 is defined to hold pcb_ptr - set it to 0 */
xor r2 = r2, r2
mov loc1 = cr.ifs
movl loc2 = PFM_MASK ;;
and loc1 = loc2, loc1 ;;
/branches/network/kernel/arch/ia64/src/mm/vhpt.c
81,7 → 81,7
 
void vhpt_invalidate_all()
{
memsetb((uintptr_t) vhpt_base, 1 << VHPT_WIDTH, 0);
memsetb(vhpt_base, 1 << VHPT_WIDTH, 0);
}
 
void vhpt_invalidate_asid(asid_t asid)
/branches/network/kernel/arch/ia64/src/mm/tlb.c
59,7 → 59,7
uintptr_t adr;
uint32_t count1, count2, stride1, stride2;
int i, j;
unsigned int i, j;
adr = PAL_PTCE_INFO_BASE();
count1 = PAL_PTCE_INFO_COUNT1();
69,8 → 69,8
ipl = interrupts_disable();
 
for(i = 0; i < count1; i++) {
for(j = 0; j < count2; j++) {
for (i = 0; i < count1; i++) {
for (j = 0; j < count2; j++) {
asm volatile (
"ptc.e %0 ;;"
:
/branches/network/kernel/arch/ia64/src/drivers/ega.c
71,7 → 71,7
/*
* Clear the screen.
*/
_memsetw((uintptr_t) videoram, SCREEN, 0x0720);
_memsetw(videoram, SCREEN, 0x0720);
 
chardev_initialize("ega_out", &ega_console, &ega_ops);
stdout = &ega_console;
102,7 → 102,7
return;
 
memcpy((void *) videoram, (void *) (videoram + ROW * 2), (SCREEN - ROW) * 2);
_memsetw((uintptr_t) (videoram + (SCREEN - ROW) * 2), ROW, 0x0720);
_memsetw(videoram + (SCREEN - ROW) * 2, ROW, 0x0720);
ega_cursor = ega_cursor - ROW;
}
 
/branches/network/kernel/arch/ia64/src/ia64.c
62,12 → 62,13
/* Setup usermode init tasks. */
 
//#ifdef I460GX
int i;
unsigned int i;
init.cnt = bootinfo->taskmap.count;
for(i=0;i<init.cnt;i++)
{
init.tasks[i].addr = ((unsigned long)bootinfo->taskmap.tasks[i].addr)|VRN_MASK;
init.tasks[i].size = bootinfo->taskmap.tasks[i].size;
for (i = 0; i < init.cnt; i++) {
init.tasks[i].addr = ((unsigned long) bootinfo->taskmap.tasks[i].addr) | VRN_MASK;
init.tasks[i].size = bootinfo->taskmap.tasks[i].size;
}
/*
#else
/branches/network/kernel/arch/ia64/include/mm/page.h
41,8 → 41,6
#define PAGE_SIZE FRAME_SIZE
#define PAGE_WIDTH FRAME_WIDTH
 
#define PAGE_COLOR_BITS 0 /* dummy */
 
#ifdef KERNEL
 
/** Bit width of the TLB-locked portion of kernel address space. */
/branches/network/kernel/arch/ia64/include/barrier.h
45,9 → 45,33
#define read_barrier() memory_barrier()
#define write_barrier() memory_barrier()
 
#define srlz_i() asm volatile (";; srlz.i ;;\n" ::: "memory")
#define srlz_d() asm volatile (";; srlz.d\n" ::: "memory")
#define srlz_i() \
asm volatile (";; srlz.i ;;\n" ::: "memory")
#define srlz_d() \
asm volatile (";; srlz.d\n" ::: "memory")
 
#define fc_i(a) \
asm volatile ("fc.i %0\n" :: "r" ((a)) : "memory")
#define sync_i() \
asm volatile (";; sync.i\n" ::: "memory")
 
#define smc_coherence(a) \
{ \
fc_i((a)); \
sync_i(); \
srlz_i(); \
}
 
#define FC_INVAL_MIN 32
#define smc_coherence_block(a, l) \
{ \
unsigned long i; \
for (i = 0; i < (l); i += FC_INVAL_MIN) \
fc_i((void *)(a) + i); \
sync_i(); \
srlz_i(); \
}
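(For context, a minimal sketch of how these new macros are expected to be used when the kernel modifies its own code; the helper below is hypothetical and not part of this change:)

	/* Hypothetical illustration: patch one word of code, then make the
	   instruction cache coherent with the store via smc_coherence(). */
	static void patch_code_word(uint32_t *addr, uint32_t new_word)
	{
		*addr = new_word;	/* modify the code in memory */
		smc_coherence(addr);	/* fc.i + sync.i + srlz.i on the patched address */
	}

The arm32 install_handler() and mips32 debugger hunks later in this revision follow the same pattern.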
 
#endif
 
/** @}
/branches/network/kernel/arch/ia64/include/memstr.h
37,10 → 37,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/network/kernel/arch/ia64/include/types.h
35,10 → 35,6
#ifndef KERN_ia64_TYPES_H_
#define KERN_ia64_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
69,14 → 65,29
typedef uint64_t unative_t;
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
#define PRIp "lx" /**< Format for uintptr_t. */
#define PRIs "lu" /**< Format for size_t. */
#define PRIc "lu" /**< Format for count_t. */
#define PRIi "lu" /**< Format for index_t. */
 
typedef int32_t inr_t;
typedef int32_t devno_t;
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
#define PRId32 "d" /**< Format for int32_t. */
#define PRId64 "ld" /**< Format for int64_t. */
#define PRIdn "d" /**< Format for native_t. */
 
#define PRIu8 "u" /**< Format for uint8_t. */
#define PRIu16 "u" /**< Format for uint16_t. */
#define PRIu32 "u" /**< Format for uint32_t. */
#define PRIu64 "lu" /**< Format for uint64_t. */
#define PRIun "u" /**< Format for unative_t. */
 
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */
#define PRIx32 "x" /**< Format for hexadecimal (u)uint32_t. */
#define PRIx64 "lx" /**< Format for hexadecimal (u)int64_t. */
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */
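(A short usage sketch with hypothetical values, matching the way the spurious-interrupt message near the top of this revision already consumes these macros:)

	uint64_t data0 = 0xdeadbeef;	/* hypothetical value */
	size_t cnt = 42;		/* hypothetical value */
	printf("data0=%#" PRIx64 ", cnt=%" PRIs "\n", data0, cnt);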
 
#endif
 
/** @}
/branches/network/kernel/arch/ia64/include/byteorder.h
35,15 → 35,9
#ifndef KERN_ia64_BYTEORDER_H_
#define KERN_ia64_BYTEORDER_H_
 
#include <byteorder.h>
 
/* IA-64 is little-endian */
#define uint32_t_le2host(n) (n)
#define uint64_t_le2host(n) (n)
#define ARCH_IS_LITTLE_ENDIAN
 
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n)
 
#endif
 
/** @}
/branches/network/kernel/arch/arm32/Makefile.inc
29,17 → 29,21
## Toolchain configuration
#
 
ifndef CROSS_PREFIX
CROSS_PREFIX = /usr/local
endif
 
BFD_NAME = elf32-littlearm
BFD_ARCH = arm
BFD = binary
TARGET = arm-linux-gnu
TOOLCHAIN_DIR = /usr/local/arm
TOOLCHAIN_DIR = $(CROSS_PREFIX)/arm
 
KERNEL_LOAD_ADDRESS = 0x80200000
 
ifeq ($(MACHINE), gxemul_testarm)
# ifeq ($(MACHINE), gxemul_testarm)
DMACHINE = MACHINE_GXEMUL_TESTARM
endif
# endif
 
ATSIGN = %
 
90,7 → 94,7
arch/$(ARCH)/src/mm/tlb.c \
arch/$(ARCH)/src/mm/page_fault.c
 
ifeq ($(MACHINE), gxemul_testarm)
# ifeq ($(MACHINE), gxemul_testarm)
ARCH_SOURCES += arch/$(ARCH)/src/drivers/gxemul.c
endif
# endif
 
/branches/network/kernel/arch/arm32/src/asm.S
45,7 → 45,8
add r3, r1, #3
bic r3, r3, #3
cmp r1, r3
stmdb sp!, {r4, lr}
stmdb sp!, {r4, r5, lr}
mov r5, r0 /* save dst */
beq 4f
1:
cmp r2, #0
58,8 → 59,8
cmp ip, r2
bne 2b
3:
mov r0, r1
ldmia sp!, {r4, pc}
mov r0, r5
ldmia sp!, {r4, r5, pc}
4:
add r3, r0, #3
bic r3, r3, #3
94,5 → 95,5
 
memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
mov r0, #0
ldmia sp!, {r4, pc}
mov r0, #0
ldmia sp!, {r4, r5, pc}
/branches/network/kernel/arch/arm32/src/userspace.c
70,8 → 70,11
/* set first parameter */
ustate.r0 = (uintptr_t) kernel_uarg->uspace_uarg;
 
/* %r1 is defined to hold pcb_ptr - set it to 0 */
ustate.r1 = 0;
 
/* clear other registers */
ustate.r1 = ustate.r2 = ustate.r3 = ustate.r4 = ustate.r5 =
ustate.r2 = ustate.r3 = ustate.r4 = ustate.r5 =
ustate.r6 = ustate.r7 = ustate.r8 = ustate.r9 = ustate.r10 =
ustate.r11 = ustate.r12 = ustate.lr = 0;
 
/branches/network/kernel/arch/arm32/src/exception.c
40,6 → 40,7
#include <interrupt.h>
#include <arch/machine.h>
#include <arch/mm/page_fault.h>
#include <arch/barrier.h>
#include <print.h>
#include <syscall/syscall.h>
 
209,7 → 210,7
*
* Addresses of handlers are stored in memory following exception vectors.
*/
static void install_handler (unsigned handler_addr, unsigned* vector)
static void install_handler(unsigned handler_addr, unsigned *vector)
{
/* relative address (relative to the exception vector) of the word
* where the handler's address is stored
219,6 → 220,7
/* make it LDR instruction and store at exception vector */
*vector = handler_address_ptr | LDR_OPCODE;
smc_coherence(*vector);
/* store handler's address */
*(vector + EXC_VECTORS) = handler_addr;
226,31 → 228,31
}
 
/** Low-level Reset Exception handler. */
static void reset_exception_entry()
static void reset_exception_entry(void)
{
PROCESS_EXCEPTION(EXC_RESET);
}
 
/** Low-level Software Interrupt Exception handler. */
static void swi_exception_entry()
static void swi_exception_entry(void)
{
PROCESS_EXCEPTION(EXC_SWI);
}
 
/** Low-level Undefined Instruction Exception handler. */
static void undef_instr_exception_entry()
static void undef_instr_exception_entry(void)
{
PROCESS_EXCEPTION(EXC_UNDEF_INSTR);
}
 
/** Low-level Fast Interrupt Exception handler. */
static void fiq_exception_entry()
static void fiq_exception_entry(void)
{
PROCESS_EXCEPTION(EXC_FIQ);
}
 
/** Low-level Prefetch Abort Exception handler. */
static void prefetch_abort_exception_entry()
static void prefetch_abort_exception_entry(void)
{
asm("sub lr, lr, #4");
PROCESS_EXCEPTION(EXC_PREFETCH_ABORT);
257,7 → 259,7
}
 
/** Low-level Data Abort Exception handler. */
static void data_abort_exception_entry()
static void data_abort_exception_entry(void)
{
asm("sub lr, lr, #8");
PROCESS_EXCEPTION(EXC_DATA_ABORT);
269,7 → 271,7
* because of the possible occurrence of a nested interrupt exception, which
* would overwrite (and thus spoil) the stack pointer.
*/
static void irq_exception_entry()
static void irq_exception_entry(void)
{
asm("sub lr, lr, #4");
setup_stack_and_save_regs();
/branches/network/kernel/arch/arm32/src/mm/page_fault.c
40,6 → 40,7
#include <genarch/mm/page_pt.h>
#include <arch.h>
#include <interrupt.h>
#include <print.h>
 
/** Returns value stored in fault status register.
*
173,7 → 174,8
*/
void data_abort(int exc_no, istate_t *istate)
{
fault_status_t fsr = read_fault_status_register();
fault_status_t fsr __attribute__ ((unused)) =
read_fault_status_register();
uintptr_t badvaddr = read_fault_address_register();
 
pf_access_t access = get_memory_access_type(istate->pc, badvaddr);
/branches/network/kernel/arch/arm32/src/arm32.c
55,7 → 55,7
/** Performs arm32 specific initialization before main_bsp() is called. */
void arch_pre_main(void)
{
int i;
unsigned int i;
 
init.cnt = bootinfo.cnt;
 
/branches/network/kernel/arch/arm32/src/debug/print.c
56,10 → 56,10
*/
static int debug_write(const char *str, size_t count, void *unused)
{
int i;
for (i = 0; i < count; ++i) {
unsigned int i;
for (i = 0; i < count; ++i)
putc(str[i]);
}
return i;
}
 
/branches/network/kernel/arch/arm32/src/cpu/cpu.c
56,7 → 56,7
};
 
/** Length of the #imp_data array */
static int imp_data_length = sizeof(imp_data) / sizeof(char *);
static unsigned int imp_data_length = sizeof(imp_data) / sizeof(char *);
 
/** Architecture names */
static char *arch_data[] = {
71,7 → 71,7
};
 
/** Length of the #arch_data array */
static int arch_data_length = sizeof(arch_data) / sizeof(char *);
static unsigned int arch_data_length = sizeof(arch_data) / sizeof(char *);
 
 
/** Retrieves processor identification from CP15 register 0.
/branches/network/kernel/arch/arm32/include/mm/page.h
43,8 → 43,6
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 0 /* dummy */
 
#ifndef __ASM__
# define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
# define PA2KA(x) (((uintptr_t) (x)) + 0x80000000)
/branches/network/kernel/arch/arm32/include/barrier.h
46,6 → 46,9
#define read_barrier() asm volatile ("" ::: "memory")
#define write_barrier() asm volatile ("" ::: "memory")
 
#define smc_coherence(a)
#define smc_coherence_block(a, l)
 
#endif
 
/** @}
/branches/network/kernel/arch/arm32/include/memstr.h
38,10 → 38,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/network/kernel/arch/arm32/include/types.h
42,10 → 42,6
# define ATTRIBUTE_PACKED
#endif
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed long int32_t;
68,15 → 64,29
typedef uint32_t unative_t;
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
#define PRIp "x" /**< Format for uintptr_t. */
#define PRIs "u" /**< Format for size_t. */
#define PRIc "u" /**< Format for count_t. */
#define PRIi "u" /**< Format for index_t. */
 
typedef int32_t inr_t;
typedef int32_t devno_t;
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
#define PRId32 "d" /**< Format for int32_t. */
#define PRId64 "lld" /**< Format for int64_t. */
#define PRIdn "d" /**< Format for native_t. */
 
#define PRIu8 "u" /**< Format for uint8_t. */
#define PRIu16 "u" /**< Format for uint16_t. */
#define PRIu32 "u" /**< Format for uint32_t. */
#define PRIu64 "llu" /**< Format for uint64_t. */
#define PRIun "u" /**< Format for unative_t. */
 
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */
#define PRIx32 "x" /**< Format for hexadecimal (u)uint32_t. */
#define PRIx64 "llx" /**< Format for hexadecimal (u)int64_t. */
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */
 
/** Page table entry.
*
* We have different structs for level 0 and level 1 page table entries.
/branches/network/kernel/arch/arm32/include/byteorder.h
36,24 → 36,10
#ifndef KERN_arm32_BYTEORDER_H_
#define KERN_arm32_BYTEORDER_H_
 
#include <byteorder.h>
 
#ifdef BIG_ENDIAN
 
#define uint32_t_le2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_le2host(n) uint64_t_byteorder_swap(n)
 
#define uint32_t_be2host(n) (n)
#define uint64_t_be2host(n) (n)
 
#define ARCH_IS_BIG_ENDIAN
#else
 
#define uint32_t_le2host(n) (n)
#define uint64_t_le2host(n) (n)
 
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n)
 
#define ARCH_IS_LITTLE_ENDIAN
#endif
 
#endif
/branches/network/kernel/arch/ppc32/Makefile.inc
29,11 → 29,15
## Toolchain configuration
#
 
ifndef CROSS_PREFIX
CROSS_PREFIX = /usr/local
endif
 
BFD_NAME = elf32-powerpc
BFD_ARCH = powerpc:common
BFD = binary
TARGET = ppc-linux-gnu
TOOLCHAIN_DIR = /usr/local/ppc
TOOLCHAIN_DIR = $(CROSS_PREFIX)/ppc
 
GCC_CFLAGS += -mcpu=powerpc -msoft-float -m32
AFLAGS += -a32
/branches/network/kernel/arch/ppc32/include/mm/page.h
40,8 → 40,6
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 0 /* dummy */
 
#ifdef KERNEL
 
#ifndef __ASM__
/branches/network/kernel/arch/ppc32/include/mm/tlb.h
36,6 → 36,8
#define KERN_ppc32_TLB_H_
 
#include <arch/interrupt.h>
#include <arch/types.h>
#include <typedefs.h>
 
typedef struct {
unsigned v : 1; /**< Valid */
/branches/network/kernel/arch/ppc32/include/barrier.h
42,6 → 42,43
#define read_barrier() asm volatile ("sync" ::: "memory")
#define write_barrier() asm volatile ("eieio" ::: "memory")
 
/*
* The IMB sequence used here is valid for all possible cache models
* on a uniprocessor. SMP might require a different sequence.
* See PowerPC Programming Environment for 32-Bit Microprocessors,
* chapter 5.1.5.2
*/
 
static inline void smc_coherence(void *addr)
{
asm volatile (
"dcbst 0, %0\n"
"sync\n"
"icbi 0, %0\n"
"isync\n"
:: "r" (addr)
);
}
 
#define COHERENCE_INVAL_MIN 4
 
static inline void smc_coherence_block(void *addr, unsigned long len)
{
unsigned long i;
 
for (i = 0; i < len; i += COHERENCE_INVAL_MIN) {
asm volatile ("dcbst 0, %0\n" :: "r" (addr + i));
}
 
asm volatile ("sync");
 
for (i = 0; i < len; i += COHERENCE_INVAL_MIN) {
asm volatile ("icbi 0, %0\n" :: "r" (addr + i));
}
 
asm volatile ("isync");
}
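(A sketch of the intended calling pattern; the symbols below are hypothetical, but the pattern matches the mips32 hunk later in this revision, which copies its exception vectors and then calls smc_coherence_block() on the destination:)

	/* Hypothetical illustration: copy a block of code into place, then
	   make the instruction cache coherent with the new contents. */
	extern uint8_t handler_stub[];
	extern size_t handler_stub_size;

	static void install_code_block(void *dst)
	{
		memcpy(dst, handler_stub, handler_stub_size);
		smc_coherence_block(dst, handler_stub_size);
	}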
 
#endif
 
/** @}
/branches/network/kernel/arch/ppc32/include/memstr.h
37,10 → 37,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/network/kernel/arch/ppc32/include/exception.h
82,6 → 82,7
{
istate->pc = retaddr;
}
 
/** Return true if exception happened while in userspace */
#include <panic.h>
static inline int istate_from_uspace(istate_t *istate)
89,6 → 90,7
panic("istate_from_uspace not yet implemented");
return 0;
}
 
static inline unative_t istate_get_pc(istate_t *istate)
{
return istate->pc;
/branches/network/kernel/arch/ppc32/include/boot/boot.h
38,7 → 38,7
#define BOOT_OFFSET 0x8000
 
/* Temporary stack size for boot process */
#define TEMP_STACK_SIZE 0x100
#define TEMP_STACK_SIZE 0x1000
 
#define TASKMAP_MAX_RECORDS 32
#define MEMMAP_MAX_RECORDS 32
/branches/network/kernel/arch/ppc32/include/drivers/cuda.h
36,6 → 36,7
#define KERN_ppc32_CUDA_H_
 
#include <arch/types.h>
#include <typedefs.h>
 
extern void cuda_init(devno_t devno, uintptr_t base, size_t size);
extern int cuda_get_scancode(void);
/branches/network/kernel/arch/ppc32/include/types.h
35,10 → 35,6
#ifndef KERN_ppc32_TYPES_H_
#define KERN_ppc32_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
61,14 → 57,31
typedef uint32_t unative_t;
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
/**< Formats for uintptr_t, size_t, count_t and index_t */
#define PRIp "x"
#define PRIs "u"
#define PRIc "u"
#define PRIi "u"
 
typedef int32_t inr_t;
typedef int32_t devno_t;
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */
#define PRId8 "d"
#define PRId16 "d"
#define PRId32 "d"
#define PRId64 "lld"
#define PRIdn "d"
 
#define PRIu8 "u"
#define PRIu16 "u"
#define PRIu32 "u"
#define PRIu64 "llu"
#define PRIun "u"
 
#define PRIx8 "x"
#define PRIx16 "x"
#define PRIx32 "x"
#define PRIx64 "llx"
#define PRIxn "x"
 
/** Page Table Entry. */
typedef struct {
unsigned p : 1; /**< Present bit. */
/branches/network/kernel/arch/ppc32/include/context_offset.h
73,4 → 73,59
#define OFFSET_FR31 0x88
#define OFFSET_FPSCR 0x90
 
#ifdef __ASM__
# include <arch/asm/regname.h>
 
# ctx: address of the structure with saved context
.macro CONTEXT_SAVE_ARCH_CORE ctx:req
stw sp, OFFSET_SP(\ctx)
stw r2, OFFSET_R2(\ctx)
stw r13, OFFSET_R13(\ctx)
stw r14, OFFSET_R14(\ctx)
stw r15, OFFSET_R15(\ctx)
stw r16, OFFSET_R16(\ctx)
stw r17, OFFSET_R17(\ctx)
stw r18, OFFSET_R18(\ctx)
stw r19, OFFSET_R19(\ctx)
stw r20, OFFSET_R20(\ctx)
stw r21, OFFSET_R21(\ctx)
stw r22, OFFSET_R22(\ctx)
stw r23, OFFSET_R23(\ctx)
stw r24, OFFSET_R24(\ctx)
stw r25, OFFSET_R25(\ctx)
stw r26, OFFSET_R26(\ctx)
stw r27, OFFSET_R27(\ctx)
stw r28, OFFSET_R28(\ctx)
stw r29, OFFSET_R29(\ctx)
stw r30, OFFSET_R30(\ctx)
stw r31, OFFSET_R31(\ctx)
.endm
 
# ctx: address of the structure with saved context
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req
lwz sp, OFFSET_SP(\ctx)
lwz r2, OFFSET_R2(\ctx)
lwz r13, OFFSET_R13(\ctx)
lwz r14, OFFSET_R14(\ctx)
lwz r15, OFFSET_R15(\ctx)
lwz r16, OFFSET_R16(\ctx)
lwz r17, OFFSET_R17(\ctx)
lwz r18, OFFSET_R18(\ctx)
lwz r19, OFFSET_R19(\ctx)
lwz r20, OFFSET_R20(\ctx)
lwz r21, OFFSET_R21(\ctx)
lwz r22, OFFSET_R22(\ctx)
lwz r23, OFFSET_R23(\ctx)
lwz r24, OFFSET_R24(\ctx)
lwz r25, OFFSET_R25(\ctx)
lwz r26, OFFSET_R26(\ctx)
lwz r27, OFFSET_R27(\ctx)
lwz r28, OFFSET_R28(\ctx)
lwz r29, OFFSET_R29(\ctx)
lwz r30, OFFSET_R30(\ctx)
lwz r31, OFFSET_R31(\ctx)
.endm
 
#endif /* __ASM__ */
 
#endif
/branches/network/kernel/arch/ppc32/include/byteorder.h
35,14 → 35,8
#ifndef KERN_ppc32_BYTEORDER_H_
#define KERN_ppc32_BYTEORDER_H_
 
#include <byteorder.h>
#define ARCH_IS_BIG_ENDIAN
 
#define uint32_t_le2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_le2host(n) uint64_t_byteorder_swap(n)
 
#define uint32_t_be2host(n) (n)
#define uint64_t_be2host(n) (n)
 
#endif
 
/** @}
/branches/network/kernel/arch/ppc32/src/asm.S
65,6 → 65,10
# set stack
mr sp, r4
 
# %r3 is defined to hold pcb_ptr - set it to 0
 
xor r3, r3, r3
# jump to userspace
199,47 → 203,7
rfi
memsetb:
rlwimi r5, r5, 8, 16, 23
rlwimi r5, r5, 16, 0, 15
addi r14, r3, -4
cmplwi 0, r4, 4
blt 7f
stwu r5, 4(r14)
beqlr
andi. r15, r14, 3
add r4, r15, r4
subf r14, r15, r14
srwi r15, r4, 2
mtctr r15
bdz 6f
1:
stwu r5, 4(r14)
bdnz 1b
6:
andi. r4, r4, 3
7:
cmpwi 0, r4, 0
beqlr
mtctr r4
addi r6, r6, 3
8:
stbu r5, 1(r14)
bdnz 8b
blr
b _memsetb
 
memcpy:
memcpy_from_uspace:
308,4 → 272,6
 
memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
b memcpy_from_uspace_failover_address
# return zero, failure
xor r3, r3, r3
blr
/branches/network/kernel/arch/ppc32/src/mm/tlb.c
47,16 → 47,19
* The as->lock must be held on entry to this function
* if lock is true.
*
* @param as Address space.
* @param lock Lock/unlock the address space.
* @param badvaddr Faulting virtual address.
* @param access Access mode that caused the fault.
* @param istate Pointer to interrupted state.
* @param pfrc Pointer to variable where as_page_fault() return code will be stored.
* @return PTE on success, NULL otherwise.
* @param as Address space.
* @param lock Lock/unlock the address space.
* @param badvaddr Faulting virtual address.
* @param access Access mode that caused the fault.
* @param istate Pointer to interrupted state.
* @param pfrc Pointer to variable where as_page_fault() return code
* will be stored.
* @return PTE on success, NULL otherwise.
*
*/
static pte_t *find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr, int access, istate_t *istate, int *pfrc)
static pte_t *
find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr, int access,
istate_t *istate, int *pfrc)
{
/*
* Check if the mapping exists in page tables.
77,27 → 80,27
*/
page_table_unlock(as, lock);
switch (rc = as_page_fault(badvaddr, access, istate)) {
case AS_PF_OK:
/*
* The higher-level page fault handler succeeded;
* the mapping ought to be in place.
*/
page_table_lock(as, lock);
pte = page_mapping_find(as, badvaddr);
ASSERT((pte) && (pte->p));
*pfrc = 0;
return pte;
case AS_PF_DEFER:
page_table_lock(as, lock);
*pfrc = rc;
return NULL;
case AS_PF_FAULT:
page_table_lock(as, lock);
printf("Page fault.\n");
*pfrc = rc;
return NULL;
default:
panic("unexpected rc (%d)\n", rc);
case AS_PF_OK:
/*
* The higher-level page fault handler succeeded;
* the mapping ought to be in place.
*/
page_table_lock(as, lock);
pte = page_mapping_find(as, badvaddr);
ASSERT((pte) && (pte->p));
*pfrc = 0;
return pte;
case AS_PF_DEFER:
page_table_lock(as, lock);
*pfrc = rc;
return NULL;
case AS_PF_FAULT:
page_table_lock(as, lock);
printf("Page fault.\n");
*pfrc = rc;
return NULL;
default:
panic("unexpected rc (%d)\n", rc);
}
}
}
114,7 → 117,8
s = get_symtab_entry(istate->lr);
if (s)
sym2 = s;
panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, symbol, sym2);
panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr,
istate->pc, symbol, sym2);
}
 
 
147,7 → 151,8
/* Find unused or colliding
PTE in PTEG */
for (i = 0; i < 8; i++) {
if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) {
if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) &&
(phte[base + i].api == api))) {
found = true;
break;
}
160,7 → 165,9
/* Find unused or colliding
PTE in PTEG */
for (i = 0; i < 8; i++) {
if ((!phte[base2 + i].v) || ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) {
if ((!phte[base2 + i].v) ||
((phte[base2 + i].vsid == vsid) &&
(phte[base2 + i].api == api))) {
found = true;
base = base2;
h = 1;
214,7 → 221,9
/* Find unused or colliding
PTE in PTEG */
for (i = 0; i < 8; i++) {
if ((!phte_physical[base + i].v) || ((phte_physical[base + i].vsid == vsid) && (phte_physical[base + i].api == api))) {
if ((!phte_physical[base + i].v) ||
((phte_physical[base + i].vsid == vsid) &&
(phte_physical[base + i].api == api))) {
found = true;
break;
}
227,7 → 236,9
/* Find unused or colliding
PTE in PTEG */
for (i = 0; i < 8; i++) {
if ((!phte_physical[base2 + i].v) || ((phte_physical[base2 + i].vsid == vsid) && (phte_physical[base2 + i].api == api))) {
if ((!phte_physical[base2 + i].v) ||
((phte_physical[base2 + i].vsid == vsid) &&
(phte_physical[base2 + i].api == api))) {
found = true;
base = base2;
h = 1;
254,8 → 265,8
 
/** Process Instruction/Data Storage Interrupt
*
* @param n Interrupt vector number.
* @param istate Interrupted register context.
* @param n Interrupt vector number.
* @param istate Interrupted register context.
*
*/
void pht_refill(int n, istate_t *istate)
284,21 → 295,22
page_table_lock(as, lock);
pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfrc);
pte = find_mapping_and_check(as, lock, badvaddr,
PF_ACCESS_READ /* FIXME */, istate, &pfrc);
if (!pte) {
switch (pfrc) {
case AS_PF_FAULT:
goto fail;
break;
case AS_PF_DEFER:
/*
* The page fault came during copy_from_uspace()
* or copy_to_uspace().
*/
page_table_unlock(as, lock);
return;
default:
panic("Unexpected pfrc (%d)\n", pfrc);
case AS_PF_FAULT:
goto fail;
break;
case AS_PF_DEFER:
/*
* The page fault came during copy_from_uspace()
* or copy_to_uspace().
*/
page_table_unlock(as, lock);
return;
default:
panic("Unexpected pfrc (%d)\n", pfrc);
}
}
316,8 → 328,8
 
/** Process Instruction/Data Storage Interrupt in Real Mode
*
* @param n Interrupt vector number.
* @param istate Interrupted register context.
* @param n Interrupt vector number.
* @param istate Interrupted register context.
*
*/
bool pht_real_refill(int n, istate_t *istate)
373,7 → 385,8
uint32_t i;
for (i = 0; i < 8192; i++) {
if ((phte[i].v) && (phte[i].vsid >= (asid << 4)) && (phte[i].vsid < ((asid << 4) + 16)))
if ((phte[i].v) && (phte[i].vsid >= (asid << 4)) &&
(phte[i].vsid < ((asid << 4) + 16)))
phte[i].v = 0;
}
tlb_invalidate_all();
407,7 → 420,11
} \
} else \
length = 0; \
printf(name ": page=%.*p frame=%.*p length=%d KB (mask=%#x)%s%s\n", sizeof(upper) * 2, upper & 0xffff0000, sizeof(lower) * 2, lower & 0xffff0000, length, mask, ((upper >> 1) & 1) ? " supervisor" : "", (upper & 1) ? " user" : "");
printf(name ": page=%.*p frame=%.*p length=%d KB (mask=%#x)%s%s\n", \
sizeof(upper) * 2, upper & 0xffff0000, sizeof(lower) * 2, \
lower & 0xffff0000, length, mask, \
((upper >> 1) & 1) ? " supervisor" : "", \
(upper & 1) ? " user" : "");
 
 
void tlb_print(void)
421,7 → 438,10
: "=r" (vsid)
: "r" (sr << 28)
);
printf("vsid[%d]: VSID=%.*p (ASID=%d)%s%s\n", sr, sizeof(vsid) * 2, vsid & 0xffffff, (vsid & 0xffffff) >> 4, ((vsid >> 30) & 1) ? " supervisor" : "", ((vsid >> 29) & 1) ? " user" : "");
printf("vsid[%d]: VSID=%.*p (ASID=%d)%s%s\n", sr,
sizeof(vsid) * 2, vsid & 0xffffff, (vsid & 0xffffff) >> 4,
((vsid >> 30) & 1) ? " supervisor" : "",
((vsid >> 29) & 1) ? " user" : "");
}
uint32_t upper;
/branches/network/kernel/arch/ppc32/src/mm/page.c
47,13 → 47,16
 
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
panic("Unable to map physical memory %p (%d bytes)", physaddr, size)
if (last_frame + ALIGN_UP(size, PAGE_SIZE) >
KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
panic("Unable to map physical memory %p (%" PRIs " bytes)",
physaddr, size)
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i),
physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
/branches/network/kernel/arch/ppc32/src/interrupt.c
80,7 → 80,7
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
printf("cpu%u: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
/branches/network/kernel/arch/ppc32/src/context.S
26,7 → 26,6
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
#include <arch/asm/regname.h>
#include <arch/context_offset.h>
 
.text
34,56 → 33,8
.global context_save_arch
.global context_restore_arch
 
.macro CONTEXT_STORE r
stw sp, OFFSET_SP(\r)
stw r2, OFFSET_R2(\r)
stw r13, OFFSET_R13(\r)
stw r14, OFFSET_R14(\r)
stw r15, OFFSET_R15(\r)
stw r16, OFFSET_R16(\r)
stw r17, OFFSET_R17(\r)
stw r18, OFFSET_R18(\r)
stw r19, OFFSET_R19(\r)
stw r20, OFFSET_R20(\r)
stw r21, OFFSET_R21(\r)
stw r22, OFFSET_R22(\r)
stw r23, OFFSET_R23(\r)
stw r24, OFFSET_R24(\r)
stw r25, OFFSET_R25(\r)
stw r26, OFFSET_R26(\r)
stw r27, OFFSET_R27(\r)
stw r28, OFFSET_R28(\r)
stw r29, OFFSET_R29(\r)
stw r30, OFFSET_R30(\r)
stw r31, OFFSET_R31(\r)
.endm
 
.macro CONTEXT_LOAD r
lwz sp, OFFSET_SP(\r)
lwz r2, OFFSET_R2(\r)
lwz r13, OFFSET_R13(\r)
lwz r14, OFFSET_R14(\r)
lwz r15, OFFSET_R15(\r)
lwz r16, OFFSET_R16(\r)
lwz r17, OFFSET_R17(\r)
lwz r18, OFFSET_R18(\r)
lwz r19, OFFSET_R19(\r)
lwz r20, OFFSET_R20(\r)
lwz r21, OFFSET_R21(\r)
lwz r22, OFFSET_R22(\r)
lwz r23, OFFSET_R23(\r)
lwz r24, OFFSET_R24(\r)
lwz r25, OFFSET_R25(\r)
lwz r26, OFFSET_R26(\r)
lwz r27, OFFSET_R27(\r)
lwz r28, OFFSET_R28(\r)
lwz r29, OFFSET_R29(\r)
lwz r30, OFFSET_R30(\r)
lwz r31, OFFSET_R31(\r)
.endm
 
context_save_arch:
CONTEXT_STORE r3
CONTEXT_SAVE_ARCH_CORE r3
mflr r4
stw r4, OFFSET_PC(r3)
96,7 → 47,7
blr
context_restore_arch:
CONTEXT_LOAD r3
CONTEXT_RESTORE_ARCH_CORE r3
lwz r4, OFFSET_CR(r3)
mtcr r4
/branches/network/kernel/arch/ia32xen/Makefile.inc
29,11 → 29,15
## Toolchain configuration
#
 
ifndef CROSS_PREFIX
CROSS_PREFIX = /usr/local
endif
 
BFD_NAME = elf32-i386
BFD_ARCH = i386
BFD = elf32-i386
TARGET = i686-pc-linux-gnu
TOOLCHAIN_DIR = /usr/local/i686
TOOLCHAIN_DIR = $(CROSS_PREFIX)/i686
 
DEFS += -DMACHINE=$(MACHINE) -D__32_BITS__
 
111,7 → 115,7
CONFIG_SOFTINT = y
 
ARCH_SOURCES = \
arch/$(ARCH)/src/context.s \
arch/$(ARCH)/src/context.S \
arch/$(ARCH)/src/debug/panic.s \
arch/$(ARCH)/src/delay.s \
arch/$(ARCH)/src/asm.S \
/branches/network/kernel/arch/ia32xen/src/context.s
File deleted
\ No newline at end of file
Property changes:
Deleted: svn:special
-*
\ No newline at end of property
/branches/network/kernel/arch/ia32xen/src/asm.S
67,7 → 67,7
* @param MEMCPY_SRC(%esp) Source address.
* @param MEMCPY_SIZE(%esp) Size.
*
* @return MEMCPY_SRC(%esp) on success and 0 on failure.
* @return MEMCPY_DST(%esp) on success and 0 on failure.
*/
memcpy:
memcpy_from_uspace:
92,7 → 92,7
0:
movl %edx, %edi
movl %eax, %esi
movl MEMCPY_SRC(%esp), %eax /* MEMCPY_SRC(%esp), success */
movl MEMCPY_DST(%esp), %eax /* MEMCPY_DST(%esp), success */
ret
/*
/branches/network/kernel/arch/ia32xen/src/userspace.c
68,6 → 68,10
"pushl %3\n"
"pushl %4\n"
"movl %5, %%eax\n"
 
/* %ebx is defined to hold pcb_ptr - set it to 0 */
"xorl %%ebx, %%ebx\n"
 
"iret\n"
:
: "i" (selector(UDATA_DES) | PL_USER),
/branches/network/kernel/arch/ia32xen/src/ia32xen.c
69,7 → 69,7
void arch_pre_main(void)
{
pte_t pte;
memsetb((uintptr_t) &pte, sizeof(pte), 0);
memsetb(&pte, sizeof(pte), 0);
pte.present = 1;
pte.writeable = 1;
103,7 → 103,7
uintptr_t tpa = PFN2ADDR(meminfo.start + meminfo.reserved);
uintptr_t tva = PA2KA(tpa);
memsetb(tva, PAGE_SIZE, 0);
memsetb((void *) tva, PAGE_SIZE, 0);
pte_t *tptl3 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(start_info.ptl0, PTL0_INDEX(tva)));
SET_FRAME_ADDRESS(tptl3, PTL3_INDEX(tva), 0);
/branches/network/kernel/arch/ia32xen/src/pm.c
98,7 → 98,7
 
void tss_initialize(tss_t *t)
{
memsetb((uintptr_t) t, sizeof(struct tss), 0);
memsetb(t, sizeof(struct tss), 0);
}
 
static void trap(void)
/branches/network/kernel/arch/ia32xen/src/smp/smp.c
98,7 → 98,7
*/
void kmp(void *arg)
{
int i;
unsigned int i;
ASSERT(ops != NULL);
 
142,11 → 142,11
/*
* Prepare new GDT for CPU in question.
*/
if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS*sizeof(struct descriptor), FRAME_ATOMIC)))
if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS * sizeof(struct descriptor), FRAME_ATOMIC)))
panic("couldn't allocate memory for GDT\n");
 
memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor));
memsetb((uintptr_t)(&gdt_new[TSS_DES]), sizeof(struct descriptor), 0);
memsetb(&gdt_new[TSS_DES], sizeof(struct descriptor), 0);
gdtr.base = (uintptr_t) gdt_new;
 
if (l_apic_send_init_ipi(ops->cpu_apic_id(i))) {
/branches/network/kernel/arch/ia32xen/src/smp/mps.c
77,11 → 77,11
struct __io_intr_entry *io_intr_entries = NULL;
struct __l_intr_entry *l_intr_entries = NULL;
 
int processor_entry_cnt = 0;
int bus_entry_cnt = 0;
int io_apic_entry_cnt = 0;
int io_intr_entry_cnt = 0;
int l_intr_entry_cnt = 0;
unsigned int processor_entry_cnt = 0;
unsigned int bus_entry_cnt = 0;
unsigned int io_apic_entry_cnt = 0;
unsigned int io_intr_entry_cnt = 0;
unsigned int l_intr_entry_cnt = 0;
 
waitq_t ap_completion_wq;
 
417,7 → 417,7
 
int mps_irq_to_pin(unsigned int irq)
{
int i;
unsigned int i;
for (i = 0; i < io_intr_entry_cnt; i++) {
if (io_intr_entries[i].src_bus_irq == irq && io_intr_entries[i].intr_type == 0)
/branches/network/kernel/arch/ia32xen/src/context.S
0,0 → 1,0
link ../../ia32/src/context.S
Property changes:
Added: svn:special
+*
\ No newline at end of property
/branches/network/kernel/arch/ia32xen/src/mm/tlb.c
65,7 → 65,7
*/
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
int i;
unsigned int i;
 
for (i = 0; i < cnt; i++)
invlpg(page + i * PAGE_SIZE);
/branches/network/kernel/arch/ia32xen/include/mm/page.h
40,8 → 40,6
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 0 /* dummy */
 
#ifdef KERNEL
 
#ifndef __ASM__
/branches/network/kernel/arch/ia32xen/include/types.h
61,14 → 61,6
typedef uint32_t unative_t;
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
typedef int32_t inr_t;
typedef int32_t devno_t;
 
/** Page Table Entry. */
typedef struct {
unsigned present : 1;
/branches/network/kernel/arch/ia32xen/include/context_offset.h
0,0 → 1,0
link ../../ia32/include/context_offset.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/branches/network/kernel/arch/ppc64/Makefile.inc
29,11 → 29,15
## Toolchain configuration
#
 
ifndef CROSS_PREFIX
CROSS_PREFIX = /usr/local
endif
 
BFD_NAME = elf64-powerpc
BFD_ARCH = powerpc:common64
BFD = binary
TARGET = ppc64-linux-gnu
TOOLCHAIN_DIR = /usr/local/ppc64
TOOLCHAIN_DIR = $(CROSS_PREFIX)/ppc64
 
GCC_CFLAGS += -mcpu=powerpc64 -msoft-float -m64
AFLAGS += -a64
/branches/network/kernel/arch/ppc64/include/mm/page.h
40,8 → 40,6
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 0 /* dummy */
 
#ifdef KERNEL
 
#ifndef __ASM__
/branches/network/kernel/arch/ppc64/include/barrier.h
38,10 → 38,13
#define CS_ENTER_BARRIER() asm volatile ("" ::: "memory")
#define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory")
 
#define memory_barrier() asm volatile ("sync" ::: "memory")
#define read_barrier() asm volatile ("sync" ::: "memory")
#define write_barrier() asm volatile ("eieio" ::: "memory")
#define memory_barrier() asm volatile ("sync" ::: "memory")
#define read_barrier() asm volatile ("sync" ::: "memory")
#define write_barrier() asm volatile ("eieio" ::: "memory")
 
#define smc_coherence(a)
#define smc_coherence_block(a, l)
 
#endif
 
/** @}
/branches/network/kernel/arch/ppc64/include/memstr.h
37,10 → 37,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/network/kernel/arch/ppc64/include/exception.h
82,6 → 82,7
{
istate->pc = retaddr;
}
 
/** Return true if exception happened while in userspace */
#include <panic.h>
static inline int istate_from_uspace(istate_t *istate)
89,6 → 90,7
panic("istate_from_uspace not yet implemented");
return 0;
}
 
static inline unative_t istate_get_pc(istate_t *istate)
{
return istate->pc;
/branches/network/kernel/arch/ppc64/include/types.h
35,10 → 35,6
#ifndef KERN_ppc64_TYPES_H_
#define KERN_ppc64_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
61,14 → 57,6
typedef uint64_t unative_t;
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
typedef int32_t inr_t;
typedef int32_t devno_t;
 
/** Page Table Entry. */
typedef struct {
unsigned p : 1; /**< Present bit. */
/branches/network/kernel/arch/ppc64/include/context_offset.h
73,4 → 73,60
#define OFFSET_FR31 0x88
#define OFFSET_FPSCR 0x90
 
 
#ifdef __ASM__
# include <arch/asm/regname.h>
 
# ctx: address of the structure with saved context
.macro CONTEXT_SAVE_ARCH_CORE ctx:req
stw sp, OFFSET_SP(\ctx)
stw r2, OFFSET_R2(\ctx)
stw r13, OFFSET_R13(\ctx)
stw r14, OFFSET_R14(\ctx)
stw r15, OFFSET_R15(\ctx)
stw r16, OFFSET_R16(\ctx)
stw r17, OFFSET_R17(\ctx)
stw r18, OFFSET_R18(\ctx)
stw r19, OFFSET_R19(\ctx)
stw r20, OFFSET_R20(\ctx)
stw r21, OFFSET_R21(\ctx)
stw r22, OFFSET_R22(\ctx)
stw r23, OFFSET_R23(\ctx)
stw r24, OFFSET_R24(\ctx)
stw r25, OFFSET_R25(\ctx)
stw r26, OFFSET_R26(\ctx)
stw r27, OFFSET_R27(\ctx)
stw r28, OFFSET_R28(\ctx)
stw r29, OFFSET_R29(\ctx)
stw r30, OFFSET_R30(\ctx)
stw r31, OFFSET_R31(\ctx)
.endm
 
# ctx: address of the structure with saved context
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req
lwz sp, OFFSET_SP(\ctx)
lwz r2, OFFSET_R2(\ctx)
lwz r13, OFFSET_R13(\ctx)
lwz r14, OFFSET_R14(\ctx)
lwz r15, OFFSET_R15(\ctx)
lwz r16, OFFSET_R16(\ctx)
lwz r17, OFFSET_R17(\ctx)
lwz r18, OFFSET_R18(\ctx)
lwz r19, OFFSET_R19(\ctx)
lwz r20, OFFSET_R20(\ctx)
lwz r21, OFFSET_R21(\ctx)
lwz r22, OFFSET_R22(\ctx)
lwz r23, OFFSET_R23(\ctx)
lwz r24, OFFSET_R24(\ctx)
lwz r25, OFFSET_R25(\ctx)
lwz r26, OFFSET_R26(\ctx)
lwz r27, OFFSET_R27(\ctx)
lwz r28, OFFSET_R28(\ctx)
lwz r29, OFFSET_R29(\ctx)
lwz r30, OFFSET_R30(\ctx)
lwz r31, OFFSET_R31(\ctx)
.endm
 
#endif /* __ASM__ */
 
#endif
/branches/network/kernel/arch/ppc64/include/byteorder.h
35,14 → 35,8
#ifndef KERN_ppc64_BYTEORDER_H_
#define KERN_ppc64_BYTEORDER_H_
 
#include <byteorder.h>
#define ARCH_IS_BIG_ENDIAN
 
#define uint32_t_le2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_le2host(n) uint64_t_byteorder_swap(n)
 
#define uint32_t_be2host(n) (n)
#define uint64_t_be2host(n) (n)
 
#endif
 
/** @}
/branches/network/kernel/arch/ppc64/src/asm.S
66,6 → 66,10
mr sp, r4
# %r3 is defined to hold pcb_ptr - set it to 0
 
xor r3, r3, r3
 
# jump to userspace
rfi
/branches/network/kernel/arch/ppc64/src/mm/page.c
252,7 → 252,7
 
void pht_init(void)
{
memsetb((uintptr_t) phte, 1 << PHT_BITS, 0);
memsetb(phte, 1 << PHT_BITS, 0);
}
 
 
289,7 → 289,7
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
panic("Unable to map physical memory %p (%d bytes)", physaddr, size)
panic("Unable to map physical memory %p (%" PRIs " bytes)", physaddr, size)
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
/branches/network/kernel/arch/ppc64/src/cpu/cpu.c
53,7 → 53,7
 
void cpu_print_report(cpu_t *m)
{
printf("cpu%d: version=%d, revision=%d\n", m->id, m->arch.version, m->arch.revision);
printf("cpu%u: version=%d, revision=%d\n", m->id, m->arch.version, m->arch.revision);
}
 
/** @}
/branches/network/kernel/arch/ppc64/src/interrupt.c
80,7 → 80,7
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
printf("cpu%u: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
/branches/network/kernel/arch/ppc64/src/context.S
34,56 → 34,8
.global context_save_arch
.global context_restore_arch
 
.macro CONTEXT_STORE r
stw sp, OFFSET_SP(\r)
stw r2, OFFSET_R2(\r)
stw r13, OFFSET_R13(\r)
stw r14, OFFSET_R14(\r)
stw r15, OFFSET_R15(\r)
stw r16, OFFSET_R16(\r)
stw r17, OFFSET_R17(\r)
stw r18, OFFSET_R18(\r)
stw r19, OFFSET_R19(\r)
stw r20, OFFSET_R20(\r)
stw r21, OFFSET_R21(\r)
stw r22, OFFSET_R22(\r)
stw r23, OFFSET_R23(\r)
stw r24, OFFSET_R24(\r)
stw r25, OFFSET_R25(\r)
stw r26, OFFSET_R26(\r)
stw r27, OFFSET_R27(\r)
stw r28, OFFSET_R28(\r)
stw r29, OFFSET_R29(\r)
stw r30, OFFSET_R30(\r)
stw r31, OFFSET_R31(\r)
.endm
 
.macro CONTEXT_LOAD r
lwz sp, OFFSET_SP(\r)
lwz r2, OFFSET_R2(\r)
lwz r13, OFFSET_R13(\r)
lwz r14, OFFSET_R14(\r)
lwz r15, OFFSET_R15(\r)
lwz r16, OFFSET_R16(\r)
lwz r17, OFFSET_R17(\r)
lwz r18, OFFSET_R18(\r)
lwz r19, OFFSET_R19(\r)
lwz r20, OFFSET_R20(\r)
lwz r21, OFFSET_R21(\r)
lwz r22, OFFSET_R22(\r)
lwz r23, OFFSET_R23(\r)
lwz r24, OFFSET_R24(\r)
lwz r25, OFFSET_R25(\r)
lwz r26, OFFSET_R26(\r)
lwz r27, OFFSET_R27(\r)
lwz r28, OFFSET_R28(\r)
lwz r29, OFFSET_R29(\r)
lwz r30, OFFSET_R30(\r)
lwz r31, OFFSET_R31(\r)
.endm
 
context_save_arch:
CONTEXT_STORE r3
CONTEXT_SAVE_ARCH_CORE r3
mflr r4
stw r4, OFFSET_PC(r3)
96,7 → 48,7
blr
context_restore_arch:
CONTEXT_LOAD r3
CONTEXT_RESTORE_ARCH_CORE r3
lwz r4, OFFSET_CR(r3)
mtcr r4
/branches/network/kernel/arch/mips32/Makefile.inc
29,9 → 29,13
## Toolchain configuration
#
 
ifndef CROSS_PREFIX
CROSS_PREFIX = /usr/local
endif
 
BFD_ARCH = mips
TARGET = mipsel-linux-gnu
TOOLCHAIN_DIR = /usr/local/mipsel
TOOLCHAIN_DIR = $(CROSS_PREFIX)/mipsel
 
KERNEL_LOAD_ADDRESS = 0x80100000
INIT_ADDRESS = 0x81000000
56,20 → 60,6
## Accepted MACHINEs
#
 
ifeq ($(MACHINE),indy)
# GCC 4.0.1 compiled for mipsEL has problems compiling in
# BigEndian mode with the swl/swr/lwl/lwr instructions.
# We have to compile it with mips-sgi-irix5 to get it right.
BFD_NAME = elf32-bigmips
BFD = ecoff-bigmips --impure
TARGET = mips-sgi-irix5
TOOLCHAIN_DIR = /usr/local/mips/bin
KERNEL_LOAD_ADDRESS = 0x88002000
GCC_CFLAGS += -EB -DBIG_ENDIAN -DARCH_HAS_FPU -march=r4600
INIT_ADDRESS = 0
INIT_SIZE = 0
endif
ifeq ($(MACHINE),lgxemul)
BFD_NAME = elf32-tradlittlemips
BFD = binary
79,7 → 69,7
BFD_NAME = elf32-bigmips
BFD = ecoff-bigmips
TARGET = mips-sgi-irix5
TOOLCHAIN_DIR = /usr/local/mips/bin
TOOLCHAIN_DIR = $(CROSS_PREFIX)/mips/bin
GCC_CFLAGS += -EB -DBIG_ENDIAN -DARCH_HAS_FPU -mips3
INIT_ADDRESS = 0x81800000
endif
123,7 → 113,6
arch/$(ARCH)/src/mm/as.c \
arch/$(ARCH)/src/fpu_context.c \
arch/$(ARCH)/src/ddi/ddi.c \
arch/$(ARCH)/src/drivers/arc.c \
arch/$(ARCH)/src/drivers/msim.c \
arch/$(ARCH)/src/drivers/serial.c \
arch/$(ARCH)/src/smp/order.c
/branches/network/kernel/arch/mips32/src/asm.S
71,6 → 71,7
and $v0,$v0,$v1
beq $a1,$v0,3f
move $t0,$a0
move $t2,$a0 # save dst
 
0:
beq $a2,$zero,2f
86,7 → 87,7
 
2:
jr $ra
move $v0,$a1
move $v0,$t2
 
3:
addiu $v0,$a0,3
126,7 → 127,7
sb $a0,0($v1)
 
jr $ra
move $v0,$a1
move $v0,$t2
 
memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
/branches/network/kernel/arch/mips32/src/mm/frame.c
32,27 → 32,238
/** @file
*/
 
#include <macros.h>
#include <arch/mm/frame.h>
#include <arch/mm/tlb.h>
#include <interrupt.h>
#include <mm/frame.h>
#include <mm/asid.h>
#include <config.h>
#include <arch/drivers/arc.h>
#include <arch/drivers/msim.h>
#include <arch/drivers/serial.h>
#include <print.h>
 
#define ZERO_PAGE_MASK TLB_PAGE_MASK_256K
#define ZERO_FRAMES 2048
#define ZERO_PAGE_WIDTH 18 /* 256K */
#define ZERO_PAGE_SIZE (1 << ZERO_PAGE_WIDTH)
#define ZERO_PAGE_ASID ASID_INVALID
#define ZERO_PAGE_TLBI 0
#define ZERO_PAGE_ADDR 0
#define ZERO_PAGE_OFFSET (ZERO_PAGE_SIZE / sizeof(uint32_t) - 1)
#define ZERO_PAGE_VALUE (((volatile uint32_t *) ZERO_PAGE_ADDR)[ZERO_PAGE_OFFSET])
 
#define ZERO_PAGE_VALUE_KSEG1(frame) (((volatile uint32_t *) (0xa0000000 + (frame << ZERO_PAGE_WIDTH)))[ZERO_PAGE_OFFSET])
 
#define MAX_REGIONS 32
 
typedef struct {
pfn_t start;
pfn_t count;
} phys_region_t;
 
static count_t phys_regions_count = 0;
static phys_region_t phys_regions[MAX_REGIONS];
 
 
/** Check whether frame is available
*
* Returns true if the given frame is generally available for use.
* Returns false if the frame is used for physical-memory-mapped
* devices and thus cannot be used.
*
*/
static bool frame_available(pfn_t frame)
{
#if MACHINE == msim
/* MSIM device (dprinter) */
if (frame == (KA2PA(MSIM_VIDEORAM) >> ZERO_PAGE_WIDTH))
return false;
/* MSIM device (dkeyboard) */
if (frame == (KA2PA(MSIM_KBD_ADDRESS) >> ZERO_PAGE_WIDTH))
return false;
#endif
 
#if MACHINE == simics
/* Simics device (serial line) */
if (frame == (KA2PA(SERIAL_ADDRESS) >> ZERO_PAGE_WIDTH))
return false;
#endif
 
#if (MACHINE == lgxemul) || (MACHINE == bgxemul)
/* gxemul devices */
if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE,
0x10000000, MB2SIZE(256)))
return false;
#endif
return true;
}
 
 
/** Check whether frame is safe to write
*
* Returns true if the given frame is safe for a read/write test.
* Returns false if the frame should not be touched.
*
*/
static bool frame_safe(pfn_t frame)
{
/* Kernel structures */
if ((frame << ZERO_PAGE_WIDTH) < KA2PA(config.base))
return false;
/* Kernel */
if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE,
KA2PA(config.base), config.kernel_size))
return false;
/* Kernel stack */
if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE,
KA2PA(config.stack_base), config.stack_size))
return false;
/* Init tasks */
bool safe = true;
count_t i;
for (i = 0; i < init.cnt; i++)
if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE,
KA2PA(init.tasks[i].addr), init.tasks[i].size)) {
safe = false;
break;
}
return safe;
}
 
static void frame_add_region(pfn_t start_frame, pfn_t end_frame)
{
if (end_frame > start_frame) {
/* Convert 256 KB frames to 16 KB frames */
pfn_t first = ADDR2PFN(start_frame << ZERO_PAGE_WIDTH);
pfn_t count = ADDR2PFN((end_frame - start_frame) << ZERO_PAGE_WIDTH);
/* Interrupt vector frame is blacklisted */
pfn_t conf_frame;
if (first == 0)
conf_frame = 1;
else
conf_frame = first;
zone_create(first, count, conf_frame, 0);
if (phys_regions_count < MAX_REGIONS) {
phys_regions[phys_regions_count].start = first;
phys_regions[phys_regions_count].count = count;
phys_regions_count++;
}
}
}
 
 
/** Create memory zones
*
* If ARC is known, read information from ARC, otherwise
* assume some defaults.
* - blacklist first FRAME because there is an exception vector
* Walk through available 256 KB chunks of physical
* memory and create zones.
*
* Note: It is assumed that the TLB is not yet being
* used in any way, thus there is no interference.
*
*/
void frame_arch_init(void)
{
if (!arc_frame_init()) {
zone_create(0, ADDR2PFN(CONFIG_MEMORY_SIZE), 1, 0);
/*
* Blacklist interrupt vector
*/
frame_mark_unavailable(0, 1);
ipl_t ipl = interrupts_disable();
/* Clear and initialize TLB */
cp0_pagemask_write(ZERO_PAGE_MASK);
cp0_entry_lo0_write(0);
cp0_entry_lo1_write(0);
cp0_entry_hi_write(0);
 
count_t i;
for (i = 0; i < TLB_ENTRY_COUNT; i++) {
cp0_index_write(i);
tlbwi();
}
pfn_t start_frame = 0;
pfn_t frame;
bool avail = true;
/* Walk through all 256 KB frames */
for (frame = 0; frame < ZERO_FRAMES; frame++) {
if (!frame_available(frame))
avail = false;
else {
if (frame_safe(frame)) {
entry_lo_t lo0;
entry_lo_t lo1;
entry_hi_t hi;
tlb_prepare_entry_lo(&lo0, false, true, true, false, frame << (ZERO_PAGE_WIDTH - 12));
tlb_prepare_entry_lo(&lo1, false, false, false, false, 0);
tlb_prepare_entry_hi(&hi, ZERO_PAGE_ASID, ZERO_PAGE_ADDR);
cp0_pagemask_write(ZERO_PAGE_MASK);
cp0_entry_lo0_write(lo0.value);
cp0_entry_lo1_write(lo1.value);
cp0_entry_hi_write(hi.value);
cp0_index_write(ZERO_PAGE_TLBI);
tlbwi();
ZERO_PAGE_VALUE = 0;
if (ZERO_PAGE_VALUE != 0)
avail = false;
else {
ZERO_PAGE_VALUE = 0xdeadbeef;
if (ZERO_PAGE_VALUE != 0xdeadbeef)
avail = false;
#if (MACHINE == lgxemul) || (MACHINE == bgxemul)
else {
ZERO_PAGE_VALUE_KSEG1(frame) = 0xaabbccdd;
if (ZERO_PAGE_VALUE_KSEG1(frame) != 0xaabbccdd)
avail = false;
}
#endif
}
}
}
if (!avail) {
frame_add_region(start_frame, frame);
start_frame = frame + 1;
avail = true;
}
}
frame_add_region(start_frame, frame);
/* Blacklist interrupt vector frame */
frame_mark_unavailable(0, 1);
/* Cleanup */
cp0_pagemask_write(ZERO_PAGE_MASK);
cp0_entry_lo0_write(0);
cp0_entry_lo1_write(0);
cp0_entry_hi_write(0);
cp0_index_write(ZERO_PAGE_TLBI);
tlbwi();
interrupts_restore(ipl);
}
 
 
void physmem_print(void)
{
printf("Base Size\n");
printf("---------- ----------\n");
count_t i;
for (i = 0; i < phys_regions_count; i++) {
printf("%#010x %10u\n",
PFN2ADDR(phys_regions[i].start), PFN2ADDR(phys_regions[i].count));
}
}
 
/** @}
*/
/branches/network/kernel/arch/mips32/src/mm/tlb.c
53,9 → 53,6
 
static pte_t *find_mapping_and_check(uintptr_t badvaddr, int access, istate_t *istate, int *pfrc);
 
static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, uintptr_t pfn);
static void prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr);
 
/** Initialize TLB
*
* Initialize TLB.
76,7 → 73,6
cp0_index_write(i);
tlbwi();
}
 
/*
* The kernel is going to make use of some wired
131,8 → 127,8
*/
pte->a = 1;
 
prepare_entry_hi(&hi, asid, badvaddr);
prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn);
tlb_prepare_entry_hi(&hi, asid, badvaddr);
tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn);
 
/*
* New entry is to be inserted into TLB
178,7 → 174,7
* Locate the faulting entry in TLB.
*/
hi.value = cp0_entry_hi_read();
prepare_entry_hi(&hi, hi.asid, badvaddr);
tlb_prepare_entry_hi(&hi, hi.asid, badvaddr);
cp0_entry_hi_write(hi.value);
tlbp();
index.value = cp0_index_read();
221,7 → 217,7
*/
pte->a = 1;
 
prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn);
tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn);
 
/*
* The entry is to be updated in TLB.
262,7 → 258,7
* Locate the faulting entry in TLB.
*/
hi.value = cp0_entry_hi_read();
prepare_entry_hi(&hi, hi.asid, badvaddr);
tlb_prepare_entry_hi(&hi, hi.asid, badvaddr);
cp0_entry_hi_write(hi.value);
tlbp();
index.value = cp0_index_read();
312,7 → 308,7
pte->a = 1;
pte->d = 1;
 
prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->cacheable, pte->pfn);
tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->cacheable, pte->pfn);
 
/*
* The entry is to be updated in TLB.
445,7 → 441,7
}
}
 
void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, uintptr_t pfn)
void tlb_prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, uintptr_t pfn)
{
lo->value = 0;
lo->g = g;
455,7 → 451,7
lo->pfn = pfn;
}
 
void prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr)
void tlb_prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr)
{
hi->value = ALIGN_DOWN(addr, PAGE_SIZE * 2);
hi->asid = asid;
572,7 → 568,7
*/
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
int i;
unsigned int i;
ipl_t ipl;
entry_lo_t lo0, lo1;
entry_hi_t hi, hi_save;
583,9 → 579,9
hi_save.value = cp0_entry_hi_read();
ipl = interrupts_disable();
 
for (i = 0; i < cnt+1; i+=2) {
for (i = 0; i < cnt + 1; i += 2) {
hi.value = 0;
prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE);
tlb_prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE);
cp0_entry_hi_write(hi.value);
 
tlbp();
/branches/network/kernel/arch/mips32/src/mm/as.c
44,7 → 44,7
/** Architecture dependent address space init. */
void as_arch_init(void)
{
as_operations = &as_pt_operations;
as_operations = &as_pt_operations;
asid_fifo_init();
}
 
/branches/network/kernel/arch/mips32/src/drivers/arc.c
File deleted
/branches/network/kernel/arch/mips32/src/drivers/msim.c
39,12 → 39,9
#include <arch/cp0.h>
#include <console/console.h>
#include <sysinfo/sysinfo.h>
#include <ddi/ddi.h>
 
/** Address of devices. */
#define MSIM_VIDEORAM 0xB0000000
#define MSIM_KBD_ADDRESS 0xB0000000
#define MSIM_KBD_IRQ 2
 
static parea_t msim_parea;
static chardev_t console;
static irq_t msim_irq;
 
158,6 → 155,16
sysinfo_set_item_val("kbd.devno", NULL, devno);
sysinfo_set_item_val("kbd.inr", NULL, MSIM_KBD_IRQ);
sysinfo_set_item_val("kbd.address.virtual", NULL, MSIM_KBD_ADDRESS);
msim_parea.pbase = KA2PA(MSIM_VIDEORAM);
msim_parea.vbase = MSIM_VIDEORAM;
msim_parea.frames = 1;
msim_parea.cacheable = false;
ddi_parea_register(&msim_parea);
sysinfo_set_item_val("fb", NULL, true);
sysinfo_set_item_val("fb.kind", NULL, 3);
sysinfo_set_item_val("fb.address.physical", NULL, KA2PA(MSIM_VIDEORAM));
}
 
/** @}
/branches/network/kernel/arch/mips32/src/console.c
34,18 → 34,15
 
#include <console/console.h>
#include <arch/console.h>
#include <arch/drivers/arc.h>
#include <arch/drivers/serial.h>
#include <arch/drivers/msim.h>
 
void console_init(devno_t devno)
{
if (!arc_console()) {
if (serial_init())
serial_console(devno);
else
msim_console(devno);
}
if (serial_init())
serial_console(devno);
else
msim_console(devno);
}
 
/** Acquire console back for kernel
/branches/network/kernel/arch/mips32/src/mips32.c
48,8 → 48,8
#include <sysinfo/sysinfo.h>
 
#include <arch/interrupt.h>
#include <arch/drivers/arc.h>
#include <console/chardev.h>
#include <arch/barrier.h>
#include <arch/debugger.h>
#include <genarch/fb/fb.h>
#include <genarch/fb/visuals.h>
96,18 → 96,21
/* Initialize dispatch table */
exception_init();
arc_init();
 
/* Copy the exception vectors to the right places */
memcpy(TLB_EXC, (char *) tlb_refill_entry, EXCEPTION_JUMP_SIZE);
smc_coherence_block(TLB_EXC, EXCEPTION_JUMP_SIZE);
memcpy(NORM_EXC, (char *) exception_entry, EXCEPTION_JUMP_SIZE);
smc_coherence_block(NORM_EXC, EXCEPTION_JUMP_SIZE);
memcpy(CACHE_EXC, (char *) cache_error_entry, EXCEPTION_JUMP_SIZE);
smc_coherence_block(CACHE_EXC, EXCEPTION_JUMP_SIZE);
/*
* Switch to BEV normal level so that exception vectors point to the kernel.
* Clear the error level.
* Switch to BEV normal level so that exception vectors point to the
* kernel. Clear the error level.
*/
cp0_status_write(cp0_status_read() & ~(cp0_status_bev_bootstrap_bit|cp0_status_erl_error_bit));
cp0_status_write(cp0_status_read() &
~(cp0_status_bev_bootstrap_bit | cp0_status_erl_error_bit));
 
/*
* Mask all interrupts
122,7 → 125,8
interrupt_init();
console_init(device_assign_devno());
#ifdef CONFIG_FB
fb_init(0x12000000, 640, 480, 1920, VISUAL_RGB_8_8_8); // gxemul framebuffer
/* GXemul framebuffer */
fb_init(0x12000000, 640, 480, 1920, VISUAL_RGB_8_8_8);
#endif
sysinfo_set_item_val("machine." STRING(MACHINE), NULL, 1);
}
143,13 → 147,14
{
/* EXL = 1, UM = 1, IE = 1 */
cp0_status_write(cp0_status_read() | (cp0_status_exl_exception_bit |
cp0_status_um_bit | cp0_status_ie_enabled_bit));
cp0_status_um_bit | cp0_status_ie_enabled_bit));
cp0_epc_write((uintptr_t) kernel_uarg->uspace_entry);
userspace_asm(((uintptr_t) kernel_uarg->uspace_stack + PAGE_SIZE),
(uintptr_t) kernel_uarg->uspace_uarg,
(uintptr_t) kernel_uarg->uspace_entry);
(uintptr_t) kernel_uarg->uspace_uarg,
(uintptr_t) kernel_uarg->uspace_entry);
while (1);
while (1)
;
}
 
/** Perform mips32 specific tasks needed before the new task is run. */
160,7 → 165,8
/** Perform mips32 specific tasks needed before the new thread is scheduled. */
void before_thread_runs_arch(void)
{
supervisor_sp = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
supervisor_sp = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE -
SP_DELTA];
}
 
void after_thread_ran_arch(void)
179,10 → 185,10
 
void arch_reboot(void)
{
if (!arc_reboot())
___halt();
___halt();
while (1);
while (1)
;
}
 
/** @}
/branches/network/kernel/arch/mips32/src/interrupt.c
38,7 → 38,6
#include <arch.h>
#include <arch/cp0.h>
#include <time/clock.h>
#include <arch/drivers/arc.h>
#include <ipc/sysipc.h>
#include <ddi/device.h>
 
/branches/network/kernel/arch/mips32/src/start.S
349,5 → 349,7
userspace_asm:
add $sp, $a0, 0
add $v0, $a1, 0
add $t9, $a2, 0 # Set up correct entry into PIC code
add $t9, $a2, 0 # Set up correct entry into PIC code
xor $a0, $a0, $a0 # $a0 is defined to hold pcb_ptr
# set it to 0
eret
/branches/network/kernel/arch/mips32/src/debugger.c
33,6 → 33,7
*/
 
#include <arch/debugger.h>
#include <arch/barrier.h>
#include <memstr.h>
#include <console/kconsole.h>
#include <console/cmd.h>
72,7 → 73,8
};
static cmd_info_t addbkpt_info = {
.name = "addbkpt",
.description = "addbkpt <&symbol> - new bkpoint. Break on J/Branch insts unsupported.",
.description = "addbkpt <&symbol> - new bkpoint. Break on J/Branch "
"insts unsupported.",
.func = cmd_add_breakpoint,
.argc = 1,
.argv = &add_argv
84,7 → 86,8
};
static cmd_info_t addbkpte_info = {
.name = "addbkpte",
.description = "addebkpte <&symbol> <&func> - new bkpoint. Call func(or Nothing if 0).",
.description = "addebkpte <&symbol> <&func> - new bkpoint. Call "
"func(or Nothing if 0).",
.func = cmd_add_breakpoint,
.argc = 2,
.argv = adde_argv
93,7 → 96,7
static struct {
uint32_t andmask;
uint32_t value;
}jmpinstr[] = {
} jmpinstr[] = {
{0xf3ff0000, 0x41000000}, /* BCzF */
{0xf3ff0000, 0x41020000}, /* BCzFL */
{0xf3ff0000, 0x41010000}, /* BCzT */
117,7 → 120,7
{0xfc000000, 0x08000000}, /* J */
{0xfc000000, 0x0c000000}, /* JAL */
{0xfc1f07ff, 0x00000009}, /* JALR */
{0,0} /* EndOfTable */
{0, 0} /* EndOfTable */
};
 
/** Test, if the given instruction is a jump or branch instruction
129,7 → 132,7
{
int i;
 
for (i=0;jmpinstr[i].andmask;i++) {
for (i = 0; jmpinstr[i].andmask; i++) {
if ((instr & jmpinstr[i].andmask) == jmpinstr[i].value)
return true;
}
152,14 → 155,16
spinlock_lock(&bkpoint_lock);
 
/* Check, that the breakpoints do not conflict */
for (i=0; i<BKPOINTS_MAX; i++) {
for (i = 0; i < BKPOINTS_MAX; i++) {
if (breakpoints[i].address == (uintptr_t)argv->intval) {
printf("Duplicate breakpoint %d.\n", i);
spinlock_unlock(&bkpoints_lock);
return 0;
} else if (breakpoints[i].address == (uintptr_t)argv->intval + sizeof(unative_t) || \
breakpoints[i].address == (uintptr_t)argv->intval - sizeof(unative_t)) {
printf("Adjacent breakpoints not supported, conflict with %d.\n", i);
} else if (breakpoints[i].address == (uintptr_t)argv->intval +
sizeof(unative_t) || breakpoints[i].address ==
(uintptr_t)argv->intval - sizeof(unative_t)) {
printf("Adjacent breakpoints not supported, conflict "
"with %d.\n", i);
spinlock_unlock(&bkpoints_lock);
return 0;
}
166,7 → 171,7
}
 
for (i=0; i<BKPOINTS_MAX; i++)
for (i = 0; i < BKPOINTS_MAX; i++)
if (!breakpoints[i].address) {
cur = &breakpoints[i];
break;
185,7 → 190,7
cur->flags = 0;
} else { /* We are adding an extended breakpoint */
cur->flags = BKPOINT_FUNCCALL;
cur->bkfunc = (void (*)(void *, istate_t *)) argv[1].intval;
cur->bkfunc = (void (*)(void *, istate_t *)) argv[1].intval;
}
if (is_jump(cur->instruction))
cur->flags |= BKPOINT_ONESHOT;
193,6 → 198,7
 
/* Set breakpoint */
*((unative_t *)cur->address) = 0x0d;
smc_coherence(cur->address);
 
spinlock_unlock(&bkpoint_lock);
interrupts_restore(ipl);
200,8 → 206,6
return 1;
}
 
 
 
/** Remove breakpoint from table */
int cmd_del_breakpoint(cmd_arg_t *argv)
{
208,7 → 212,7
bpinfo_t *cur;
ipl_t ipl;
 
if (argv->intval < 0 || argv->intval > BKPOINTS_MAX) {
if (argv->intval > BKPOINTS_MAX) {
printf("Invalid breakpoint number.\n");
return 0;
}
229,7 → 233,9
return 0;
}
((uint32_t *)cur->address)[0] = cur->instruction;
smc_coherence(((uint32_t *)cur->address)[0]);
((uint32_t *)cur->address)[1] = cur->nextinstruction;
smc_coherence(((uint32_t *)cur->address)[1]);
 
cur->address = NULL;
 
252,11 → 258,11
symbol = get_symtab_entry(breakpoints[i].address);
printf("%-2u %-5d %#10zx %-6s %-7s %-8s %s\n", i,
breakpoints[i].counter, breakpoints[i].address,
((breakpoints[i].flags & BKPOINT_INPROG) ? "true" : "false"),
((breakpoints[i].flags & BKPOINT_ONESHOT) ? "true" : "false"),
((breakpoints[i].flags & BKPOINT_FUNCCALL) ? "true" : "false"),
symbol);
breakpoints[i].counter, breakpoints[i].address,
((breakpoints[i].flags & BKPOINT_INPROG) ? "true" :
"false"), ((breakpoints[i].flags & BKPOINT_ONESHOT)
? "true" : "false"), ((breakpoints[i].flags &
BKPOINT_FUNCCALL) ? "true" : "false"), symbol);
}
return 1;
}
266,7 → 272,7
{
int i;
 
for (i=0; i<BKPOINTS_MAX; i++)
for (i = 0; i < BKPOINTS_MAX; i++)
breakpoints[i].address = NULL;
cmd_initialize(&bkpts_info);
305,16 → 311,16
panic("Breakpoint in branch delay slot not supported.\n");
 
spinlock_lock(&bkpoint_lock);
for (i=0; i<BKPOINTS_MAX; i++) {
for (i = 0; i < BKPOINTS_MAX; i++) {
/* Normal breakpoint */
if (fireaddr == breakpoints[i].address \
&& !(breakpoints[i].flags & BKPOINT_REINST)) {
if (fireaddr == breakpoints[i].address &&
!(breakpoints[i].flags & BKPOINT_REINST)) {
cur = &breakpoints[i];
break;
}
/* Reinst only breakpoint */
if ((breakpoints[i].flags & BKPOINT_REINST) \
&& (fireaddr ==breakpoints[i].address+sizeof(unative_t))) {
if ((breakpoints[i].flags & BKPOINT_REINST) &&
(fireaddr == breakpoints[i].address + sizeof(unative_t))) {
cur = &breakpoints[i];
break;
}
323,8 → 329,10
if (cur->flags & BKPOINT_REINST) {
/* Set breakpoint on first instruction */
((uint32_t *)cur->address)[0] = 0x0d;
smc_coherence(((uint32_t *)cur->address)[0]);
/* Return back the second */
((uint32_t *)cur->address)[1] = cur->nextinstruction;
smc_coherence(((uint32_t *)cur->address)[1]);
cur->flags &= ~BKPOINT_REINST;
spinlock_unlock(&bkpoint_lock);
return;
333,11 → 341,12
printf("Warning: breakpoint recursion\n");
if (!(cur->flags & BKPOINT_FUNCCALL))
printf("***Breakpoint %d: %p in %s.\n", i,
fireaddr, get_symtab_entry(istate->epc));
printf("***Breakpoint %d: %p in %s.\n", i, fireaddr,
get_symtab_entry(istate->epc));
 
/* Return first instruction back */
((uint32_t *)cur->address)[0] = cur->instruction;
smc_coherence(cur->address);
 
if (! (cur->flags & BKPOINT_ONESHOT)) {
/* Set Breakpoint on next instruction */
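 
The smc_coherence() calls added above follow every in-place rewrite of kernel text, i.e. planting or removing the BREAK opcode (0x0d). A minimal sketch of that patch-and-flush pattern, using hypothetical helpers and modelling smc_coherence() as a plain compiler barrier standing in for the empty mips32 macro this revision introduces in barrier.h further below:

#include <stdint.h>

#define MIPS_BREAK	0x0000000dU	/* the opcode the debugger writes */

/* Assumption: a compiler barrier stands in for the (empty) mips32
 * smc_coherence() macro added later in this diff. */
#define smc_coherence(a)	asm volatile ("" ::: "memory")

/* Hypothetical helpers, not part of the HelenOS sources. */
static void breakpoint_plant(uint32_t *insn, uint32_t *saved)
{
	*saved = *insn;			/* remember the original instruction */
	*insn = MIPS_BREAK;		/* overwrite it with BREAK */
	smc_coherence(insn);		/* let instruction fetch see the patch */
}

static void breakpoint_restore(uint32_t *insn, uint32_t saved)
{
	*insn = saved;			/* put the original instruction back */
	smc_coherence(insn);
}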
/branches/network/kernel/arch/mips32/src/exception.c
161,7 → 161,7
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, i);
printf("cpu%u: spurious interrupt (inum=%d)\n", CPU->id, i);
#endif
}
}
/branches/network/kernel/arch/mips32/src/context.S
26,7 → 26,6
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
 
#include <arch/asm/regname.h>
#include <arch/context_offset.h>
.text
38,41 → 37,9
.global context_save_arch
.global context_restore_arch
 
.macro CONTEXT_STORE r
sw $s0,OFFSET_S0(\r)
sw $s1,OFFSET_S1(\r)
sw $s2,OFFSET_S2(\r)
sw $s3,OFFSET_S3(\r)
sw $s4,OFFSET_S4(\r)
sw $s5,OFFSET_S5(\r)
sw $s6,OFFSET_S6(\r)
sw $s7,OFFSET_S7(\r)
sw $s8,OFFSET_S8(\r)
sw $gp,OFFSET_GP(\r)
sw $ra,OFFSET_PC(\r)
sw $sp,OFFSET_SP(\r)
.endm
 
.macro CONTEXT_LOAD r
lw $s0,OFFSET_S0(\r)
lw $s1,OFFSET_S1(\r)
lw $s2,OFFSET_S2(\r)
lw $s3,OFFSET_S3(\r)
lw $s4,OFFSET_S4(\r)
lw $s5,OFFSET_S5(\r)
lw $s6,OFFSET_S6(\r)
lw $s7,OFFSET_S7(\r)
lw $s8,OFFSET_S8(\r)
lw $gp,OFFSET_GP(\r)
lw $ra,OFFSET_PC(\r)
lw $sp,OFFSET_SP(\r)
.endm
 
context_save_arch:
CONTEXT_STORE $a0
CONTEXT_SAVE_ARCH_CORE $a0
 
# context_save returns 1
j $31
79,7 → 46,7
li $2, 1
context_restore_arch:
CONTEXT_LOAD $a0
CONTEXT_RESTORE_ARCH_CORE $a0
 
# context_restore returns 0
j $31
/branches/network/kernel/arch/mips32/src/cpu/cpu.c
104,22 → 104,20
void cpu_print_report(cpu_t *m)
{
struct data_t *data;
int i;
unsigned int i;
 
if (m->arch.imp_num & 0x80) {
/* Count records */
for (i=0;imp_data80[i].vendor;i++)
;
for (i = 0; imp_data80[i].vendor; i++);
if ((m->arch.imp_num & 0x7f) >= i) {
printf("imp=%d\n",m->arch.imp_num);
printf("imp=%d\n", m->arch.imp_num);
return;
}
data = &imp_data80[m->arch.imp_num & 0x7f];
} else {
for (i=0;imp_data[i].vendor;i++)
;
for (i = 0; imp_data[i].vendor; i++);
if (m->arch.imp_num >= i) {
printf("imp=%d\n",m->arch.imp_num);
printf("imp=%d\n", m->arch.imp_num);
return;
}
data = &imp_data[m->arch.imp_num];
127,7 → 125,7
 
printf("cpu%d: %s %s (rev=%d.%d, imp=%d)\n",
m->id, data->vendor, data->model, m->arch.rev_num >> 4,
m->arch.rev_num & 0xf, m->arch.imp_num);
m->arch.rev_num & 0xf, m->arch.imp_num);
}
 
/** @}
/branches/network/kernel/arch/mips32/include/drivers/arc.h
File deleted
/branches/network/kernel/arch/mips32/include/drivers/serial.h
37,6 → 37,8
 
#include <console/chardev.h>
 
#define SERIAL_ADDRESS 0x98000000
 
#define SERIAL_MAX 4
#define SERIAL_COM1 0x3f8
#define SERIAL_COM1_IRQ 4
43,16 → 45,19
#define SERIAL_COM2 0x2f8
#define SERIAL_COM2_IRQ 3
 
#define P_WRITEB(where,what) (*((volatile char *) (0xB8000000+where))=what)
#define P_READB(where) (*((volatile char *)(0xB8000000+where)))
#define P_WRITEB(where, what) (*((volatile char *) (SERIAL_ADDRESS + where)) = what)
#define P_READB(where) (*((volatile char *) (SERIAL_ADDRESS + where)))
 
#define SERIAL_READ(x) P_READB(x)
#define SERIAL_WRITE(x,c) P_WRITEB(x,c)
#define SERIAL_READ(x) P_READB(x)
#define SERIAL_WRITE(x, c) P_WRITEB(x, c)
 
/* Interrupt enable register */
#define SERIAL_READ_IER(x) (P_READB((x) + 1))
#define SERIAL_WRITE_IER(x,c) (P_WRITEB((x)+1,c))
#define SERIAL_WRITE_IER(x,c) (P_WRITEB((x) + 1, c))
 
/* Interrupt identification register */
#define SERIAL_READ_IIR(x) (P_READB((x) + 2))
 
/* Line status register */
#define SERIAL_READ_LSR(x) (P_READB((x) + 5))
#define TRANSMIT_EMPTY_BIT 5
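 
The rebased P_READB()/P_WRITEB() macros and the new LSR definitions compose into the usual polled-output loop of a 16550-style UART. A sketch, assuming the macros above are in scope (the serial_putchar() helper is hypothetical and only meaningful with the device actually mapped):

#include <stdint.h>

/* Busy-wait until the transmit holding register is empty, then send one
 * character through the memory-mapped UART. */
static void serial_putchar(uint32_t port, char c)
{
	while (!(SERIAL_READ_LSR(port) & (1 << TRANSMIT_EMPTY_BIT)))
		;			/* LSR bit 5: ready for the next byte */
	SERIAL_WRITE(port, c);
}

/* e.g. serial_putchar(SERIAL_COM1, '?'); */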
/branches/network/kernel/arch/mips32/include/drivers/msim.h
35,6 → 35,11
#ifndef KERN_mips32_MSIM_H_
#define KERN_mips32_MSIM_H_
 
/** Address of devices. */
#define MSIM_VIDEORAM 0x90000000
#define MSIM_KBD_ADDRESS 0x90000000
#define MSIM_KBD_IRQ 2
 
#include <console/chardev.h>
 
void msim_console(devno_t devno);
/branches/network/kernel/arch/mips32/include/mm/page.h
40,8 → 40,6
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 0 /* dummy */
 
#ifndef __ASM__
# define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
# define PA2KA(x) (((uintptr_t) (x)) + 0x80000000)
/branches/network/kernel/arch/mips32/include/mm/tlb.h
35,6 → 35,9
#ifndef KERN_mips32_TLB_H_
#define KERN_mips32_TLB_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <arch/mm/asid.h>
#include <arch/exception.h>
 
#ifdef TLBCNT
46,7 → 49,13
#define TLB_WIRED 1
#define TLB_KSTACK_WIRED_INDEX 0
 
#define TLB_PAGE_MASK_16K (0x3<<13)
#define TLB_PAGE_MASK_4K (0x000 << 13)
#define TLB_PAGE_MASK_16K (0x003 << 13)
#define TLB_PAGE_MASK_64K (0x00f << 13)
#define TLB_PAGE_MASK_256K (0x03f << 13)
#define TLB_PAGE_MASK_1M (0x0ff << 13)
#define TLB_PAGE_MASK_4M (0x3ff << 13)
#define TLB_PAGE_MASK_16M (0xfff << 13)
 
#define PAGE_UNCACHED 2
#define PAGE_CACHEABLE_EXC_WRITE 5
159,6 → 168,8
extern void tlb_invalid(istate_t *istate);
extern void tlb_refill(istate_t *istate);
extern void tlb_modified(istate_t *istate);
extern void tlb_prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, uintptr_t pfn);
extern void tlb_prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr);
 
#endif
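 
The new TLB_PAGE_MASK_* constants fill in the full set of sizes the CP0 PageMask register supports: the mask field starts at bit 13 and every additional pair of set bits quadruples the page size (4K, 16K, 64K, ...). A quick stand-alone check of two of the values, just reproducing the macros above:

#include <stdio.h>

#define TLB_PAGE_MASK_16K	(0x003 << 13)
#define TLB_PAGE_MASK_4M	(0x3ff << 13)

int main(void)
{
	printf("16K mask = %#x\n", TLB_PAGE_MASK_16K);	/* 0x6000 */
	printf("4M mask  = %#x\n", TLB_PAGE_MASK_4M);	/* 0x7fe000 */
	return 0;
}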
 
/branches/network/kernel/arch/mips32/include/mm/as.h
38,7 → 38,7
#define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 0
 
#define KERNEL_ADDRESS_SPACE_START_ARCH (unsigned long) 0x80000000
#define KERNEL_ADDRESS_SPACE_END_ARCH (unsigned long) 0xffffffff
#define KERNEL_ADDRESS_SPACE_END_ARCH (unsigned long) 0x9fffffff
#define USER_ADDRESS_SPACE_START_ARCH (unsigned long) 0x00000000
#define USER_ADDRESS_SPACE_END_ARCH (unsigned long) 0x7fffffff
 
/branches/network/kernel/arch/mips32/include/atomic.h
58,13 → 58,13
asm volatile (
"1:\n"
" ll %0, %1\n"
" addiu %0, %0, %3\n" /* same as addi, but never traps on overflow */
" addu %0, %0, %3\n" /* same as addi, but never traps on overflow */
" move %2, %0\n"
" sc %0, %1\n"
" beq %0, %4, 1b\n" /* if the atomic operation failed, try again */
" nop\n"
: "=r" (tmp), "=m" (val->count), "=r" (v)
: "i" (i), "i" (0)
: "=&r" (tmp), "+m" (val->count), "=&r" (v)
: "r" (i), "i" (0)
);
 
return v;
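 
The constraint changes above matter because the ll/sc sequence can loop: the temporaries become earlyclobber outputs ("=&r"), the memory operand becomes read-write ("+m"), and the addend is now passed in a register ("r") to match addu. A user-space analogue of the add routine's contract (the atomic_t and atomic_add names just mirror the kernel's; the ll/sc retry loop is replaced by a compiler builtin, so this only illustrates the semantics, not the kernel code):

#include <stdint.h>
#include <stdio.h>

typedef struct { volatile int32_t count; } atomic_t;

/* Add i atomically and return the new value, as the routine above does. */
static inline int32_t atomic_add(atomic_t *val, int32_t i)
{
	return __atomic_add_fetch(&val->count, i, __ATOMIC_SEQ_CST);
}

int main(void)
{
	atomic_t a = { 0 };
	printf("%d\n", atomic_add(&a, 5));	/* prints 5 */
	printf("%d\n", atomic_add(&a, -2));	/* prints 3 */
	return 0;
}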
/branches/network/kernel/arch/mips32/include/barrier.h
45,6 → 45,9
#define read_barrier() asm volatile ("" ::: "memory")
#define write_barrier() asm volatile ("" ::: "memory")
 
#define smc_coherence(a)
#define smc_coherence_block(a, l)
 
#endif
 
/** @}
/branches/network/kernel/arch/mips32/include/memstr.h
37,10 → 37,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/network/kernel/arch/mips32/include/types.h
35,10 → 35,6
#ifndef KERN_mips32_TYPES_H_
#define KERN_mips32_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed long int32_t;
61,14 → 57,29
typedef uint32_t unative_t;
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
#define PRIp "x" /**< Format for uintptr_t. */
#define PRIs "u" /**< Format for size_t. */
#define PRIc "u" /**< Format for count_t. */
#define PRIi "u" /**< Format for index_t. */
 
typedef int32_t inr_t;
typedef int32_t devno_t;
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
#define PRId32 "ld" /**< Format for int32_t. */
#define PRId64 "lld" /**< Format for int64_t. */
#define PRIdn "d" /**< Format for native_t. */
 
#define PRIu8 "u" /**< Format for uint8_t. */
#define PRIu16 "u" /**< Format for uint16_t. */
#define PRIu32 "u" /**< Format for uint32_t. */
#define PRIu64 "llu" /**< Format for uint64_t. */
#define PRIun "u" /**< Format for unative_t. */
 
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */
#define PRIx32 "x" /**< Format for hexadecimal (u)uint32_t. */
#define PRIx64 "llx" /**< Format for hexadecimal (u)int64_t. */
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */
 
/** Page Table Entry. */
typedef struct {
unsigned g : 1; /**< Global bit. */
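 
The PRI* additions follow the <inttypes.h> naming convention, so format strings are assembled by string-literal concatenation instead of hard-coding length modifiers. A user-space illustration with the standard macros (the kernel presumably carries its own copies above because it does not pull in libc headers):

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint32_t status = 0x8001u;
	uint64_t ticks = 123456789ULL;

	/* adjacent string literals are concatenated, splicing in the right
	 * length modifier for each fixed-width type */
	printf("status = %" PRIx32 ", ticks = %" PRIu64 "\n", status, ticks);
	return 0;
}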
/branches/network/kernel/arch/mips32/include/byteorder.h
35,24 → 35,10
#ifndef KERN_mips32_BYTEORDER_H_
#define KERN_mips32_BYTEORDER_H_
 
#include <byteorder.h>
 
#ifdef BIG_ENDIAN
 
#define uint32_t_le2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_le2host(n) uint64_t_byteorder_swap(n)
 
#define uint32_t_be2host(n) (n)
#define uint64_t_be2host(n) (n)
 
#define ARCH_IS_BIG_ENDIAN
#else
 
#define uint32_t_le2host(n) (n)
#define uint64_t_le2host(n) (n)
 
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n)
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n)
 
#define ARCH_IS_LITTLE_ENDIAN
#endif
 
#endif
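 
With the per-architecture conversion macros removed, the header now only declares the byte order (ARCH_IS_BIG_ENDIAN or ARCH_IS_LITTLE_ENDIAN) and leaves the conversions to the generic <byteorder.h>. For reference, the 32-bit swap that the removed uint32_t_le2host()/uint32_t_be2host() macros invoked on a mismatched-endian build, written as a stand-alone sketch of uint32_t_byteorder_swap():

#include <stdint.h>
#include <stdio.h>

static inline uint32_t byteorder_swap32(uint32_t n)
{
	return ((n & 0x000000ffu) << 24) | ((n & 0x0000ff00u) << 8) |
	       ((n & 0x00ff0000u) >> 8)  | ((n & 0xff000000u) >> 24);
}

int main(void)
{
	printf("%#x\n", byteorder_swap32(0x12345678u));	/* 0x78563412 */
	return 0;
}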
/branches/network/kernel/arch/mips32/include/context_offset.h
42,6 → 42,24
#define OFFSET_S8 0x28
#define OFFSET_GP 0x2c
 
#ifdef KERNEL
# define OFFSET_IPL 0x30
#else
# define OFFSET_TLS 0x30
 
# define OFFSET_F20 0x34
# define OFFSET_F21 0x38
# define OFFSET_F22 0x3c
# define OFFSET_F23 0x40
# define OFFSET_F24 0x44
# define OFFSET_F25 0x48
# define OFFSET_F26 0x4c
# define OFFSET_F27 0x50
# define OFFSET_F28 0x54
# define OFFSET_F29 0x58
# define OFFSET_F30 0x5c
#endif /* KERNEL */
 
/* istate_t */
#define EOFFSET_AT 0x0
#define EOFFSET_V0 0x4
79,4 → 97,122
#define EOFFSET_K1 0x84
#define REGISTER_SPACE 136
 
#ifdef __ASM__
 
#include <arch/asm/regname.h>
 
# ctx: address of the structure with saved context
.macro CONTEXT_SAVE_ARCH_CORE ctx:req
sw $s0,OFFSET_S0(\ctx)
sw $s1,OFFSET_S1(\ctx)
sw $s2,OFFSET_S2(\ctx)
sw $s3,OFFSET_S3(\ctx)
sw $s4,OFFSET_S4(\ctx)
sw $s5,OFFSET_S5(\ctx)
sw $s6,OFFSET_S6(\ctx)
sw $s7,OFFSET_S7(\ctx)
sw $s8,OFFSET_S8(\ctx)
sw $gp,OFFSET_GP(\ctx)
 
#ifndef KERNEL
sw $k1,OFFSET_TLS(\ctx)
 
# ifdef CONFIG_MIPS_FPU
mfc1 $t0,$20
sw $t0, OFFSET_F20(\ctx)
 
mfc1 $t0,$21
sw $t0, OFFSET_F21(\ctx)
 
mfc1 $t0,$22
sw $t0, OFFSET_F22(\ctx)
 
mfc1 $t0,$23
sw $t0, OFFSET_F23(\ctx)
 
mfc1 $t0,$24
sw $t0, OFFSET_F24(\ctx)
 
mfc1 $t0,$25
sw $t0, OFFSET_F25(\ctx)
 
mfc1 $t0,$26
sw $t0, OFFSET_F26(\ctx)
 
mfc1 $t0,$27
sw $t0, OFFSET_F27(\ctx)
 
mfc1 $t0,$28
sw $t0, OFFSET_F28(\ctx)
 
mfc1 $t0,$29
sw $t0, OFFSET_F29(\ctx)
mfc1 $t0,$30
sw $t0, OFFSET_F30(\ctx)
# endif /* CONFIG_MIPS_FPU */
#endif /* KERNEL */
 
sw $ra,OFFSET_PC(\ctx)
sw $sp,OFFSET_SP(\ctx)
.endm
 
# ctx: address of the structure with saved context
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req
lw $s0,OFFSET_S0(\ctx)
lw $s1,OFFSET_S1(\ctx)
lw $s2,OFFSET_S2(\ctx)
lw $s3,OFFSET_S3(\ctx)
lw $s4,OFFSET_S4(\ctx)
lw $s5,OFFSET_S5(\ctx)
lw $s6,OFFSET_S6(\ctx)
lw $s7,OFFSET_S7(\ctx)
lw $s8,OFFSET_S8(\ctx)
lw $gp,OFFSET_GP(\ctx)
#ifndef KERNEL
lw $k1,OFFSET_TLS(\ctx)
 
# ifdef CONFIG_MIPS_FPU
lw $t0, OFFSET_F20(\ctx)
mtc1 $t0,$20
 
lw $t0, OFFSET_F21(\ctx)
mtc1 $t0,$21
 
lw $t0, OFFSET_F22(\ctx)
mtc1 $t0,$22
 
lw $t0, OFFSET_F23(\ctx)
mtc1 $t0,$23
 
lw $t0, OFFSET_F24(\ctx)
mtc1 $t0,$24
 
lw $t0, OFFSET_F25(\ctx)
mtc1 $t0,$25
 
lw $t0, OFFSET_F26(\ctx)
mtc1 $t0,$26
 
lw $t0, OFFSET_F27(\ctx)
mtc1 $t0,$27
 
lw $t0, OFFSET_F28(\ctx)
mtc1 $t0,$28
 
lw $t0, OFFSET_F29(\ctx)
mtc1 $t0,$29
 
lw $t0, OFFSET_F30(\ctx)
mtc1 $t0,$30
# endif /* CONFIG_MIPS_FPU */
#endif /* KERNEL */
lw $ra,OFFSET_PC(\ctx)
lw $sp,OFFSET_SP(\ctx)
.endm
 
#endif
 
 
#endif