Subversion Repositories: HelenOS
Compare Revisions: Rev 3190 → Rev 3191

/branches/dynload/kernel/arch/ppc32/src/asm.S
312,4 → 312,6
 
memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
-b memcpy_from_uspace_failover_address
+# return zero, failure
+xor r3, r3, r3
+blr
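The removed branch made the failover address spin on itself forever; the new sequence clears r3 and returns, so a userspace copy that faults now reports failure (zero) to its caller instead of hanging the kernel. A minimal sketch of how a caller can rely on that convention follows; the prototype and the helper name are illustrative assumptions, not code quoted from the repository.

    /* Sketch only: assumes the arch routine returns nonzero on success
     * and 0 when the copy faulted and control reached the failover
     * address (as the new comment above states). */
    #include <stddef.h>
    #include <stdint.h>

    extern int memcpy_from_uspace(void *dst, const void *uspace_src, size_t size);

    static int read_user_word(uint32_t *out, const void *uspace_src)
    {
        uint32_t tmp;

        /* Before the fix the kernel would loop forever on a faulting copy;
         * now it comes back with 0 and we can bail out. */
        if (!memcpy_from_uspace(&tmp, uspace_src, sizeof(tmp)))
            return -1;

        *out = tmp;
        return 0;
    }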
/branches/dynload/kernel/arch/amd64/include/atomic.h
41,17 → 41,17
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock incq %0\n" : "=m" (val->count));
asm volatile ("lock incq %0\n" : "+m" (val->count));
#else
asm volatile ("incq %0\n" : "=m" (val->count));
asm volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock decq %0\n" : "=m" (val->count));
asm volatile ("lock decq %0\n" : "+m" (val->count));
#else
asm volatile ("decq %0\n" : "=m" (val->count));
asm volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
61,7 → 61,7
 
asm volatile (
"lock xaddq %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
 
return r;
73,14 → 73,14
asm volatile (
"lock xaddq %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
return r;
}
 
-#define atomic_preinc(val) (atomic_postinc(val)+1)
-#define atomic_predec(val) (atomic_postdec(val)-1)
+#define atomic_preinc(val) (atomic_postinc(val) + 1)
+#define atomic_predec(val) (atomic_postdec(val) - 1)
 
static inline uint64_t test_and_set(atomic_t *val) {
uint64_t v;
88,7 → 88,7
asm volatile (
"movq $1, %0\n"
"xchgq %0, %1\n"
: "=r" (v),"=m" (val->count)
: "=r" (v), "+m" (val->count)
);
return v;
102,20 → 102,20
 
preemption_disable();
asm volatile (
"0:;"
"0:\n"
#ifdef CONFIG_HT
"pause;"
"pause\n"
#endif
"mov %0, %1;"
"testq %1, %1;"
"jnz 0b;" /* Lightweight looping on locked spinlock */
"mov %0, %1\n"
"testq %1, %1\n"
"jnz 0b\n" /* Lightweight looping on locked spinlock */
"incq %1;" /* now use the atomic operation */
"xchgq %0, %1;"
"testq %1, %1;"
"jnz 0b;"
: "=m"(val->count),"=r"(tmp)
);
"incq %1\n" /* now use the atomic operation */
"xchgq %0, %1\n"
"testq %1, %1\n"
"jnz 0b\n"
: "+m" (val->count), "=r"(tmp)
);
/*
* Prevent critical section code from bleeding out this way up.
*/
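The common thread in these atomic.h hunks is the constraint on val->count: incq, decq, xaddq and xchgq all read the memory operand before writing it, so it must be declared read-write with "+m". The old write-only "=m" told GCC the previous contents were irrelevant, which allows it to drop or reorder the code that produced them. A minimal, self-contained amd64 illustration of the corrected constraint (not HelenOS code, just the same idiom) is below.

    /* Build with GCC on x86-64.  The counter is a read-modify-write
     * operand of "lock incq", hence the "+m" constraint. */
    #include <stdio.h>

    static inline void counter_inc(volatile long *counter)
    {
        asm volatile ("lock incq %0\n" : "+m" (*counter));
    }

    int main(void)
    {
        volatile long counter = 41;

        counter_inc(&counter);
        printf("%ld\n", counter);   /* prints 42 */
        return 0;
    }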
/branches/dynload/kernel/arch/amd64/Makefile.inc
35,6 → 35,7
TARGET = amd64-linux-gnu
TOOLCHAIN_DIR = /usr/local/amd64
 
+FPU_NO_CFLAGS = -mno-sse -mno-sse2
CMN1 = -m64 -mcmodel=kernel -mno-red-zone -fno-unwind-tables
GCC_CFLAGS += $(CMN1)
ICC_CFLAGS += $(CMN1)
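The new FPU_NO_CFLAGS variable collects the -mno-* switches that keep GCC from emitting SSE instructions; presumably it is meant for objects that must not touch FPU/SSE state (an assumption about its intended use, since the hunk only shows the definition). The effect of the switches themselves is easy to see on a trivial function, shown here for illustration only:

    /* Illustrative only: at -O3 GCC vectorizes this loop with SSE
     * (movdqa/paddd) unless code generation is restricted with
     * -mno-sse -mno-sse2.  Kernel code that runs without saving the
     * interrupted thread's FPU/SSE registers must keep such
     * instructions out. */
    void add_arrays(int *dst, const int *a, const int *b, int n)
    {
        for (int i = 0; i < n; i++)
            dst[i] = a[i] + b[i];
    }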
/branches/dynload/kernel/arch/mips32/include/atomic.h
63,7 → 63,7
" sc %0, %1\n"
" beq %0, %4, 1b\n" /* if the atomic operation failed, try again */
" nop\n"
: "=&r" (tmp), "=m" (val->count), "=&r" (v)
: "=&r" (tmp), "+m" (val->count), "=&r" (v)
: "r" (i), "i" (0)
);
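Only the tail of the MIPS load-linked/store-conditional retry loop is visible in this hunk. The sketch below reconstructs a plausible surrounding atomic_add so the operand numbering is easier to follow; the ll/addu/move lines and the simplified atomic_t definition are assumptions inferred from the visible operands, not text quoted from the repository. The "+m" constraint is needed because the counter is both loaded (ll) and stored (sc).

    typedef struct {
        volatile long count;
    } atomic_t;   /* simplified stand-in for the kernel's type */

    static inline long atomic_add(atomic_t *val, int i)
    {
        long tmp, v;

        asm volatile (
            "1:\n"
            "   ll %0, %1\n"        /* load-linked the current count */
            "   addu %0, %0, %3\n"  /* add the increment */
            "   move %2, %0\n"      /* keep the new value for the caller */
            "   sc %0, %1\n"        /* store-conditional */
            "   beq %0, %4, 1b\n"   /* if the atomic operation failed, try again */
            "   nop\n"
            : "=&r" (tmp), "+m" (val->count), "=&r" (v)
            : "r" (i), "i" (0)
        );

        return v;
    }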
 
/branches/dynload/kernel/arch/ia32/include/atomic.h
41,17 → 41,17
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock incl %0\n" : "=m" (val->count));
asm volatile ("lock incl %0\n" : "+m" (val->count));
#else
asm volatile ("incl %0\n" : "=m" (val->count));
asm volatile ("incl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock decl %0\n" : "=m" (val->count));
asm volatile ("lock decl %0\n" : "+m" (val->count));
#else
asm volatile ("decl %0\n" : "=m" (val->count));
asm volatile ("decl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
61,7 → 61,7
 
asm volatile (
"lock xaddl %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
 
return r;
73,14 → 73,14
asm volatile (
"lock xaddl %1, %0\n"
: "=m" (val->count), "+r"(r)
: "+m" (val->count), "+r"(r)
);
return r;
}
 
-#define atomic_preinc(val) (atomic_postinc(val)+1)
-#define atomic_predec(val) (atomic_postdec(val)-1)
+#define atomic_preinc(val) (atomic_postinc(val) + 1)
+#define atomic_predec(val) (atomic_postdec(val) - 1)
 
static inline uint32_t test_and_set(atomic_t *val) {
uint32_t v;
88,7 → 88,7
asm volatile (
"movl $1, %0\n"
"xchgl %0, %1\n"
: "=r" (v),"=m" (val->count)
: "=r" (v),"+m" (val->count)
);
return v;
101,20 → 101,20
 
preemption_disable();
asm volatile (
"0:;"
"0:\n"
#ifdef CONFIG_HT
"pause;" /* Pentium 4's HT love this instruction */
"pause\n" /* Pentium 4's HT love this instruction */
#endif
"mov %0, %1;"
"testl %1, %1;"
"jnz 0b;" /* Lightweight looping on locked spinlock */
"mov %0, %1\n"
"testl %1, %1\n"
"jnz 0b\n" /* lightweight looping on locked spinlock */
"incl %1;" /* now use the atomic operation */
"xchgl %0, %1;"
"testl %1, %1;"
"jnz 0b;"
: "=m"(val->count),"=r"(tmp)
);
"incl %1\n" /* now use the atomic operation */
"xchgl %0, %1\n"
"testl %1, %1\n"
"jnz 0b\n"
: "+m" (val->count), "=r"(tmp)
);
/*
* Prevent critical section code from bleeding out this way up.
*/
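The atomic_lock_arch loop changed above spins on plain reads and only retries the locked xchg once the word looks free, the classic test-and-test-and-set pattern. A minimal C rendering of the same idea using GCC's __sync builtins is shown here; the demo_spinlock_t layout and function names are illustrative, not the HelenOS API.

    typedef struct {
        volatile unsigned long flag;   /* 0 = unlocked, 1 = locked */
    } demo_spinlock_t;

    static void demo_spinlock_lock(demo_spinlock_t *lock)
    {
        while (1) {
            /* Lightweight looping: plain reads, no bus locking. */
            while (lock->flag != 0)
                /* spin */;

            /* Now use the atomic operation; it returns the previous value. */
            if (__sync_lock_test_and_set(&lock->flag, 1) == 0)
                return;   /* acquired */
        }
    }

    static void demo_spinlock_unlock(demo_spinlock_t *lock)
    {
        __sync_lock_release(&lock->flag);
    }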
/branches/dynload/kernel/arch/ia32/Makefile.inc
46,7 → 46,8
#
 
ifeq ($(MACHINE),athlon-xp)
-CMN2 = -march=athlon-xp -mmmx -msse -m3dnow
+FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-3dnow
+CMN2 = -march=athlon-xp
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=ssea
55,7 → 56,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),athlon-mp)
-CMN2 = -march=athlon-mp -mmmx -msse -m3dnow
+FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-3dnow
+CMN2 = -march=athlon-mp
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=ssea
63,7 → 65,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),pentium3)
-CMN2 = -march=pentium3 -mmmx -msse
+FPU_NO_CFLAGS = -mno-mmx -mno-sse
+CMN2 = -march=pentium3
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=sse
71,7 → 74,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),core)
-CMN2 = -march=prescott -mfpmath=sse -mmmx -msse -msse2 -msse3
+FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-sse2 -mno-sse3
+CMN2 = -march=prescott
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=sse3
78,7 → 82,8
DEFS += -DCONFIG_FENCES_P4
endif
ifeq ($(MACHINE),pentium4)
-GCC_CFLAGS += -march=pentium4 -mfpmath=sse -mmmx -msse -msse2
+FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-sse2
+GCC_CFLAGS += -march=pentium4
ICC_CFLAGS += -march=pentium4
SUNCC_CFLAGS += -xarch=sse2
DEFS += -DCONFIG_FENCES_P4