HelenOS Subversion: compare revisions, Rev 2081 → Rev 2082

This change replaces the GCC-specific __asm__ and __volatile__ spellings with the plain asm volatile form throughout the userspace libc. Changed lines are marked - for Rev 2081 and + for Rev 2082.

/trunk/uspace/libc/malloc/malloc.c
1569,7 → 1569,7
I = NTREEBINS-1;\
else {\
unsigned int K;\
__asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
asm("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
}\
}
1628,7 → 1628,7
#define compute_bit2idx(X, I)\
{\
unsigned int J;\
__asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
asm("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
I = (bindex_t)J;\
}
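
Both macros locate a set bit: bsrl yields the index of the highest set bit (for the tree-bin index), bsfl the lowest (for mapping a bitmask back to a bin). A minimal portable sketch of the same computations, assuming GCC's count-leading/trailing-zero builtins rather than HelenOS's actual fallback code:

    /* Equivalents of the two bit-scan instructions, for nonzero x
     * (both macros above likewise assume a nonzero operand). */
    static inline unsigned highest_set_bit(unsigned x)  /* bsrl */
    {
        return 31u - (unsigned) __builtin_clz(x);
    }

    static inline unsigned lowest_set_bit(unsigned x)   /* bsfl */
    {
        return (unsigned) __builtin_ctz(x);
    }

For example, highest_set_bit(0x50) is 6 and lowest_set_bit(0x50) is 4.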
 
/trunk/uspace/libc/arch/sparc64/include/syscall.h
46,7 → 46,7
register uint64_t a3 asm("o2") = p3;
register uint64_t a4 asm("o3") = p4;
 
-__asm__ volatile (
+asm volatile (
"ta %5\n"
: "=r" (a1)
: "r" (a1), "r" (a2), "r" (a3), "r" (a4), "i" (id)
/trunk/uspace/libc/arch/sparc64/include/atomic.h
53,7 → 53,7
do {
a = val->count;
b = a + i;
__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (b) : "r" (a));
asm volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (b) : "r" (a));
} while (a != b);
 
return a;
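
The casx loop is a standard compare-and-swap retry: b holds the proposed new value going in and the value actually found in memory coming out, so the loop exits exactly when the swap took effect. The same logic in portable form, using GCC's __sync builtin as a stand-in for the raw casx (an assumption, not what HelenOS ships):

    #include <stdint.h>

    typedef struct { volatile uint64_t count; } atomic_t;  /* assumed layout */

    static inline uint64_t atomic_add_sketch(atomic_t *val, uint64_t i)
    {
        uint64_t a, b;

        do {
            a = val->count;   /* snapshot the current value */
            b = __sync_val_compare_and_swap(&val->count, a, a + i);
            /* b is what memory held; if it still equals the snapshot,
             * a + i was stored and the loop ends. */
        } while (a != b);

        return a;             /* old value, matching the asm version */
    }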
/trunk/uspace/libc/arch/sparc64/include/thread.h
45,7 → 45,7
 
static inline void __tcb_set(tcb_t *tcb)
{
__asm__ volatile ("mov %0, %%g7\n" : : "r" (tcb) : "g7");
asm volatile ("mov %0, %%g7\n" : : "r" (tcb) : "g7");
}
 
static inline tcb_t * __tcb_get(void)
52,7 → 52,7
{
void *retval;
 
__asm__ volatile ("mov %%g7, %0\n" : "=r" (retval));
asm volatile ("mov %%g7, %0\n" : "=r" (retval));
 
return retval;
}
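
This and the thread.h hunks below all implement the same idea: the thread control block pointer lives in a dedicated register or segment base (%g7 here, r13 on ia64, %fs:0 on amd64, $k1 on mips32, %gs:0 on ia32), so fetching thread-local state is a single register read. A hypothetical caller, with an assumed tcb_t layout:

    typedef struct tcb { void *fibril; } tcb_t;  /* illustrative layout */

    static tcb_t main_tcb;

    void thread_bootstrap(void)
    {
        __tcb_set(&main_tcb);          /* pin the TCB into %g7 */
    }

    void *current_fibril(void)
    {
        return __tcb_get()->fibril;    /* no locking, no table lookup */
    }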
/trunk/uspace/libc/arch/ia64/include/atomic.h
46,7 → 46,7
{
long v;
 
__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
return v;
}
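
fetchadd8.rel atomically adds an immediate to a 64-bit location and returns the prior value. The "i" constraint is essential: IA-64's fetchadd encodes only the immediates -16, -8, -4, -1, 1, 4, 8 and 16, so every call site must pass a literal. Hypothetical wrappers in the style this header suggests (the enclosing function is assumed to be named atomic_add):

    static inline long atomic_preinc(atomic_t *val)
    {
        return atomic_add(val, 1) + 1;    /* old value + 1 = new value */
    }

    static inline long atomic_predec(atomic_t *val)
    {
        return atomic_add(val, -1) - 1;
    }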
/trunk/uspace/libc/arch/ia64/include/thread.h
45,7 → 45,7
 
static inline void __tcb_set(tcb_t *tcb)
{
__asm__ volatile ("mov r13 = %0\n" : : "r" (tcb) : "r13");
asm volatile ("mov r13 = %0\n" : : "r" (tcb) : "r13");
}
 
static inline tcb_t *__tcb_get(void)
52,7 → 52,7
{
void *retval;
 
__asm__ volatile ("mov %0 = r13\n" : "=r" (retval));
asm volatile ("mov %0 = r13\n" : "=r" (retval));
 
return retval;
}
/trunk/uspace/libc/arch/ppc32/include/atomic.h
39,7 → 39,7
{
long tmp;
 
-asm __volatile__ (
+asm volatile (
"1:\n"
"lwarx %0, 0, %2\n"
"addic %0, %0, 1\n"
54,7 → 54,7
{
long tmp;
 
-asm __volatile__(
+asm volatile (
"1:\n"
"lwarx %0, 0, %2\n"
"addic %0, %0, -1\n"
/trunk/uspace/libc/arch/amd64/include/atomic.h
38,11 → 38,11
#define LIBC_amd64_ATOMIC_H_
 
static inline void atomic_inc(atomic_t *val) {
__asm__ volatile ("lock incq %0\n" : "=m" (val->count));
asm volatile ("lock incq %0\n" : "=m" (val->count));
}
 
static inline void atomic_dec(atomic_t *val) {
__asm__ volatile ("lock decq %0\n" : "=m" (val->count));
asm volatile ("lock decq %0\n" : "=m" (val->count));
}
 
static inline long atomic_postinc(atomic_t *val)
49,7 → 49,7
{
long r;
 
-__asm__ volatile (
+asm volatile (
"movq $1, %0\n"
"lock xaddq %0, %1\n"
: "=r" (r), "=m" (val->count)
62,7 → 62,7
{
long r;
-__asm__ volatile (
+asm volatile (
"movq $-1, %0\n"
"lock xaddq %0, %1\n"
: "=r" (r), "=m" (val->count)
/trunk/uspace/libc/arch/amd64/include/thread.h
51,7 → 51,7
{
void * retval;
 
__asm__ ("movq %%fs:0, %0" : "=r"(retval));
asm ("movq %%fs:0, %0" : "=r"(retval));
return retval;
}
 
/trunk/uspace/libc/arch/ppc64/include/atomic.h
39,7 → 39,7
{
long tmp;
 
-asm __volatile__ (
+asm volatile (
"1:\n"
"lwarx %0, 0, %2\n"
"addic %0, %0, 1\n"
54,7 → 54,7
{
long tmp;
 
-asm __volatile__(
+asm volatile (
"1:\n"
"lwarx %0, 0, %2\n"
"addic %0, %0, -1\n"
/trunk/uspace/libc/arch/mips32/include/atomic.h
56,7 → 56,7
{
long tmp, v;
 
-__asm__ volatile (
+asm volatile (
"1:\n"
" ll %0, %1\n"
" addiu %0, %0, %3\n" /* same as addi, but never traps on overflow */
/trunk/uspace/libc/arch/mips32/include/thread.h
61,7 → 61,7
void *tp = tcb;
tp += MIPS_TP_OFFSET + sizeof(tcb_t);
 
__asm__ volatile ("add $27, %0, $0" : : "r"(tp)); /* Move tls to K1 */
asm volatile ("add $27, %0, $0" : : "r"(tp)); /* Move tls to K1 */
}
 
static inline tcb_t * __tcb_get(void)
68,7 → 68,7
{
void * retval;
 
__asm__ volatile("add %0, $27, $0" : "=r"(retval));
asm volatile("add %0, $27, $0" : "=r"(retval));
 
return (tcb_t *)(retval - MIPS_TP_OFFSET - sizeof(tcb_t));
}
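
MIPS biases the thread pointer: __tcb_set stores the TCB address plus MIPS_TP_OFFSET + sizeof(tcb_t) into $k1 ($27), and __tcb_get undoes exactly that. A sketch of the round trip, assuming the usual MIPS TLS ABI bias of 0x7000 for MIPS_TP_OFFSET:

    #include <assert.h>
    #include <stdint.h>

    #define MIPS_TP_OFFSET 0x7000   /* assumed value */

    void roundtrip_check(tcb_t *tcb)
    {
        uintptr_t tp = (uintptr_t) tcb + MIPS_TP_OFFSET + sizeof(tcb_t);
        tcb_t *back = (tcb_t *) (tp - MIPS_TP_OFFSET - sizeof(tcb_t));
        assert(back == tcb);        /* set and get cancel exactly */
    }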
/trunk/uspace/libc/arch/ia32/include/atomic.h
36,11 → 36,11
#define LIBC_ia32_ATOMIC_H_
 
static inline void atomic_inc(atomic_t *val) {
__asm__ volatile ("lock incl %0\n" : "=m" (val->count));
asm volatile ("lock incl %0\n" : "=m" (val->count));
}
 
static inline void atomic_dec(atomic_t *val) {
__asm__ volatile ("lock decl %0\n" : "=m" (val->count));
asm volatile ("lock decl %0\n" : "=m" (val->count));
}
 
static inline long atomic_postinc(atomic_t *val)
47,7 → 47,7
{
long r;
 
-__asm__ volatile (
+asm volatile (
"movl $1, %0\n"
"lock xaddl %0, %1\n"
: "=r" (r), "=m" (val->count)
60,7 → 60,7
{
long r;
-__asm__ volatile (
+asm volatile (
"movl $-1, %0\n"
"lock xaddl %0, %1\n"
: "=r" (r), "=m" (val->count)
/trunk/uspace/libc/arch/ia32/include/thread.h
51,7 → 51,7
{
void * retval;
 
__asm__ ("movl %%gs:0, %0" : "=r"(retval));
asm ("movl %%gs:0, %0" : "=r"(retval));
return retval;
}