/SPARTAN/trunk/arch/ia32/include/atomic.h |
---|
31,7 → 31,7 |
#include <arch/types.h> |
static inline void atomic_inc(volatile count_t *val) { |
static inline void atomic_inc(volatile int *val) { |
#ifdef __SMP__ |
__asm__ volatile ("lock incl (%0)\n" : : "r" (val)); |
#else |
39,7 → 39,7 |
#endif /* __SMP__ */ |
} |
static inline void atomic_dec(volatile count_t *val) { |
static inline void atomic_dec(volatile int *val) { |
#ifdef __SMP__ |
__asm__ volatile ("lock decl (%0)\n" : : "r" (val)); |
#else |
/SPARTAN/trunk/arch/amd64/src/fmath.c |
---|
122,7 → 122,7 |
fmath_ld_union.bf = num; |
fmath_ld_union.ldd[7]=((fmath_ld_union.ldd[7])&0x7f)|(sign<<7); // change 64th bit (IA32 is a little endian) |
return fmath_ld_union.bf; |
*/ return 1.0; |
*/ return 1.0; |
} |
double fmath_abs(double num) |
/SPARTAN/trunk/arch/amd64/include/atomic.h |
---|
29,22 → 29,17 |
#ifndef __amd64_ATOMIC_H__ |
#define __amd64_ATOMIC_H__ |
/* |
* TODO: these are just placeholders for real implementations of atomic_inc and atomic_dec. |
* WARNING: the following functions cause the code to be preemption-unsafe !!! |
*/ |
/* Count_t is 32-bits on AMD-64 */ |
static inline void atomic_inc(volatile count_t *val) { |
#ifdef __SMP__ |
__asm__ volatile ("lock incl (%0)\n" : : "r" (val)); |
#else |
__asm__ volatile ("incl (%0)\n" : : "r" (val)); |
#endif /* __SMP__ */ |
/*
 * Increment the counter pointed to by val.
 * WARNING: placeholder only (see TODO above) — this is NOT atomic and is
 * therefore preemption/SMP-unsafe until a lock-prefixed implementation lands.
 */
static inline void atomic_inc(volatile int *val) {
	/*
	 * Note: must be (*val)++, not *val++ — the latter parses as *(val++),
	 * which bumps the local pointer copy and never touches the counter.
	 */
	(*val)++;
}
static inline void atomic_dec(volatile count_t *val) { |
#ifdef __SMP__ |
__asm__ volatile ("lock decl (%0)\n" : : "r" (val)); |
#else |
__asm__ volatile ("decl (%0)\n" : : "r" (val)); |
#endif /* __SMP__ */ |
/*
 * Decrement the counter pointed to by val.
 * WARNING: placeholder only (see TODO above) — this is NOT atomic and is
 * therefore preemption/SMP-unsafe until a lock-prefixed implementation lands.
 */
static inline void atomic_dec(volatile int *val) {
	/*
	 * Note: must be (*val)--, not *val-- — the latter parses as *(val--),
	 * which moves the local pointer copy and never touches the counter.
	 */
	(*val)--;
}
#endif |
/SPARTAN/trunk/src/proc/scheduler.c |
---|
149,7 → 149,7 |
CPU->nrdy--; |
spinlock_unlock(&CPU->lock); |
atomic_dec(&nrdy); |
atomic_dec((int *) &nrdy); |
r->n--; |
/* |
/SPARTAN/trunk/src/proc/thread.c |
---|
136,7 → 136,7 |
r->n++; |
spinlock_unlock(&r->lock); |
atomic_inc(&nrdy); |
atomic_inc((int *) &nrdy); |
avg = nrdy / config.cpu_active; |
spinlock_lock(&cpu->lock); |