/SPARTAN/trunk/Makefile.config
---
36,11 → 36,11
 #
 CONFIG_USERSPACE = n

-CONFIG_TEST =
+#CONFIG_TEST =
 #CONFIG_TEST = synch/rwlock1
 #CONFIG_TEST = synch/rwlock2
 #CONFIG_TEST = synch/rwlock3
-#CONFIG_TEST = synch/rwlock4
+CONFIG_TEST = synch/rwlock4
 #CONFIG_TEST = synch/rwlock5
 #CONFIG_TEST = synch/semaphore1
 #CONFIG_TEST = synch/semaphore2
/SPARTAN/trunk/arch/ia64/include/atomic.h
---
37,15 → 37,26
 {
 	atomic_t v;

-	/*
-	 * __asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "=m" (val) : "i" (imm));
-	 */
-	*val += imm;
+	__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (*val) : "i" (imm));

 	return v;
 }

-static inline atomic_t atomic_inc(atomic_t *val) { return atomic_add(val, 1); }
-static inline atomic_t atomic_dec(atomic_t *val) { return atomic_add(val, -1); }
+static inline void atomic_inc(atomic_t *val) { atomic_add(val, 1); }
+static inline void atomic_dec(atomic_t *val) { atomic_add(val, -1); }
+
+static inline atomic_t atomic_inc_pre(atomic_t *val) { return atomic_add(val, 1); }
+static inline atomic_t atomic_dec_pre(atomic_t *val) { return atomic_add(val, -1); }
+
+static inline atomic_t atomic_inc_post(atomic_t *val) { return atomic_add(val, 1)+1; }
+static inline atomic_t atomic_dec_post(atomic_t *val) { return atomic_add(val, -1)-1; }

 #endif
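
Two things happen in this hunk. The memory operand changes from "=m", which declares a write-only operand and lets the compiler treat the previous value of *val as dead, to "+m", the read-modify-write constraint matching what fetchadd8 actually does; the same constraint fix recurs in the ia32 file below. Also, fetchadd returns the value the location held before the addition, which is what gives the new _pre/_post variants their meaning: _pre yields the old value, _post the updated one. A minimal usage sketch, assuming only the header above (values in comments are illustrative):

static void example(void)
{
	atomic_t cnt = 5;

	atomic_t a = atomic_inc_pre(&cnt);	/* a == 5 (value before), cnt == 6 */
	atomic_t b = atomic_inc_post(&cnt);	/* b == 7 (value after),  cnt == 7 */
	atomic_t c = atomic_dec_pre(&cnt);	/* c == 7 (value before), cnt == 6 */
	atomic_t d = atomic_dec_post(&cnt);	/* d == 5 (value after),  cnt == 5 */
}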
/SPARTAN/trunk/arch/ia64/src/ivt.S
---
1,6 → 1,5
 #
-# Copyright (C) 2005 Jakub Vana
 # Copyright (C) 2005 Jakub Jermar
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
28,7 → 27,6
 #
 #include <arch/stack.h>
-#include <arch/register.h>

 #define STACK_ITEMS		12
 #define STACK_FRAME_SIZE	((STACK_ITEMS*STACK_ITEM_SIZE) + STACK_SCRATCH_AREA_SIZE)
106,7 → 104,7
 	mov r28 = ar.bspstore ;;

 	/* assume kernel backing store */
-	/* mov ar.bspstore = r28 ;; */
+	mov ar.bspstore = r28 ;;

 	mov r29 = ar.bsp
146,8 → 144,8
 	ld8 r25 = [r31], +8 ;;		/* load ar.pfs */
 	ld8 r24 = [r31], +8 ;;		/* load ar.rsc */

-	/* mov ar.bspstore = r28 ;; */	/* (step 4) */
-	/* mov ar.rnat = r27 */		/* (step 5) */
+	mov ar.bspstore = r28 ;;	/* (step 4) */
+	mov ar.rnat = r27		/* (step 5) */

 	mov ar.pfs = r25		/* (step 6) */
 	mov cr.ifs = r26
191,7 → 189,7
 	add out1 = STACK_SCRATCH_AREA_SIZE, r12

 	/* 6. switch to bank 1 and reenable PSR.ic */
-	ssm PSR_IC_MASK
+	ssm 0x2000
 	bsw.1 ;;
 	srlz.d
247,11 → 245,6
 	mov loc46 = r31

 	/* 9. skipped (will not enable interrupts) */
-	/*
-	 * ssm PSR_I_MASK
-	 * ;;
-	 * srlz.d
-	 */

 	/* 10. call handler */
 	mov b1 = loc2
261,11 → 254,6
 0:

 	/* 12. skipped (will not disable interrupts) */
-	/*
-	 * rsm PSR_I_MASK
-	 * ;;
-	 * srlz.d
-	 */

 	/* 13. restore general and floating-point registers */
 	/* TODO: restore floating-point context */
319,7 → 307,7
 	mov b7 = loc16

 	/* 15. disable PSR.ic and switch to bank 0 */
-	rsm PSR_IC_MASK
+	rsm 0x2000
 	bsw.0 ;;
 	srlz.d
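
On the literals that replace the <arch/register.h> symbols: in the ia64 PSR, bit 13 is psr.ic (interruption collection) and bit 14 is psr.i (external interrupt enable), so ssm/rsm 0x2000 flips exactly the bit the step comments name. A plausible reconstruction of what the dropped header provided; the macro names come from the old code, the values follow from the architecture:

/* psr.ic and psr.i bit positions are fixed by the Itanium architecture */
#define PSR_IC_MASK	(1 << 13)	/* == 0x2000 */
#define PSR_I_MASK	(1 << 14)	/* == 0x4000 */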
/SPARTAN/trunk/arch/mips32/include/atomic.h
---
34,6 → 34,13
 #define atomic_inc(x)	(a_add(x,1))
 #define atomic_dec(x)	(a_sub(x,1))

+#define atomic_inc_pre(x)	(a_add(x,1)-1)
+#define atomic_dec_pre(x)	(a_sub(x,1)+1)
+
+#define atomic_inc_post(x)	(a_add(x,1))
+#define atomic_dec_post(x)	(a_sub(x,1))
+
 typedef volatile __u32 atomic_t;

 /*
44,6 → 51,9
  * (store), except that the LL (load-linked) instruction loads the address
  * of the variable to a special register and if another process writes to
  * the same location, the SC (store-conditional) instruction fails.
+ *
+ * Returns (*val)+i
  */
 static inline atomic_t a_add(atomic_t *val, int i)
 {
72,6 → 82,9
  * Atomic subtraction
  *
  * Implemented in the same manner as a_add, except we subtract the value.
+ *
+ * Returns (*val)-i
  */
 static inline atomic_t a_sub(atomic_t *val, int i)
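
The comment above describes the LL/SC retry pattern without showing it. A sketch of how such an a_add loop is commonly written on MIPS, illustrative only rather than the changeset's exact body:

static inline atomic_t a_add(atomic_t *val, int i)
{
	atomic_t tmp, v;

	__asm__ volatile (
		"1:\n"
		"	ll %0, %1\n"		/* load-linked the current value */
		"	addu %0, %0, %3\n"	/* new value = old + i */
		"	move %2, %0\n"		/* save it; sc overwrites its source */
		"	sc %0, %1\n"		/* try the store; %0 = 1 on success */
		"	beqz %0, 1b\n"		/* another CPU intervened: retry */
		"	nop\n"			/* branch delay slot */
		: "=&r" (tmp), "+m" (*val), "=&r" (v)
		: "r" (i)
		: "memory"
	);

	return v;	/* (*val)+i, as the comment promises */
}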
/SPARTAN/trunk/arch/ia32/include/atomic.h
---
35,20 → 35,49
 static inline void atomic_inc(atomic_t *val) {
 #ifdef CONFIG_SMP
-	__asm__ volatile ("lock incl %0\n" : "=m" (*val));
+	__asm__ volatile ("lock incl %0\n" : "+m" (*val));
 #else
-	__asm__ volatile ("incl %0\n" : "=m" (*val));
+	__asm__ volatile ("incl %0\n" : "+m" (*val));
 #endif /* CONFIG_SMP */
 }

 static inline void atomic_dec(atomic_t *val) {
 #ifdef CONFIG_SMP
-	__asm__ volatile ("lock decl %0\n" : "=m" (*val));
+	__asm__ volatile ("lock decl %0\n" : "+m" (*val));
 #else
-	__asm__ volatile ("decl %0\n" : "=m" (*val));
+	__asm__ volatile ("decl %0\n" : "+m" (*val));
 #endif /* CONFIG_SMP */
 }

+static inline atomic_t atomic_inc_pre(atomic_t *val)
+{
+	atomic_t r;
+
+	__asm__ volatile (
+		"movl $1,%0;"
+		"lock xaddl %0,%1;"
+		: "=r"(r), "+m" (*val)
+	);
+
+	return r;
+}
+
+static inline atomic_t atomic_dec_pre(atomic_t *val)
+{
+	atomic_t r;
+
+	__asm__ volatile (
+		"movl $-1,%0;"
+		"lock xaddl %0,%1;"
+		: "=r"(r), "+m" (*val)
+	);
+
+	return r;
+}
+
+#define atomic_inc_post(val) (atomic_inc_pre(val)+1)
+#define atomic_dec_post(val) (atomic_dec_pre(val)-1)
+
 static inline int test_and_set(volatile int *val) {
 	int v;
55,7 → 84,7
 	__asm__ volatile (
 		"movl $1, %0\n"
 		"xchgl %0, %1\n"
-		: "=r" (v),"=m" (*val)
+		: "=r" (v),"+m" (*val)
 	);

 	return v;
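
The "=m" to "+m" change matters here for the same reason as in the other files: xchgl both reads and writes *val. Note that xchg with a memory operand locks the bus implicitly on x86, so test_and_set is atomic even without a lock prefix. A hedged sketch of the usual consumer; the spinlock type and function names are assumptions, not part of this changeset:

typedef struct {
	volatile int val;
} spinlock_t;

static inline void spinlock_lock(spinlock_t *sl)
{
	/* spin until this CPU is the one that flips 0 -> 1 */
	while (test_and_set(&sl->val))
		;
}

static inline void spinlock_unlock(spinlock_t *sl)
{
	sl->val = 0;
}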