/*
 * mips32 atomic operations.
 * (Reconstructed from a WebSVN diff view, revisions 477 -> 501.)
 */
29 | #ifndef __mips32_ATOMIC_H__ |
29 | #ifndef __mips32_ATOMIC_H__ |
30 | #define __mips32_ATOMIC_H__ |
30 | #define __mips32_ATOMIC_H__ |
31 | 31 | ||
32 | #include <arch/types.h> |
32 | #include <arch/types.h> |
33 | 33 | ||
34 | #define atomic_inc(x) (a_add(x,1)) |
34 | #define atomic_inc(x) ((void) atomic_add(x, 1)) |
35 | #define atomic_dec(x) (a_sub(x,1)) |
35 | #define atomic_dec(x) ((void) atomic_add(x, -1)) |
36 | 36 | ||
37 | #define atomic_inc_pre(x) (a_add(x,1)-1) |
37 | #define atomic_inc_pre(x) (atomic_add(x, 1) - 1) |
38 | #define atomic_dec_pre(x) (a_sub(x,1)+1) |
38 | #define atomic_dec_pre(x) (atomic_add(x, -1) + 1) |
39 | 39 | ||
40 | #define atomic_inc_post(x) (a_add(x,1)) |
40 | #define atomic_inc_post(x) atomic_add(x, 1) |
41 | #define atomic_dec_post(x) (a_sub(x,1)) |
41 | #define atomic_dec_post(x) atomic_add(x, -1) |
42 | 42 | ||
43 | 43 | ||
44 | typedef volatile __u32 atomic_t; |
44 | typedef volatile __u32 atomic_t; |
45 | 45 | ||
46 | /* |
- | |
47 | * Atomic addition |
46 | /* Atomic addition of immediate value. |
48 | * |
47 | * |
49 | * This case is harder, and we have to use the special LL and SC operations |
- | |
50 | * to achieve atomicity. The instructions are similar to LW (load) and SW |
- | |
51 | * (store), except that the LL (load-linked) instruction loads the address |
- | |
52 | * of the variable to a special register and if another process writes to |
- | |
53 | * the same location, the SC (store-conditional) instruction fails. |
48 | * @param val Memory location to which will be the immediate value added. |
54 | |
- | |
55 | Returns (*val)+i |
- | |
56 | |
- | |
57 | */ |
- | |
58 | static inline atomic_t a_add(atomic_t *val, int i) |
49 | * @param i Signed immediate that will be added to *val. |
59 | { |
- | |
60 | atomic_t tmp, tmp2; |
- | |
61 | - | ||
62 | asm volatile ( |
- | |
63 | " .set push\n" |
- | |
64 | " .set noreorder\n" |
- | |
65 | " nop\n" |
- | |
66 | "1:\n" |
- | |
67 | " ll %0, %1\n" |
- | |
68 | " addu %0, %0, %3\n" |
- | |
69 | " move %2, %0\n" |
- | |
70 | " sc %0, %1\n" |
- | |
71 | " beq %0, 0x0, 1b\n" |
- | |
72 | " move %0, %2\n" |
- | |
73 | " .set pop\n" |
- | |
74 | : "=&r" (tmp), "=o" (*val), "=r" (tmp2) |
- | |
75 | : "r" (i) |
- | |
76 | ); |
- | |
77 | return tmp; |
- | |
78 | } |
- | |
79 | - | ||
80 | - | ||
81 | /* |
- | |
82 | * Atomic subtraction |
- | |
83 | * |
50 | * |
84 | * Implemented in the same manner as a_add, except we substract the value. |
- | |
85 | - | ||
86 | Returns (*val)-i |
51 | * @return Value after addition. |
87 | - | ||
88 | */ |
52 | */ |
89 | static inline atomic_t a_sub(atomic_t *val, int i) |
53 | static inline atomic_t atomic_add(atomic_t *val, int i) |
90 | - | ||
91 | { |
54 | { |
92 | atomic_t tmp, tmp2; |
55 | atomic_t tmp, v; |
93 | 56 | ||
94 | asm volatile ( |
57 | __asm__ volatile ( |
95 | " .set push\n" |
- | |
96 | " .set noreorder\n" |
- | |
97 | " nop\n" |
- | |
98 | "1:\n" |
58 | "1:\n" |
99 | " ll %0, %1\n" |
59 | " ll %0, %1\n" |
100 | " subu %0, %0, %3\n" |
60 | " addiu %0, %0, %3\n" /* same as addi, but never traps on overflow */ |
101 | " move %2, %0\n" |
61 | " move %2, %0\n" |
102 | " sc %0, %1\n" |
62 | " sc %0, %1\n" |
103 | " beq %0, 0x0, 1b\n" |
63 | " beq %0, %4, 1b\n" /* if the atomic operation failed, try again */ |
104 | " move %0, %2\n" |
64 | /* nop */ /* nop is inserted automatically by compiler */ |
105 | " .set pop\n" |
- | |
106 | : "=&r" (tmp), "=o" (*val), "=r" (tmp2) |
65 | : "=r" (tmp), "=m" (*val), "=r" (v) |
107 | : "r" (i) |
66 | : "i" (i), "i" (0) |
108 | ); |
67 | ); |
- | 68 | ||
109 | return tmp; |
69 | return v; |
110 | } |
70 | } |
111 | 71 | ||
112 | 72 | ||
113 | #endif |
73 | #endif |