Rev 223 | Rev 430 | Go to most recent revision | Show entire file | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 223 | Rev 383 | ||
---|---|---|---|
Line 24... | Line 24... | ||
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | #include <arch.h> |
29 | #include <synch/spinlock.h> |
30 | - | ||
31 | #include <arch/atomic.h> |
30 | #include <arch/atomic.h> |
32 | #include <arch/barrier.h> |
31 | #include <arch/barrier.h> |
33 | #include <synch/spinlock.h> |
32 | #include <arch.h> |
34 | #include <preemption.h> |
33 | #include <preemption.h> |
35 | #include <print.h> |
34 | #include <print.h> |
- | 35 | #include <debug.h> |
|
36 | 36 | ||
37 | #ifdef __SMP__ |
37 | #ifdef __SMP__ |
38 | 38 | ||
- | 39 | /** Initialize spinlock |
|
- | 40 | * |
|
- | 41 | * Initialize spinlock. |
|
- | 42 | * |
|
- | 43 | * @param sl Pointer to spinlock_t structure. |
|
- | 44 | */ |
|
39 | void spinlock_initialize(spinlock_t *sl) |
45 | void spinlock_initialize(spinlock_t *sl) |
40 | { |
46 | { |
41 | sl->val = 0; |
47 | sl->val = 0; |
42 | } |
48 | } |
43 | 49 | ||
44 | #ifdef DEBUG_SPINLOCK |
50 | #ifdef DEBUG_SPINLOCK |
- | 51 | /** Lock spinlock |
|
- | 52 | * |
|
- | 53 | * Lock spinlock. |
|
 - | 54 |  * This version has limited ability to report |
|
 - | 55 |  * possible occurrence of deadlock. |
|
- | 56 | * |
|
- | 57 | * @param sl Pointer to spinlock_t structure. |
|
- | 58 | */ |
|
45 | void spinlock_lock(spinlock_t *sl) |
59 | void spinlock_lock(spinlock_t *sl) |
46 | { |
60 | { |
47 | int i = 0; |
61 | int i = 0; |
48 | __address caller = ((__u32 *) &sl)[-1]; |
62 | __address caller = ((__u32 *) &sl)[-1]; |
49 | 63 | ||
Line 52... | Line 66... | ||
52 | if (i++ > 300000) { |
66 | if (i++ > 300000) { |
53 | printf("cpu%d: looping on spinlock %X, caller=%X\n", CPU->id, sl, caller); |
67 | printf("cpu%d: looping on spinlock %X, caller=%X\n", CPU->id, sl, caller); |
54 | i = 0; |
68 | i = 0; |
55 | } |
69 | } |
56 | } |
70 | } |
- | 71 | ||
- | 72 | /* |
|
- | 73 | * Prevent critical section code from bleeding out this way up. |
|
- | 74 | */ |
|
57 | CS_ENTER_BARRIER(); |
75 | CS_ENTER_BARRIER(); |
58 | 76 | ||
59 | } |
77 | } |
- | 78 | ||
60 | #else |
79 | #else |
- | 80 | ||
- | 81 | /** Lock spinlock |
|
- | 82 | * |
|
- | 83 | * Lock spinlock. |
|
- | 84 | * |
|
- | 85 | * @param sl Pointer to spinlock_t structure. |
|
- | 86 | */ |
|
61 | void spinlock_lock(spinlock_t *sl) |
87 | void spinlock_lock(spinlock_t *sl) |
62 | { |
88 | { |
63 | preemption_disable(); |
89 | preemption_disable(); |
64 | 90 | ||
65 | /* |
91 | /* |
66 | * Each architecture has its own efficient/recommended |
92 | * Each architecture has its own efficient/recommended |
67 | * implementation of spinlock. |
93 | * implementation of spinlock. |
68 | */ |
94 | */ |
69 | spinlock_arch(&sl->val); |
95 | spinlock_arch(&sl->val); |
- | 96 | ||
- | 97 | /* |
|
- | 98 | * Prevent critical section code from bleeding out this way up. |
|
- | 99 | */ |
|
70 | CS_ENTER_BARRIER(); |
100 | CS_ENTER_BARRIER(); |
71 | } |
101 | } |
72 | #endif |
102 | #endif |
73 | 103 | ||
- | 104 | /** Lock spinlock conditionally |
|
- | 105 | * |
|
- | 106 | * Lock spinlock conditionally. |
|
- | 107 | * If the spinlock is not available at the moment, |
|
- | 108 | * signal failure. |
|
- | 109 | * |
|
- | 110 | * @param sl Pointer to spinlock_t structure. |
|
- | 111 | * |
|
- | 112 | * @return Zero on failure, non-zero otherwise. |
|
- | 113 | */ |
|
74 | int spinlock_trylock(spinlock_t *sl) |
114 | int spinlock_trylock(spinlock_t *sl) |
75 | { |
115 | { |
76 | int rc; |
116 | int rc; |
77 | 117 | ||
78 | preemption_disable(); |
118 | preemption_disable(); |
79 | rc = !test_and_set(&sl->val); |
119 | rc = !test_and_set(&sl->val); |
- | 120 | ||
- | 121 | /* |
|
- | 122 | * Prevent critical section code from bleeding out this way up. |
|
- | 123 | */ |
|
80 | CS_ENTER_BARRIER(); |
124 | CS_ENTER_BARRIER(); |
81 | 125 | ||
82 | if (!rc) |
126 | if (!rc) |
83 | preemption_enable(); |
127 | preemption_enable(); |
84 | 128 | ||
85 | return rc; |
129 | return rc; |
86 | } |
130 | } |
87 | 131 | ||
- | 132 | /** Unlock spinlock |
|
- | 133 | * |
|
- | 134 | * Unlock spinlock. |
|
- | 135 | * |
|
- | 136 | * @param sl Pointer to spinlock_t structure. |
|
- | 137 | */ |
|
88 | void spinlock_unlock(spinlock_t *sl) |
138 | void spinlock_unlock(spinlock_t *sl) |
89 | { |
139 | { |
- | 140 | ASSERT(sl->val != 0); |
|
- | 141 | ||
- | 142 | /* |
|
- | 143 | * Prevent critical section code from bleeding out this way down. |
|
- | 144 | */ |
|
90 | CS_LEAVE_BARRIER(); |
145 | CS_LEAVE_BARRIER(); |
- | 146 | ||
91 | sl->val = 0; |
147 | sl->val = 0; |
92 | preemption_enable(); |
148 | preemption_enable(); |
93 | } |
149 | } |
94 | 150 | ||
95 | #endif |
151 | #endif |