/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/spinlock.h>
#include <arch/atomic.h>
#include <arch/barrier.h>
#include <arch.h>
#include <preemption.h>
#include <print.h>
#include <debug.h>
#include <symtab.h>

#ifdef CONFIG_SMP

/** Initialize spinlock
 *
 * Initialize spinlock.
 *
 * @param sl Pointer to spinlock_t structure.
 * @param name Symbolic name of the spinlock, used in debug output.
 */
void spinlock_initialize(spinlock_t *sl, char *name)
{
	sl->val = 0;
#ifdef CONFIG_DEBUG_SPINLOCK
	sl->name = name;
#endif
}
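
/*
 * Illustrative usage sketch (the 'ready_lock' name is an assumption made
 * up for this example, not something defined in this file): a spinlock is
 * typically declared statically and given a symbolic name once during
 * subsystem initialization, so that a debug build can identify it in the
 * "looping on spinlock" report below.
 *
 *	static spinlock_t ready_lock;
 *
 *	spinlock_initialize(&ready_lock, "ready_lock");
 */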

#ifdef CONFIG_DEBUG_SPINLOCK
/** Lock spinlock
 *
 * Lock spinlock.
 * This version has limited ability to report
 * possible occurrence of deadlock.
 *
 * @param sl Pointer to spinlock_t structure.
 */
void spinlock_lock(spinlock_t *sl)
{
	int i = 0;
	/*
	 * Record the return address of our caller; this relies on a
	 * calling convention (such as the ia32 one) in which the word
	 * just below the first stack-passed argument is the return
	 * address.
	 */
	__address caller = ((__address *) &sl)[-1];
	char *symbol;

	preemption_disable();
	while (test_and_set(&sl->val)) {
		if (i++ > 300000) {
			printf("cpu%d: looping on spinlock %p:%s, caller=%p",
			       CPU->id, sl, sl->name, caller);
			symbol = get_symtab_entry(caller);
			if (symbol)
				printf("(%s)", symbol);
			printf("\n");
			i = 0;
		}
	}

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
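
/*
 * With CONFIG_DEBUG_SPINLOCK enabled, a lock that cannot be acquired for
 * a long time is reported in roughly the following form, as built from the
 * printf() calls above (the CPU number, addresses, lock name and symbol
 * shown here are made up for illustration):
 *
 *	cpu0: looping on spinlock 0x1234:ready_lock, caller=0x5678(scheduler)
 */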

#else

/** Lock spinlock
 *
 * Lock spinlock.
 *
 * @param sl Pointer to spinlock_t structure.
 */
void spinlock_lock(spinlock_t *sl)
{
	preemption_disable();

	/*
	 * Each architecture has its own efficient/recommended
	 * implementation of spinlock.
	 */
	spinlock_arch(&sl->val);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
#endif

/** Lock spinlock conditionally
 *
 * Lock spinlock conditionally.
 * If the spinlock is not available at the moment,
 * signal failure.
 *
 * @param sl Pointer to spinlock_t structure.
 *
 * @return Zero on failure, non-zero otherwise.
 */
int spinlock_trylock(spinlock_t *sl)
{
	int rc;

	preemption_disable();
	rc = !test_and_set(&sl->val);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();

	if (!rc)
		preemption_enable();

	return rc;
}
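
/*
 * Illustrative usage sketch (the 'first_lock' and 'second_lock' names are
 * assumptions made up for this example): spinlock_trylock() is typically
 * used when a second lock must be taken out of the usual locking order.
 * On failure the caller still holds only the first lock, keeps its
 * preemption state consistent, and can back off and retry later.
 *
 *	spinlock_lock(&first_lock);
 *	if (!spinlock_trylock(&second_lock)) {
 *		spinlock_unlock(&first_lock);
 *		return;
 *	}
 *	... both locks held ...
 *	spinlock_unlock(&second_lock);
 *	spinlock_unlock(&first_lock);
 */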

/** Unlock spinlock
 *
 * Unlock spinlock.
 *
 * @param sl Pointer to spinlock_t structure.
 */
void spinlock_unlock(spinlock_t *sl)
{
	ASSERT(sl->val != 0);

	/*
	 * Prevent critical section code from bleeding out this way down.
	 */
	CS_LEAVE_BARRIER();

	sl->val = 0;
	preemption_enable();
}
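
/*
 * Illustrative usage sketch (not part of the original code; 'ready_lock'
 * is the hypothetical lock from the initialization example above): a
 * spinlock brackets every access to the data it protects. spinlock_lock()
 * disables preemption for the whole critical section and spinlock_unlock()
 * re-enables it, so the section should be kept short.
 *
 *	spinlock_lock(&ready_lock);
 *	... manipulate the data protected by ready_lock; other CPUs spin ...
 *	spinlock_unlock(&ready_lock);
 */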

#endif