Rev 2541 | Show entire file | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
| Rev 2541 | Rev 2682 | ||
|---|---|---|---|
| Line 1... | Line 1... | ||
| 1 | /* |
1 | /* |
| 2 | * Copyright (c) 2006 Jakub Jermar |
2 | * Copyright (c) 2008 Jakub Jermar |
| 3 | * All rights reserved. |
3 | * All rights reserved. |
| 4 | * |
4 | * |
| 5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
| 6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
| 7 | * are met: |
7 | * are met: |
| Line 39... | Line 39... | ||
| 39 | #include <sys/types.h> |
39 | #include <sys/types.h> |
| 40 | #include <kernel/synch/synch.h> |
40 | #include <kernel/synch/synch.h> |
| 41 | 41 | ||
| 42 | /* |
42 | /* |
| 43 | * Note about race conditions. |
43 | * Note about race conditions. |
| 44 | * Because of non-atomic nature of operations performed sequentially on the futex |
44 | * Because of non-atomic nature of operations performed sequentially on the |
| 45 | * counter and the futex wait queue, there is a race condition: |
45 | * futex counter and the futex wait queue, there is a race condition: |
| 46 | * |
46 | * |
| 47 | * (wq->missed_wakeups == 1) && (futex->count = 1) |
47 | * (wq->missed_wakeups == 1) && (futex->count = 1) |
| 48 | * |
48 | * |
| 49 | * Scenario 1 (wait queue timeout vs. futex_up()): |
49 | * Scenario 1 (wait queue timeout vs. futex_up()): |
| 50 | * 1. assume wq->missed_wakeups == 0 && futex->count == -1 |
50 | * 1. assume wq->missed_wakeups == 0 && futex->count == -1 |
| Line 52... | Line 52... | ||
| 52 | * 2. A receives timeout and gets removed from the wait queue |
52 | * 2. A receives timeout and gets removed from the wait queue |
| 53 | * 3. B wants to leave the critical section and calls futex_up() |
53 | * 3. B wants to leave the critical section and calls futex_up() |
| 54 | * 4. B thus changes futex->count from -1 to 0 |
54 | * 4. B thus changes futex->count from -1 to 0 |
| 55 | * 5. B has to call SYS_FUTEX_WAKEUP syscall to wake up the sleeping thread |
55 | * 5. B has to call SYS_FUTEX_WAKEUP syscall to wake up the sleeping thread |
| 56 | * 6. B finds the wait queue empty and changes wq->missed_wakeups from 0 to 1 |
56 | * 6. B finds the wait queue empty and changes wq->missed_wakeups from 0 to 1 |
| 57 | * 7. A fixes futex->count (i.e. the number of waiting threads) by changing it from 0 to 1 |
57 | * 7. A fixes futex->count (i.e. the number of waiting threads) by changing it |
| - | 58 | * from 0 to 1 |
|
| 58 | * |
59 | * |
| 59 | * Scenario 2 (conditional down operation vs. futex_up) |
60 | * Scenario 2 (conditional down operation vs. futex_up) |
| 60 | * 1. assume wq->missed_wakeups == 0 && futex->count == 0 |
61 | * 1. assume wq->missed_wakeups == 0 && futex->count == 0 |
| 61 | * (i.e. thread A is in the critical section) |
62 | * (i.e. thread A is in the critical section) |
| 62 | * 2. thread B performs futex_trydown() operation and changes futex->count from 0 to -1 |
63 | * 2. thread B performs futex_trydown() operation and changes futex->count from |
| - | 64 | * 0 to -1 |
|
| 63 | * B is now obliged to call SYS_FUTEX_SLEEP syscall |
65 | * B is now obliged to call SYS_FUTEX_SLEEP syscall |
| 64 | * 3. A wants to leave the critical section and does futex_up() |
66 | * 3. A wants to leave the critical section and does futex_up() |
| 65 | * 4. A thus changes futex->count from -1 to 0 and must call SYS_FUTEX_WAKEUP syscall |
67 | * 4. A thus changes futex->count from -1 to 0 and must call SYS_FUTEX_WAKEUP |
| - | 68 | * syscall |
|
| 66 | * 5. B finds the wait queue empty and immediately aborts the conditional sleep |
69 | * 5. B finds the wait queue empty and immediately aborts the conditional sleep |
| 67 | * 6. No thread is queueing in the wait queue so wq->missed_wakeups changes from 0 to 1 |
70 | * 6. No thread is queueing in the wait queue so wq->missed_wakeups changes from |
| - | 71 | * 0 to 1 |
|
| 68 | * 7. B fixes futex->count (i.e. the number of waiting threads) by changing it from 0 to 1 |
72 | * 7. B fixes futex->count (i.e. the number of waiting threads) by changing it
| - | 73 | * from 0 to 1 |
|
| 69 | * |
74 | * |
| 70 | * Both scenarios allow two threads to be in the critical section simultaneously. |
75 | * Both scenarios allow two threads to be in the critical section |
| 71 | * One without kernel intervention and the other through wq->missed_wakeups being 1. |
76 | * simultaneously. One without kernel intervention and the other through |
| - | 77 | * wq->missed_wakeups being 1. |
|
| 72 | * |
78 | * |
| 73 | * To mitigate this problem, futex_down_timeout() detects that the syscall didn't sleep |
79 | * To mitigate this problem, futex_down_timeout() detects that the syscall |
| 74 | * in the wait queue, fixes the futex counter and RETRIES the whole operation again. |
80 | * didn't sleep in the wait queue, fixes the futex counter and RETRIES the |
| 75 | * |
81 | * whole operation again. |
| 76 | */ |
82 | */ |
| 77 | 83 | ||
| 78 | /** Initialize futex counter. |
84 | /** Initialize futex counter. |
| 79 | * |
85 | * |
| 80 | * @param futex Futex. |
86 | * @param futex Futex. |
| 81 | * @param val Initialization value. |
87 | * @param val Initialization value. |
| 82 | */ |
88 | */ |
| 83 | void futex_initialize(atomic_t *futex, int val) |
89 | void futex_initialize(futex_t *futex, int val) |
| 84 | { |
90 | { |
| 85 | atomic_set(futex, val); |
91 | atomic_set(futex, val); |
| 86 | } |
92 | } |
| 87 | 93 | ||
| 88 | int futex_down(atomic_t *futex) |
94 | int futex_down(futex_t *futex) |
| 89 | { |
95 | { |
| 90 | return futex_down_timeout(futex, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE); |
96 | return futex_down_timeout(futex, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE); |
| 91 | } |
97 | } |
| 92 | 98 | ||
| 93 | int futex_trydown(atomic_t *futex) |
99 | int futex_trydown(futex_t *futex) |
| 94 | { |
100 | { |
| 95 | return futex_down_timeout(futex, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING); |
101 | return futex_down_timeout(futex, SYNCH_NO_TIMEOUT, |
| - | 102 | SYNCH_FLAGS_NON_BLOCKING); |
|
| 96 | } |
103 | } |
| 97 | 104 | ||
| 98 | /** Try to down the futex. |
105 | /** Try to down the futex. |
| 99 | * |
106 | * |
| 100 | * @param futex Futex. |
107 | * @param futex Futex. |
| 101 | * @param usec Microseconds to wait. Zero value means sleep without timeout. |
108 | * @param usec Microseconds to wait. Zero value means sleep without |
| - | 109 | * timeout. |
|
| 102 | * @param flags Select mode of operation. See comment for waitq_sleep_timeout(). |
110 | * @param flags Select mode of operation. See comment for |
| - | 111 | * waitq_sleep_timeout(). |
|
| 103 | * |
112 | * |
| 104 | * @return ENOENT if there is no such virtual address. One of ESYNCH_OK_ATOMIC |
113 | * @return ENOENT if there is no such virtual address. One of |
| 105 | * and ESYNCH_OK_BLOCKED on success or ESYNCH_TIMEOUT if the lock was |
114 | * ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED on success or |
| - | 115 | * ESYNCH_TIMEOUT if the lock was not acquired because of |
|
| 106 | * not acquired because of a timeout or ESYNCH_WOULD_BLOCK if the |
116 | * a timeout or ESYNCH_WOULD_BLOCK if the operation could |
| 107 | * operation could not be carried out atomically (if requested so). |
117 | * not be carried out atomically (if requested so). |
| 108 | */ |
118 | */ |
| 109 | int futex_down_timeout(atomic_t *futex, uint32_t usec, int flags) |
119 | int futex_down_timeout(futex_t *futex, uint32_t usec, int flags) |
| 110 | { |
120 | { |
| 111 | int rc; |
121 | int rc; |
| 112 | 122 | ||
| 113 | while (atomic_predec(futex) < 0) { |
123 | while (atomic_predec(futex) < 0) { |
| 114 | rc = __SYSCALL3(SYS_FUTEX_SLEEP, (sysarg_t) &futex->count, (sysarg_t) usec, (sysarg_t) flags); |
124 | rc = __SYSCALL3(SYS_FUTEX_SLEEP, (sysarg_t) &futex->count, |
| - | 125 | (sysarg_t) usec, (sysarg_t) flags); |
|
| 115 | 126 | ||
| 116 | switch (rc) { |
127 | switch (rc) { |
| 117 | case ESYNCH_OK_ATOMIC: |
128 | case ESYNCH_OK_ATOMIC: |
| 118 | /* |
129 | /* |
| 119 | * Because of a race condition between timeout and futex_up() |
130 | * Because of a race condition between timeout and |
| - | 131 | * futex_up() and between conditional |
|
| 120 | * and between conditional futex_down_timeout() and futex_up(), |
132 | * futex_down_timeout() and futex_up(), we have to give |
| 121 | * we have to give up and try again in this special case. |
133 | * up and try again in this special case. |
| 122 | */ |
134 | */ |
| 123 | atomic_inc(futex); |
135 | atomic_inc(futex); |
| 124 | break; |
136 | break; |
| 125 | 137 | ||
| 126 | case ESYNCH_TIMEOUT: |
138 | case ESYNCH_TIMEOUT: |
| Line 128... | Line 140... | ||
| 128 | return ESYNCH_TIMEOUT; |
140 | return ESYNCH_TIMEOUT; |
| 129 | break; |
141 | break; |
| 130 | 142 | ||
| 131 | case ESYNCH_WOULD_BLOCK: |
143 | case ESYNCH_WOULD_BLOCK: |
| 132 | /* |
144 | /* |
| 133 | * The conditional down operation should be implemented this way. |
145 | * The conditional down operation should be implemented |
| 134 | * The userspace-only variant tends to accumulate missed wakeups |
146 | * this way. The userspace-only variant tends to |
| 135 | * in the kernel futex wait queue. |
147 | * accumulate missed wakeups in the kernel futex wait |
| - | 148 | * queue. |
|
| 136 | */ |
149 | */ |
| 137 | atomic_inc(futex); |
150 | atomic_inc(futex); |
| 138 | return ESYNCH_WOULD_BLOCK; |
151 | return ESYNCH_WOULD_BLOCK; |
| 139 | break; |
152 | break; |
| 140 | 153 | ||
| 141 | case ESYNCH_OK_BLOCKED: |
154 | case ESYNCH_OK_BLOCKED: |
| 142 | /* |
155 | /* |
| 143 | * Enter the critical section. |
156 | * Enter the critical section. |
| 144 | * The futex counter has already been incremented for us. |
157 | * The futex counter has already been incremented for |
| - | 158 | * us. |
|
| 145 | */ |
159 | */ |
| 146 | return ESYNCH_OK_BLOCKED; |
160 | return ESYNCH_OK_BLOCKED; |
| 147 | break; |
161 | break; |
| 148 | default: |
162 | default: |
| 149 | return rc; |
163 | return rc; |
| Line 156... | Line 170... | ||
| 156 | return ESYNCH_OK_ATOMIC; |
170 | return ESYNCH_OK_ATOMIC; |
| 157 | } |
171 | } |
| 158 | 172 | ||
| 159 | /** Up the futex. |
173 | /** Up the futex. |
| 160 | * |
174 | * |
| 161 | * @param futex Futex. |
175 | * @param futex Futex. |
| 162 | * |
176 | * |
| 163 | * @return ENOENT if there is no such virtual address. Otherwise zero. |
177 | * @return ENOENT if there is no such virtual address. Otherwise |
| - | 178 | * zero. |
|
| 164 | */ |
179 | */ |
| 165 | int futex_up(atomic_t *futex) |
180 | int futex_up(futex_t *futex) |
| 166 | { |
181 | { |
| 167 | long val; |
182 | long val; |
| 168 | 183 | ||
| 169 | val = atomic_postinc(futex); |
184 | val = atomic_postinc(futex); |
| 170 | if (val < 0) |
185 | if (val < 0) |