/kernel/trunk/generic/src/synch/rwlock.c |
---|
89,14 → 89,14 |
* |
* @param rwl Reader/Writer lock. |
* @param usec Timeout in microseconds. |
* @param trylock Switches between blocking and non-blocking mode. |
* @param flags Specify mode of operation. |
* |
* For exact description of possible combinations of |
* @usec and @trylock, see comment for waitq_sleep_timeout(). |
* usec and flags, see comment for waitq_sleep_timeout(). |
* |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock) |
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags) |
{ |
ipl_t ipl; |
int rc; |
111,11 → 111,11 |
* Writers take the easy part. |
* They just need to acquire the exclusive mutex. |
*/ |
rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock); |
rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); |
if (SYNCH_FAILED(rc)) { |
/* |
* Lock operation timed out. |
* Lock operation timed out or was interrupted. |
* The state of rwl is UNKNOWN at this point. |
* No claims about its holder can be made. |
*/ |
143,14 → 143,14 |
* |
* @param rwl Reader/Writer lock. |
* @param usec Timeout in microseconds. |
* @param trylock Switches between blocking and non-blocking mode. |
* @param flags Select mode of operation. |
* |
* For exact description of possible combinations of |
* usec and trylock, see comment for waitq_sleep_timeout(). |
* usec and flags, see comment for waitq_sleep_timeout(). |
* |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock) |
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags) |
{ |
int rc; |
ipl_t ipl; |
199,7 → 199,7 |
thread_register_call_me(release_spinlock, NULL); |
#endif |
rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock); |
rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); |
switch (rc) { |
case ESYNCH_WOULD_BLOCK: |
/* |
208,8 → 208,9 |
thread_register_call_me(NULL, NULL); |
spinlock_unlock(&rwl->lock); |
case ESYNCH_TIMEOUT: |
case ESYNCH_INTERRUPTED: |
/* |
* The sleep timeouted. |
* The sleep timed out. |
* We just restore interrupt priority level. |
*/ |
case ESYNCH_OK_BLOCKED: |
/kernel/trunk/generic/src/synch/mutex.c |
---|
53,16 → 53,16 |
* |
* @param mtx Mutex. |
* @param usec Timeout in microseconds. |
* @param trylock Switches between blocking and non-blocking mode. |
* @param flags Specify mode of operation. |
* |
* For exact description of possible combinations of |
* usec and trylock, see comment for waitq_sleep_timeout(). |
* usec and flags, see comment for waitq_sleep_timeout(). |
* |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock) |
int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int flags) |
{ |
return _semaphore_down_timeout(&mtx->sem, usec, trylock); |
return _semaphore_down_timeout(&mtx->sem, usec, flags); |
} |
/** Release mutex |
75,3 → 75,4 |
{ |
semaphore_up(&mtx->sem); |
} |
/kernel/trunk/generic/src/synch/semaphore.c |
---|
67,16 → 67,16 |
* |
* @param s Semaphore. |
* @param usec Timeout in microseconds. |
* @param trydown Switches between blocking and non-blocking mode. |
* @param flags Select mode of operation. |
* |
* For exact description of possible combinations of |
* usec and trydown, see comment for waitq_sleep_timeout(). |
* usec and flags, see comment for waitq_sleep_timeout(). |
* |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int trydown) |
int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int flags) |
{ |
return waitq_sleep_timeout(&s->wq, usec, trydown); |
return waitq_sleep_timeout(&s->wq, usec, flags); |
} |
/** Semaphore up |
/kernel/trunk/generic/src/synch/waitq.c |
---|
135,6 → 135,14 |
grab_locks: |
spinlock_lock(&t->lock); |
if ((wq = t->sleep_queue)) { /* assignment */ |
if (!(t->sleep_interruptible)) { |
/* |
* The sleep cannot be interrupted. |
*/ |
spinlock_unlock(&t->lock); |
goto out; |
} |
if (!spinlock_trylock(&wq->lock)) { |
spinlock_unlock(&t->lock); |
goto grab_locks; /* avoid deadlock */ |
159,7 → 167,7 |
/** Sleep until either wakeup, timeout or interruption occurs |
* |
* This is a sleep implementation which allows itself to be |
* This is a sleep implementation which allows itself to time out or to be |
* interrupted from the sleep, restoring a failover context. |
* |
* Sleepers are organised in a FIFO fashion in a structure called wait queue. |
169,18 → 177,22 |
* |
* @param wq Pointer to wait queue. |
* @param usec Timeout in microseconds. |
* @param nonblocking Blocking vs. non-blocking operation mode switch. |
* @param flags Specify mode of the sleep. |
* |
* If usec is greater than zero, regardless of the value of nonblocking, |
* the call will not return until either timeout or wakeup comes. |
* The sleep can be interrupted only if the |
* SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags. |
* If usec is greater than zero, regardless of the value of the |
* SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either timeout, |
* interruption or wakeup comes. |
* |
* If usec is zero and @nonblocking is zero (false), the call |
* will not return until wakeup comes. |
* If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, the call |
* will not return until wakeup or interruption comes. |
* |
* If usec is zero and nonblocking is non-zero (true), the call will |
* If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the call will |
* immediately return, reporting either success or failure. |
* |
* @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, |
* @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED, |
* ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED. |
* |
* @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time |
197,13 → 209,13 |
* @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was |
* attempted. |
*/ |
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking) |
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags) |
{ |
ipl_t ipl; |
int rc; |
ipl = waitq_sleep_prepare(wq); |
rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking); |
rc = waitq_sleep_timeout_unsafe(wq, usec, flags); |
waitq_sleep_finish(wq, rc, ipl); |
return rc; |
} |
276,11 → 288,11 |
* |
* @param wq See waitq_sleep_timeout(). |
* @param usec See waitq_sleep_timeout(). |
* @param nonblocking See waitq_sleep_timeout(). |
* @param flags See waitq_sleep_timeout(). |
* |
* @return See waitq_sleep_timeout(). |
*/ |
int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking) |
int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags) |
{ |
/* checks whether to go to sleep at all */ |
if (wq->missed_wakeups) { |
288,7 → 300,7 |
return ESYNCH_OK_ATOMIC; |
} |
else { |
if (nonblocking && (usec == 0)) { |
if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) { |
/* return immediately instead of going to sleep */ |
return ESYNCH_WOULD_BLOCK; |
} |
299,14 → 311,19 |
*/ |
spinlock_lock(&THREAD->lock); |
/* |
* Set context that will be restored if the sleep |
* of this thread is ever interrupted. |
*/ |
if (!context_save(&THREAD->sleep_interruption_context)) { |
/* Short emulation of scheduler() return code. */ |
spinlock_unlock(&THREAD->lock); |
return ESYNCH_INTERRUPTED; |
if (flags & SYNCH_FLAGS_INTERRUPTIBLE) { |
/* |
* Set context that will be restored if the sleep |
* of this thread is ever interrupted. |
*/ |
THREAD->sleep_interruptible = true; |
if (!context_save(&THREAD->sleep_interruption_context)) { |
/* Short emulation of scheduler() return code. */ |
spinlock_unlock(&THREAD->lock); |
return ESYNCH_INTERRUPTED; |
} |
} else { |
THREAD->sleep_interruptible = false; |
} |
if (usec) { |
/kernel/trunk/generic/src/synch/futex.c |
---|
99,12 → 99,12 |
* |
* @param uaddr Userspace address of the futex counter. |
* @param usec If non-zero, number of microseconds this thread is willing to sleep. |
* @param trydown If usec is zero and trydown is non-zero, conditional operation will be attempted. |
* @param flags Select mode of operation. |
* |
* @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See synch.h. |
* If there is no physical mapping for uaddr ENOENT is returned. |
*/ |
__native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int trydown) |
__native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int flags) |
{ |
futex_t *futex; |
__address paddr; |
130,7 → 130,7 |
futex = futex_find(paddr); |
return (__native) waitq_sleep_timeout(&futex->wq, usec, trydown); |
return (__native) waitq_sleep_timeout(&futex->wq, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE); |
} |
/** Wakeup one thread waiting in futex wait queue. |
/kernel/trunk/generic/src/synch/condvar.c |
---|
74,13 → 74,16 |
* @param cv Condition variable. |
* @param mtx Mutex. |
* @param usec Timeout value in microseconds. |
* @param flags Select mode of operation. |
* |
* For exact description of meaning of possible values of usec, |
* see comment for waitq_sleep_timeout(). |
* For exact description of meaning of possible combinations |
* of usec and flags, see comment for waitq_sleep_timeout(). |
* Note that when SYNCH_FLAGS_NON_BLOCKING is specified here, |
* ESYNCH_WOULD_BLOCK is always returned. |
* |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec) |
int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int flags) |
{ |
int rc; |
ipl_t ipl; |
89,7 → 92,7 |
mutex_unlock(mtx); |
cv->wq.missed_wakeups = 0; /* Enforce blocking. */ |
rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_BLOCKING); |
rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags); |
mutex_lock(mtx); |
waitq_sleep_finish(&cv->wq, rc, ipl); |