Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1501 → Rev 1502

/kernel/trunk/generic/include/proc/thread.h
87,6 → 87,7
/** From here, the stored interruption context is restored when sleep is interrupted. */
context_t sleep_interruption_context;
 
bool sleep_interruptible; /**< If true, the thread can be interrupted from sleep. */
waitq_t *sleep_queue; /**< Wait queue in which this thread sleeps. */
timeout_t sleep_timeout; /**< Timeout used for timeoutable sleeping. */
volatile int timeout_pending; /**< Flag signalling sleep timeout in progress. */
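
Editor's note: the new sleep_interruptible field pairs with sleep_interruption_context above; an interrupter may divert a sleeping thread only if the sleeper opted in. A minimal sketch of that check, abbreviated from the waitq.c hunk further down (the helper name here is hypothetical):

#include <synch/spinlock.h>
#include <proc/thread.h>

/* Hypothetical helper; the real logic lives in waitq.c below. */
static void interrupt_sleep_sketch(thread_t *t)
{
	spinlock_lock(&t->lock);
	if (t->sleep_queue && !t->sleep_interruptible) {
		/* The sleeper did not opt in; leave it alone. */
		spinlock_unlock(&t->lock);
		return;
	}
	/* ... otherwise interrupt the sleep as in waitq.c ... */
	spinlock_unlock(&t->lock);
}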
/kernel/trunk/generic/include/synch/futex.h
44,7 → 44,7
};
 
extern void futex_init(void);
extern __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int trydown);
extern __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int flags);
extern __native sys_futex_wakeup(__address uaddr);
 
#endif
/kernel/trunk/generic/include/synch/condvar.h
39,13 → 39,13
};
 
#define condvar_wait(cv,mtx) \
_condvar_wait_timeout((cv),(mtx),SYNCH_NO_TIMEOUT)
_condvar_wait_timeout((cv),(mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
#define condvar_wait_timeout(cv,mtx,usec) \
_condvar_wait_timeout((cv),(mtx),(usec))
_condvar_wait_timeout((cv),(mtx),(usec),SYNCH_FLAGS_NONE)
 
extern void condvar_initialize(condvar_t *cv);
extern void condvar_signal(condvar_t *cv);
extern void condvar_broadcast(condvar_t *cv);
extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec);
extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int flags);
 
#endif
/kernel/trunk/generic/include/synch/rwlock.h
48,22 → 48,23
};
 
#define rwlock_write_lock(rwl) \
_rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_BLOCKING)
_rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
#define rwlock_read_lock(rwl) \
_rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_BLOCKING)
_rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
#define rwlock_write_trylock(rwl) \
_rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_NON_BLOCKING)
_rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
#define rwlock_read_trylock(rwl) \
_rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_NON_BLOCKING)
_rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
#define rwlock_write_lock_timeout(rwl,usec) \
_rwlock_write_lock_timeout((rwl),(usec),SYNCH_NON_BLOCKING)
_rwlock_write_lock_timeout((rwl),(usec),SYNCH_FLAGS_NONE)
#define rwlock_read_lock_timeout(rwl,usec) \
_rwlock_read_lock_timeout((rwl),(usec),SYNCH_NON_BLOCKING)
_rwlock_read_lock_timeout((rwl),(usec),SYNCH_FLAGS_NONE)
 
extern void rwlock_initialize(rwlock_t *rwl);
extern void rwlock_read_unlock(rwlock_t *rwl);
extern void rwlock_write_unlock(rwlock_t *rwl);
extern int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock);
extern int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock);
extern int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags);
extern int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags);
 
#endif
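
Editor's note: caller-side, the macros keep their old spelling; only the third argument of the underlying calls changes. A hedged usage sketch (the lock variable and timeout are made up for illustration):

#include <synch/rwlock.h>
#include <synch/synch.h>

static void rwlock_usage_sketch(void)
{
	rwlock_t rwl;
	int rc;

	rwlock_initialize(&rwl);

	/* Expands to _rwlock_read_lock_timeout(&rwl, SYNCH_NO_TIMEOUT,
	 * SYNCH_FLAGS_NONE). */
	rwlock_read_lock(&rwl);
	rwlock_read_unlock(&rwl);

	/* Now passes SYNCH_FLAGS_NONE rather than the old SYNCH_NON_BLOCKING;
	 * with a non-zero usec the call sleeps up to the timeout either way. */
	rc = rwlock_write_lock_timeout(&rwl, 1000);
	if (!SYNCH_FAILED(rc))
		rwlock_write_unlock(&rwl);
}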
 
/kernel/trunk/generic/include/synch/mutex.h
39,16 → 39,16
};
 
#define mutex_lock(mtx) \
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_BLOCKING)
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
#define mutex_trylock(mtx) \
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_NON_BLOCKING)
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
#define mutex_lock_timeout(mtx,usec) \
_mutex_lock_timeout((mtx),(usec),SYNCH_NON_BLOCKING)
_mutex_lock_timeout((mtx),(usec),SYNCH_FLAGS_NON_BLOCKING)
#define mutex_lock_active(mtx) \
while (mutex_trylock((mtx)) != ESYNCH_OK_ATOMIC)
 
extern void mutex_initialize(mutex_t *mtx);
extern int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock);
extern int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int flags);
extern void mutex_unlock(mutex_t *mtx);
 
#endif
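
Editor's note: a similar sketch for the mutex side; note that mutex_lock_timeout still requests SYNCH_FLAGS_NON_BLOCKING, so a zero usec degenerates to a trylock (variables are hypothetical):

#include <synch/mutex.h>
#include <synch/synch.h>

static void mutex_usage_sketch(void)
{
	mutex_t mtx;
	int rc;

	mutex_initialize(&mtx);

	/* Expands to _mutex_lock_timeout(&mtx, 500, SYNCH_FLAGS_NON_BLOCKING);
	 * sleeps at most 500 us. */
	rc = mutex_lock_timeout(&mtx, 500);
	if (SYNCH_FAILED(rc)) {
		/* ESYNCH_WOULD_BLOCK or ESYNCH_TIMEOUT: the mutex is not held. */
		return;
	}
	mutex_unlock(&mtx);
}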
/kernel/trunk/generic/include/synch/semaphore.h
40,14 → 40,15
};
 
#define semaphore_down(s) \
_semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_BLOCKING)
_semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
#define semaphore_trydown(s) \
_semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_NON_BLOCKING)
_semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
#define semaphore_down_timeout(s,usec) \
_semaphore_down_timeout((s),(usec),SYNCH_NON_BLOCKING)
_semaphore_down_timeout((s),(usec),SYNCH_FLAGS_NONE)
 
extern void semaphore_initialize(semaphore_t *s, int val);
extern int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int trydown);
extern int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int flags);
extern void semaphore_up(semaphore_t *s);
 
#endif
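
Editor's note: and for semaphores, where the atomic-path success code matters for trydown. A sketch; the initial count of 1 is arbitrary:

#include <synch/semaphore.h>
#include <synch/synch.h>

static void semaphore_usage_sketch(void)
{
	semaphore_t sem;

	semaphore_initialize(&sem, 1);

	/* Expands to _semaphore_down_timeout(&sem, SYNCH_NO_TIMEOUT,
	 * SYNCH_FLAGS_NON_BLOCKING); succeeds atomically or not at all. */
	if (semaphore_trydown(&sem) == ESYNCH_OK_ATOMIC)
		semaphore_up(&sem);
}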
 
/kernel/trunk/generic/include/synch/synch.h
30,9 → 30,11
#define __SYNCH_H__
 
#define SYNCH_NO_TIMEOUT 0 /**< Request with no timeout. */
#define SYNCH_BLOCKING 0 /**< Blocking operation request. */
#define SYNCH_NON_BLOCKING 1 /**< Non-blocking operation request. */
 
#define SYNCH_FLAGS_NONE 0 /**< No flags specified. */
#define SYNCH_FLAGS_NON_BLOCKING (1<<0) /**< Non-blocking operation request. */
#define SYNCH_FLAGS_INTERRUPTIBLE (1<<1) /**< Interruptible operation. */
 
#define ESYNCH_WOULD_BLOCK 1 /**< Could not satisfy the request without going to sleep. */
#define ESYNCH_TIMEOUT 2 /**< Timeout occurred. */
#define ESYNCH_INTERRUPTED 4 /**< Sleep was interrupted. */
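
Editor's note: this is the heart of the revision. The boolean blocking/non-blocking switch becomes a flags bitmask, so independent behaviours can be OR-ed together. A hedged sketch against a hypothetical wait queue:

#include <synch/waitq.h>
#include <synch/synch.h>

static void flags_sketch(waitq_t *wq)
{
	int rc;

	/* Formerly: waitq_sleep_timeout(wq, usec, SYNCH_NON_BLOCKING);
	 * now each behaviour is a separate bit. */
	rc = waitq_sleep_timeout(wq, 1000, SYNCH_FLAGS_INTERRUPTIBLE);
	if (rc == ESYNCH_INTERRUPTED) {
		/* Possible only because SYNCH_FLAGS_INTERRUPTIBLE was set. */
	} else if (rc == ESYNCH_TIMEOUT) {
		/* The 1000 us elapsed without a wakeup. */
	}
}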
/kernel/trunk/generic/include/synch/waitq.h
52,12 → 52,12
};
 
#define waitq_sleep(wq) \
waitq_sleep_timeout((wq),SYNCH_NO_TIMEOUT,SYNCH_BLOCKING)
waitq_sleep_timeout((wq),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
 
extern void waitq_initialize(waitq_t *wq);
extern int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking);
extern int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags);
extern ipl_t waitq_sleep_prepare(waitq_t *wq);
extern int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking);
extern int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags);
extern void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl);
extern void waitq_wakeup(waitq_t *wq, bool all);
extern void _waitq_wakeup_unsafe(waitq_t *wq, bool all);
/kernel/trunk/generic/include/ipc/ipc.h
209,7 → 209,7
}call_t;
 
extern void ipc_init(void);
extern call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int nonblocking);
extern call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int flags);
extern void ipc_answer(answerbox_t *box, call_t *request);
extern int ipc_call(phone_t *phone, call_t *call);
extern void ipc_call_sync(phone_t *phone, call_t *request);
/kernel/trunk/generic/src/synch/rwlock.c
89,14 → 89,14
*
* @param rwl Reader/Writer lock.
* @param usec Timeout in microseconds.
* @param trylock Switches between blocking and non-blocking mode.
* @param flags Specify mode of operation.
*
For an exact description of the possible combinations of
* @usec and @trylock, see comment for waitq_sleep_timeout().
* usec and flags, see comment for waitq_sleep_timeout().
*
* @return See comment for waitq_sleep_timeout().
*/
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
{
ipl_t ipl;
int rc;
111,11 → 111,11
* Writers take the easy part.
* They just need to acquire the exclusive mutex.
*/
rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
if (SYNCH_FAILED(rc)) {
 
/*
* Lock operation timed out.
* Lock operation timed out or was interrupted.
* The state of rwl is UNKNOWN at this point.
* No claims about its holder can be made.
*/
143,14 → 143,14
*
* @param rwl Reader/Writer lock.
* @param usec Timeout in microseconds.
* @param trylock Switches between blocking and non-blocking mode.
* @param flags Select mode of operation.
*
For an exact description of the possible combinations of
* usec and trylock, see comment for waitq_sleep_timeout().
* usec and flags, see comment for waitq_sleep_timeout().
*
* @return See comment for waitq_sleep_timeout().
*/
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
{
int rc;
ipl_t ipl;
199,7 → 199,7
thread_register_call_me(release_spinlock, NULL);
#endif
rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
switch (rc) {
case ESYNCH_WOULD_BLOCK:
/*
208,8 → 208,9
thread_register_call_me(NULL, NULL);
spinlock_unlock(&rwl->lock);
case ESYNCH_TIMEOUT:
case ESYNCH_INTERRUPTED:
/*
* The sleep timeouted.
The sleep timed out or was interrupted.
* We just restore interrupt priority level.
*/
case ESYNCH_OK_BLOCKED:
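
Editor's note: from a caller's perspective the new ESYNCH_INTERRUPTED case behaves like a timeout: the lock is not acquired. A hypothetical caller:

static void rwlock_interruptible_sketch(rwlock_t *rwl)
{
	int rc;

	rc = _rwlock_read_lock_timeout(rwl, 10000, SYNCH_FLAGS_INTERRUPTIBLE);
	if (SYNCH_FAILED(rc)) {
		/* ESYNCH_TIMEOUT or ESYNCH_INTERRUPTED: rwl is not held. */
		return;
	}
	/* ESYNCH_OK_ATOMIC or ESYNCH_OK_BLOCKED: the read lock is held. */
	rwlock_read_unlock(rwl);
}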
/kernel/trunk/generic/src/synch/mutex.c
53,16 → 53,16
*
* @param mtx Mutex.
* @param usec Timeout in microseconds.
* @param trylock Switches between blocking and non-blocking mode.
* @param flags Specify mode of operation.
*
For an exact description of the possible combinations of
* usec and trylock, see comment for waitq_sleep_timeout().
* usec and flags, see comment for waitq_sleep_timeout().
*
* @return See comment for waitq_sleep_timeout().
*/
int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock)
int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int flags)
{
return _semaphore_down_timeout(&mtx->sem, usec, trylock);
return _semaphore_down_timeout(&mtx->sem, usec, flags);
}
 
/** Release mutex
75,3 → 75,4
{
semaphore_up(&mtx->sem);
}
 
/kernel/trunk/generic/src/synch/semaphore.c
67,16 → 67,16
*
* @param s Semaphore.
* @param usec Timeout in microseconds.
* @param trydown Switches between blocking and non-blocking mode.
* @param flags Select mode of operation.
*
For an exact description of the possible combinations of
* usec and trydown, see comment for waitq_sleep_timeout().
* usec and flags, see comment for waitq_sleep_timeout().
*
* @return See comment for waitq_sleep_timeout().
*/
int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int trydown)
int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int flags)
{
return waitq_sleep_timeout(&s->wq, usec, trydown);
return waitq_sleep_timeout(&s->wq, usec, flags);
}
 
/** Semaphore up
/kernel/trunk/generic/src/synch/waitq.c
135,6 → 135,14
grab_locks:
spinlock_lock(&t->lock);
if ((wq = t->sleep_queue)) { /* assignment */
if (!(t->sleep_interruptible)) {
/*
* The sleep cannot be interrupted.
*/
spinlock_unlock(&t->lock);
goto out;
}
if (!spinlock_trylock(&wq->lock)) {
spinlock_unlock(&t->lock);
goto grab_locks; /* avoid deadlock */
159,7 → 167,7
 
/** Sleep until either wakeup, timeout or interruption occurs
*
* This is a sleep implementation which allows itself to be
* This is a sleep implementation which allows itself to time out or to be
* interrupted from the sleep, restoring a failover context.
*
* Sleepers are organised in a FIFO fashion in a structure called wait queue.
169,18 → 177,22
*
* @param wq Pointer to wait queue.
* @param usec Timeout in microseconds.
* @param nonblocking Blocking vs. non-blocking operation mode switch.
* @param flags Specify mode of the sleep.
*
* If usec is greater than zero, regardless of the value of nonblocking,
* the call will not return until either timeout or wakeup comes.
* The sleep can be interrupted only if the
* SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
* If usec is greater than zero, regardless of the value of the
* SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either timeout,
* interruption or wakeup comes.
*
* If usec is zero and @nonblocking is zero (false), the call
* will not return until wakeup comes.
* If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, the call
* will not return until wakeup or interruption comes.
*
* If usec is zero and nonblocking is non-zero (true), the call will
* If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the call will
* immediately return, reporting either success or failure.
*
* @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
* @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED,
* ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
*
* @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
197,13 → 209,13
* @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
* attempted.
*/
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags)
{
ipl_t ipl;
int rc;
ipl = waitq_sleep_prepare(wq);
rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking);
rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
waitq_sleep_finish(wq, rc, ipl);
return rc;
}
276,11 → 288,11
*
* @param wq See waitq_sleep_timeout().
* @param usec See waitq_sleep_timeout().
* @param nonblocking See waitq_sleep_timeout().
* @param flags See waitq_sleep_timeout().
*
* @return See waitq_sleep_timeout().
*/
int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking)
int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags)
{
/* checks whether to go to sleep at all */
if (wq->missed_wakeups) {
288,7 → 300,7
return ESYNCH_OK_ATOMIC;
}
else {
if (nonblocking && (usec == 0)) {
if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
/* return immediately instead of going to sleep */
return ESYNCH_WOULD_BLOCK;
}
299,14 → 311,19
*/
spinlock_lock(&THREAD->lock);
 
/*
* Set context that will be restored if the sleep
* of this thread is ever interrupted.
*/
if (!context_save(&THREAD->sleep_interruption_context)) {
/* Short emulation of scheduler() return code. */
spinlock_unlock(&THREAD->lock);
return ESYNCH_INTERRUPTED;
if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
/*
* Set context that will be restored if the sleep
* of this thread is ever interrupted.
*/
THREAD->sleep_interruptible = true;
if (!context_save(&THREAD->sleep_interruption_context)) {
/* Short emulation of scheduler() return code. */
spinlock_unlock(&THREAD->lock);
return ESYNCH_INTERRUPTED;
}
} else {
THREAD->sleep_interruptible = false;
}
 
if (usec) {
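
Editor's note: to restate the usec/flags matrix from the doc comment above in code form (the queue is hypothetical; return values as documented):

static void waitq_matrix_sketch(waitq_t *wq)
{
	/* usec == 0, no flags: sleep until a wakeup arrives. */
	(void) waitq_sleep_timeout(wq, 0, SYNCH_FLAGS_NONE);

	/* usec == 0, non-blocking: never sleeps; ESYNCH_WOULD_BLOCK unless
	 * a missed wakeup can be consumed atomically. */
	(void) waitq_sleep_timeout(wq, 0, SYNCH_FLAGS_NON_BLOCKING);

	/* usec > 0: sleeps up to the timeout regardless of the non-blocking
	 * bit; interruptible only if SYNCH_FLAGS_INTERRUPTIBLE is set. */
	(void) waitq_sleep_timeout(wq, 1000, SYNCH_FLAGS_INTERRUPTIBLE);
}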
/kernel/trunk/generic/src/synch/futex.c
99,12 → 99,12
*
* @param uaddr Userspace address of the futex counter.
* @param usec If non-zero, number of microseconds this thread is willing to sleep.
* @param trydown If usec is zero and trydown is non-zero, conditional operation will be attempted.
* @param flags Select mode of operation.
*
@return One of ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See synch.h.
* If there is no physical mapping for uaddr ENOENT is returned.
*/
__native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int trydown)
__native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int flags)
{
futex_t *futex;
__address paddr;
130,7 → 130,7
 
futex = futex_find(paddr);
return (__native) waitq_sleep_timeout(&futex->wq, usec, trydown);
return (__native) waitq_sleep_timeout(&futex->wq, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
}
 
/** Wakeup one thread waiting in futex wait queue.
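
Editor's note: because the syscall now ORs in SYNCH_FLAGS_INTERRUPTIBLE unconditionally, a userspace waiter gains one more outcome to handle. A hypothetical interpretation of the returned value (the wrapper obtaining rc is not part of this change):

static void futex_rc_sketch(int rc)
{
	switch (rc) {
	case ESYNCH_OK_ATOMIC:
	case ESYNCH_OK_BLOCKED:
		/* Woken by sys_futex_wakeup(). */
		break;
	case ESYNCH_TIMEOUT:
		/* The usec budget elapsed. */
		break;
	case ESYNCH_INTERRUPTED:
		/* New in this revision: the sleep was interrupted. */
		break;
	case ENOENT:
		/* No physical mapping for uaddr. */
		break;
	}
}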
/kernel/trunk/generic/src/synch/condvar.c
74,13 → 74,16
* @param cv Condition variable.
* @param mtx Mutex.
* @param usec Timeout value in microseconds.
* @param flags Select mode of operation.
*
* For exact description of meaning of possible values of usec,
* see comment for waitq_sleep_timeout().
For an exact description of the meaning of possible combinations
* of usec and flags, see comment for waitq_sleep_timeout().
* Note that when SYNCH_FLAGS_NON_BLOCKING is specified here,
* ESYNCH_WOULD_BLOCK is always returned.
*
* @return See comment for waitq_sleep_timeout().
*/
int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec)
int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int flags)
{
int rc;
ipl_t ipl;
89,7 → 92,7
mutex_unlock(mtx);
 
cv->wq.missed_wakeups = 0; /* Enforce blocking. */
rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_BLOCKING);
rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
 
mutex_lock(mtx);
waitq_sleep_finish(&cv->wq, rc, ipl);
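
Editor's note: the canonical caller pattern is unchanged; only the expanded form shows the extra argument. A sketch with a hypothetical predicate:

static bool data_ready;	/* hypothetical predicate */

static void condvar_usage_sketch(condvar_t *cv, mutex_t *mtx)
{
	mutex_lock(mtx);
	while (!data_ready) {
		/* condvar_wait(cv, mtx) expands to _condvar_wait_timeout(cv,
		 * mtx, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE). */
		condvar_wait(cv, mtx);
	}
	/* ... consume the data under mtx ... */
	mutex_unlock(mtx);
}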
/kernel/trunk/generic/src/proc/thread.c
303,6 → 303,7
t->call_me_with = NULL;
timeout_initialize(&t->sleep_timeout);
t->sleep_interruptible = false;
t->sleep_queue = NULL;
t->timeout_pending = 0;
 
385,7 → 386,7
waitq_initialize(&wq);
 
(void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}
 
/** Register thread out-of-context invocation
/kernel/trunk/generic/src/ipc/sysipc.c
502,16 → 502,16
*
* @param calldata Pointer to buffer where the call/answer data is stored
* @param usec Timeout. See waitq_sleep_timeout() for explanation.
* @param nonblocking See waitq_sleep_timeout() for explanation.
* @param flags Select mode of sleep operation. See waitq_sleep_timeout() for explanation.
*
@return Callid, if callid & 1, then the call is an answer
*/
__native sys_ipc_wait_for_call(ipc_data_t *calldata, __u32 usec, int nonblocking)
__native sys_ipc_wait_for_call(ipc_data_t *calldata, __u32 usec, int flags)
{
call_t *call;
 
restart:
call = ipc_wait_for_call(&TASK->answerbox, usec, nonblocking);
call = ipc_wait_for_call(&TASK->answerbox, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
if (!call)
return 0;
 
/kernel/trunk/generic/src/ipc/ipc.c
142,7 → 142,7
request->callerbox = &sync_box;
 
ipc_call(phone, request);
ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT, SYNCH_BLOCKING);
ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
}
 
/** Answer message that was not dispatched and is not entered in
305,12 → 305,12
* @param box Answerbox expecting the call.
* @param usec Timeout in microseconds. See documentation for waitq_sleep_timeout() for
description of its special meaning.
* @param nonblocking Blocking vs. non-blocking operation mode switch. See documentation
* for waitq_sleep_timeout() for description of its special meaning.
@param flags Select mode of sleep operation. See documentation for waitq_sleep_timeout()
* for description of its special meaning.
@return Received message address
* - to distinguish between call and answer, look at call->flags
*/
call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int nonblocking)
call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int flags)
{
call_t *request;
ipl_t ipl;
317,7 → 317,7
int rc;
 
restart:
rc = waitq_sleep_timeout(&box->wq, usec, nonblocking);
rc = waitq_sleep_timeout(&box->wq, usec, flags);
if (SYNCH_FAILED(rc))
return NULL;
412,7 → 412,7
/* Wait for all async answers to arrive */
while (atomic_get(&task->active_calls)) {
call = ipc_wait_for_call(&task->answerbox, SYNCH_NO_TIMEOUT, SYNCH_BLOCKING);
call = ipc_wait_for_call(&task->answerbox, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
ASSERT((call->flags & IPC_CALL_ANSWERED) || (call->flags & IPC_CALL_NOTIF));
ASSERT(! (call->flags & IPC_CALL_STATIC_ALLOC));
/kernel/trunk/arch/ia32/src/smp/smp.c
167,7 → 167,7
* the time. After it comes completely up, it is
* supposed to wake us up.
*/
if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_BLOCKING) == ESYNCH_TIMEOUT)
if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT)
printf("%s: waiting for cpu%d (APIC ID = %d) timed out\n", __FUNCTION__, config.cpu_active > i ? config.cpu_active : i, ops->cpu_apic_id(i));
} else
printf("INIT IPI for l_apic%d failed\n", ops->cpu_apic_id(i));