/branches/rcu/kernel/generic/src/synch/waitq.c
0,0 → 1,453
/* |
* Copyright (c) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Wait queue. |
* |
* The wait queue is the basic synchronization primitive upon which all
* other synchronization primitives are built.
* |
* It allows threads to wait for an event in first-come, first-served |
* fashion. Conditional operation as well as timeouts and interruptions |
* are supported. |
*/ |
#include <synch/waitq.h> |
#include <synch/synch.h> |
#include <synch/spinlock.h> |
#include <proc/thread.h> |
#include <proc/scheduler.h> |
#include <arch/asm.h> |
#include <arch/types.h> |
#include <time/timeout.h> |
#include <arch.h> |
#include <context.h> |
#include <adt/list.h> |
static void waitq_timeouted_sleep(void *data); |
/** Initialize wait queue |
* |
* Initialize wait queue. |
* |
* @param wq Pointer to wait queue to be initialized. |
*/ |
void waitq_initialize(waitq_t *wq) |
{ |
spinlock_initialize(&wq->lock, "waitq_lock"); |
list_initialize(&wq->head); |
wq->missed_wakeups = 0; |
} |
/** Handle timeout during waitq_sleep_timeout() call |
* |
* This routine is called when waitq_sleep_timeout() times out.
* Interrupts are disabled. |
* |
* It tries to remove 'its' thread from the wait queue; it may
* fail to do so when the timeout and a regular wakeup overlap.
* In that case it behaves just as though there was no timeout
* at all.
* |
* @param data Pointer to the thread that called waitq_sleep_timeout(). |
*/ |
void waitq_timeouted_sleep(void *data) |
{ |
thread_t *t = (thread_t *) data; |
waitq_t *wq; |
bool do_wakeup = false; |
spinlock_lock(&threads_lock); |
if (!thread_exists(t)) |
goto out; |
grab_locks: |
spinlock_lock(&t->lock); |
if ((wq = t->sleep_queue)) { /* assignment */ |
if (!spinlock_trylock(&wq->lock)) { |
spinlock_unlock(&t->lock); |
goto grab_locks; /* avoid deadlock */ |
} |
list_remove(&t->wq_link); |
t->saved_context = t->sleep_timeout_context; |
do_wakeup = true; |
t->sleep_queue = NULL; |
spinlock_unlock(&wq->lock); |
} |
t->timeout_pending = false; |
spinlock_unlock(&t->lock); |
if (do_wakeup) |
thread_ready(t); |
out: |
spinlock_unlock(&threads_lock); |
} |
/** Interrupt sleeping thread. |
* |
* This routine attempts to interrupt a thread from its sleep in a wait queue.
* If the thread is not found sleeping, no action is taken. |
* |
* @param t Thread to be interrupted. |
*/ |
void waitq_interrupt_sleep(thread_t *t) |
{ |
waitq_t *wq; |
bool do_wakeup = false; |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&threads_lock); |
if (!thread_exists(t)) |
goto out; |
grab_locks: |
spinlock_lock(&t->lock); |
if ((wq = t->sleep_queue)) { /* assignment */ |
if (!(t->sleep_interruptible)) { |
/* |
* The sleep cannot be interrupted. |
*/ |
spinlock_unlock(&t->lock); |
goto out; |
} |
if (!spinlock_trylock(&wq->lock)) { |
spinlock_unlock(&t->lock); |
goto grab_locks; /* avoid deadlock */ |
} |
if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) |
t->timeout_pending = false; |
list_remove(&t->wq_link); |
t->saved_context = t->sleep_interruption_context; |
do_wakeup = true; |
t->sleep_queue = NULL; |
spinlock_unlock(&wq->lock); |
} |
spinlock_unlock(&t->lock); |
if (do_wakeup) |
thread_ready(t); |
out: |
spinlock_unlock(&threads_lock); |
interrupts_restore(ipl); |
} |
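/*
 * Illustrative usage sketch, not part of the original file: interrupting
 * an interruptible sleep. The sleeper must pass SYNCH_FLAGS_INTERRUPTIBLE,
 * otherwise waitq_interrupt_sleep() leaves it alone. The example_* names
 * are hypothetical; example_wq is assumed to have been set up with
 * waitq_initialize().
 */
static waitq_t example_wq;

static void example_sleeper(void)
{
	int rc;

	/* Block until wakeup, interruption or a 1 s timeout. */
	rc = waitq_sleep_timeout(&example_wq, 1000000,
	    SYNCH_FLAGS_INTERRUPTIBLE);
	if (rc == ESYNCH_INTERRUPTED) {
		/* Somebody called waitq_interrupt_sleep() on this thread. */
	}
}

static void example_interruptor(thread_t *t)
{
	/* Wake t via the interruption path; no-op if t is not sleeping. */
	waitq_interrupt_sleep(t);
}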
/** Sleep until either wakeup, timeout or interruption occurs |
* |
* This is a sleep implementation which allows itself to time out or to be |
* interrupted from the sleep, restoring a failover context. |
* |
* Sleepers are organised in a FIFO fashion in a structure called wait queue. |
* |
* This function is the basic primitive on which other functions such as
* waitq_sleep() and all the *_timeout() functions are built.
* |
* @param wq Pointer to wait queue. |
* @param usec Timeout in microseconds. |
* @param flags Specify mode of the sleep. |
* |
* The sleep can be interrupted only if the |
* SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags. |
* |
* If usec is greater than zero, regardless of the value of the |
* SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either |
* timeout, interruption or wakeup comes. |
* |
* If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, |
* the call will not return until wakeup or interruption comes. |
* |
* If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the |
* call will immediately return, reporting either success or failure. |
* |
* @return One of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED, |
* ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED. |
* |
* @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of the |
* call there was no pending wakeup. |
* |
* @li ESYNCH_TIMEOUT means that the sleep timed out. |
* |
* @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread. |
* |
* @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was |
* a pending wakeup at the time of the call. The caller was not put |
* asleep at all. |
* |
* @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was |
* attempted. |
*/ |
int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags) |
{ |
ipl_t ipl; |
int rc; |
ipl = waitq_sleep_prepare(wq); |
rc = waitq_sleep_timeout_unsafe(wq, usec, flags); |
waitq_sleep_finish(wq, rc, ipl); |
return rc; |
} |
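/*
 * Illustrative usage sketch, not part of the original file: the basic
 * sleep/wakeup pairing. SYNCH_FLAGS_NONE is assumed to be the zero flag
 * value from synch.h; wq_demo is a hypothetical, already initialized
 * wait queue.
 */
static waitq_t wq_demo;

static void demo_consumer(void)
{
	int rc;

	/* First try to consume a pending (missed) wakeup without blocking. */
	rc = waitq_sleep_timeout(&wq_demo, 0, SYNCH_FLAGS_NON_BLOCKING);
	if (rc == ESYNCH_WOULD_BLOCK) {
		/* Nothing pending; sleep with a 100 ms timeout instead. */
		rc = waitq_sleep_timeout(&wq_demo, 100000, SYNCH_FLAGS_NONE);
	}
}

static void demo_producer(void)
{
	/* Wake the first sleeper, or record a missed wakeup if none. */
	waitq_wakeup(&wq_demo, WAKEUP_FIRST);
}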
/** Prepare to sleep in a waitq. |
* |
* This function will return holding the lock of the wait queue |
* and interrupts disabled. |
* |
* @param wq Wait queue. |
* |
* @return Interrupt level as it existed on entry to this function. |
*/ |
ipl_t waitq_sleep_prepare(waitq_t *wq) |
{ |
ipl_t ipl; |
restart: |
ipl = interrupts_disable(); |
if (THREAD) { /* needed during system initialization */
/* |
* Busy waiting for a delayed timeout. |
* This is an important fix for the race condition between |
* a delayed timeout and a next call to waitq_sleep_timeout(). |
* Simply, the thread is not allowed to go to sleep if |
* there are timeouts in progress. |
*/ |
spinlock_lock(&THREAD->lock); |
if (THREAD->timeout_pending) { |
spinlock_unlock(&THREAD->lock); |
interrupts_restore(ipl); |
goto restart; |
} |
spinlock_unlock(&THREAD->lock); |
} |
spinlock_lock(&wq->lock); |
return ipl; |
} |
/** Finish waiting in a wait queue. |
* |
* This function restores interrupts to the state that existed prior |
* to the call to waitq_sleep_prepare(). If necessary, the wait queue |
* lock is released. |
* |
* @param wq Wait queue. |
* @param rc Return code of waitq_sleep_timeout_unsafe(). |
* @param ipl Interrupt level returned by waitq_sleep_prepare(). |
*/ |
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl) |
{ |
switch (rc) { |
case ESYNCH_WOULD_BLOCK: |
case ESYNCH_OK_ATOMIC: |
spinlock_unlock(&wq->lock); |
break; |
default: |
break; |
} |
interrupts_restore(ipl); |
} |
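/*
 * Illustrative sketch, not part of the original file: composing
 * waitq_sleep_prepare(), waitq_sleep_timeout_unsafe() and
 * waitq_sleep_finish() by hand, as _condvar_wait_timeout() does. The
 * split allows extra work (here a hypothetical cleanup callback, like
 * the mutex_unlock() in condvar.c) between taking the queue lock and
 * going to sleep. SYNCH_FLAGS_NONE is assumed from synch.h.
 */
static int custom_sleep_example(waitq_t *wq, void (*cleanup)(void))
{
	ipl_t ipl;
	int rc;

	/* Returns with wq->lock held and interrupts disabled. */
	ipl = waitq_sleep_prepare(wq);
	cleanup();
	rc = waitq_sleep_timeout_unsafe(wq, 0, SYNCH_FLAGS_NONE);
	waitq_sleep_finish(wq, rc, ipl);
	return rc;
}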
/** Internal implementation of waitq_sleep_timeout(). |
* |
* This function implements logic of sleeping in a wait queue. |
* This call must be preceded by a call to waitq_sleep_prepare()
* and followed by a call to waitq_sleep_finish().
* |
* @param wq See waitq_sleep_timeout(). |
* @param usec See waitq_sleep_timeout(). |
* @param flags See waitq_sleep_timeout(). |
* |
* @return See waitq_sleep_timeout(). |
*/ |
int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags) |
{ |
/* checks whether to go to sleep at all */ |
if (wq->missed_wakeups) { |
wq->missed_wakeups--; |
return ESYNCH_OK_ATOMIC; |
} |
else { |
if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) { |
/* return immediately instead of going to sleep */
return ESYNCH_WOULD_BLOCK; |
} |
} |
/* |
* Now we are firmly decided to go to sleep. |
*/ |
spinlock_lock(&THREAD->lock); |
if (flags & SYNCH_FLAGS_INTERRUPTIBLE) { |
/* |
* If the thread was already interrupted, |
* don't go to sleep at all. |
*/ |
if (THREAD->interrupted) { |
spinlock_unlock(&THREAD->lock); |
spinlock_unlock(&wq->lock); |
return ESYNCH_INTERRUPTED; |
} |
/* |
* Set context that will be restored if the sleep |
* of this thread is ever interrupted. |
*/ |
THREAD->sleep_interruptible = true; |
if (!context_save(&THREAD->sleep_interruption_context)) { |
/* Short emulation of scheduler() return code. */ |
spinlock_unlock(&THREAD->lock); |
return ESYNCH_INTERRUPTED; |
} |
} else { |
THREAD->sleep_interruptible = false; |
} |
if (usec) { |
/* We use the timeout variant. */ |
if (!context_save(&THREAD->sleep_timeout_context)) { |
/* Short emulation of scheduler() return code. */ |
spinlock_unlock(&THREAD->lock); |
return ESYNCH_TIMEOUT; |
} |
THREAD->timeout_pending = true; |
timeout_register(&THREAD->sleep_timeout, (uint64_t) usec, |
waitq_timeouted_sleep, THREAD); |
} |
list_append(&THREAD->wq_link, &wq->head); |
/* |
* Suspend execution. |
*/ |
THREAD->state = Sleeping; |
THREAD->sleep_queue = wq; |
spinlock_unlock(&THREAD->lock); |
/* wq->lock is released in scheduler_separated_stack() */ |
scheduler(); |
return ESYNCH_OK_BLOCKED; |
} |
/** Wake up first thread sleeping in a wait queue |
* |
* Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe |
* wrapper meant for general use. |
* |
* Besides its 'normal' wakeup operation, it attempts to unregister possible |
* timeout. |
* |
* @param wq Pointer to wait queue. |
* @param all If this is non-zero, all sleeping threads will be woken up and |
* missed count will be zeroed. |
*/ |
void waitq_wakeup(waitq_t *wq, bool all) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&wq->lock); |
_waitq_wakeup_unsafe(wq, all); |
spinlock_unlock(&wq->lock); |
interrupts_restore(ipl); |
} |
/** Internal SMP- and IRQ-unsafe version of waitq_wakeup() |
* |
* This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It |
* assumes wq->lock is already locked and interrupts are already disabled. |
* |
* @param wq Pointer to wait queue. |
* @param all If this is non-zero, all sleeping threads will be woken up and |
* missed count will be zeroed. |
*/ |
void _waitq_wakeup_unsafe(waitq_t *wq, bool all) |
{ |
thread_t *t; |
loop: |
if (list_empty(&wq->head)) { |
wq->missed_wakeups++; |
if (all) |
wq->missed_wakeups = 0; |
return; |
} |
t = list_get_instance(wq->head.next, thread_t, wq_link); |
/* |
* Lock the thread prior to removing it from the wq. |
* This is not necessary because of mutual exclusion |
* (the link belongs to the wait queue), but because |
* of synchronization with waitq_timeouted_sleep() |
* and waitq_interrupt_sleep().
* |
* In order for these two functions to work, the following |
* invariant must hold: |
* |
* t->sleep_queue != NULL <=> t sleeps in a wait queue |
* |
* For an observer who locks the thread, the invariant |
* holds only when the lock is held prior to removing |
* it from the wait queue. |
*/ |
spinlock_lock(&t->lock); |
list_remove(&t->wq_link); |
if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) |
t->timeout_pending = false; |
t->sleep_queue = NULL; |
spinlock_unlock(&t->lock); |
thread_ready(t); |
if (all) |
goto loop; |
} |
/** @} |
*/ |
/branches/rcu/kernel/generic/src/synch/rwlock.c
0,0 → 1,393
/* |
* Copyright (c) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Reader/Writer locks. |
* |
* A reader/writer lock can be held by multiple readers at a time. |
* Or it can be exclusively held by a sole writer at a time. |
* |
* These locks are not recursive. |
* Because a technique called direct hand-off is used and because |
* waiting takes place in a single wait queue, neither readers |
* nor writers will suffer starvation. |
* |
* If there is a writer followed by a reader waiting for the rwlock |
* and the writer times out, all leading readers are automatically woken up |
* and allowed in. |
*/ |
/* |
* NOTE ON rwlock_holder_type |
* This field is set on an attempt to acquire the exclusive mutex |
* to the respective value depending on whether the caller is a reader
* or a writer. The field is examined only if the thread had been
* previously blocked on the exclusive mutex. Thus it is safe
* to store the rwlock type in the thread structure, because |
* each thread can block on only one rwlock at a time. |
*/ |
#include <synch/rwlock.h> |
#include <synch/spinlock.h> |
#include <synch/mutex.h> |
#include <synch/waitq.h> |
#include <synch/synch.h> |
#include <adt/list.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <proc/thread.h> |
#include <panic.h> |
#define ALLOW_ALL 0 |
#define ALLOW_READERS_ONLY 1 |
static void let_others_in(rwlock_t *rwl, int readers_only); |
static void release_spinlock(void *arg); |
/** Initialize reader/writer lock |
* |
* Initialize reader/writer lock. |
* |
* @param rwl Reader/Writer lock. |
*/ |
void rwlock_initialize(rwlock_t *rwl) { |
spinlock_initialize(&rwl->lock, "rwlock_t"); |
mutex_initialize(&rwl->exclusive); |
rwl->readers_in = 0; |
} |
/** Acquire reader/writer lock for writing
*
* Acquire reader/writer lock for writing.
* Timeout and willingness to block may be specified. |
* |
* @param rwl Reader/Writer lock. |
* @param usec Timeout in microseconds. |
* @param flags Specify mode of operation. |
* |
* For exact description of possible combinations of |
* usec and flags, see comment for waitq_sleep_timeout(). |
* |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags) |
{ |
ipl_t ipl; |
int rc; |
ipl = interrupts_disable(); |
spinlock_lock(&THREAD->lock); |
THREAD->rwlock_holder_type = RWLOCK_WRITER; |
spinlock_unlock(&THREAD->lock); |
interrupts_restore(ipl); |
/* |
* Writers take the easy part. |
* They just need to acquire the exclusive mutex. |
*/ |
rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); |
if (SYNCH_FAILED(rc)) { |
/* |
* Lock operation timed out or was interrupted. |
* The state of rwl is UNKNOWN at this point. |
* No claims about its holder can be made. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&rwl->lock); |
/* |
* Now when rwl is locked, we can inspect it again. |
* If it is held by some readers already, we can let |
* readers from the head of the wait queue in. |
*/ |
if (rwl->readers_in) |
let_others_in(rwl, ALLOW_READERS_ONLY); |
spinlock_unlock(&rwl->lock); |
interrupts_restore(ipl); |
} |
return rc; |
} |
/** Acquire reader/writer lock for reading
*
* Acquire reader/writer lock for reading.
* Timeout and willingness to block may be specified. |
* |
* @param rwl Reader/Writer lock. |
* @param usec Timeout in microseconds. |
* @param flags Select mode of operation. |
* |
* For exact description of possible combinations of |
* usec and flags, see comment for waitq_sleep_timeout(). |
* |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags) |
{ |
int rc; |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&THREAD->lock); |
THREAD->rwlock_holder_type = RWLOCK_READER; |
spinlock_unlock(&THREAD->lock); |
spinlock_lock(&rwl->lock); |
/* |
* Find out whether we can get what we want without blocking. |
*/ |
rc = mutex_trylock(&rwl->exclusive); |
if (SYNCH_FAILED(rc)) { |
/* |
* 'exclusive' mutex is being held by someone else. |
* If the holder is a reader and there is no one |
* else waiting for it, we can enter the critical |
* section. |
*/ |
if (rwl->readers_in) { |
spinlock_lock(&rwl->exclusive.sem.wq.lock); |
if (list_empty(&rwl->exclusive.sem.wq.head)) { |
/* |
* We can enter. |
*/ |
spinlock_unlock(&rwl->exclusive.sem.wq.lock); |
goto shortcut; |
} |
spinlock_unlock(&rwl->exclusive.sem.wq.lock); |
} |
/* |
* In order to prevent a race condition when a reader |
* could block another reader at the head of the waitq, |
* we register a function to unlock rwl->lock |
* after this thread is put asleep. |
*/ |
#ifdef CONFIG_SMP |
thread_register_call_me(release_spinlock, &rwl->lock); |
#else |
thread_register_call_me(release_spinlock, NULL); |
#endif |
rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); |
switch (rc) { |
case ESYNCH_WOULD_BLOCK: |
/* |
* release_spinlock() wasn't called |
*/ |
thread_register_call_me(NULL, NULL); |
spinlock_unlock(&rwl->lock); |
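/* Fall through: the interrupt priority level is restored below. */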
case ESYNCH_TIMEOUT: |
case ESYNCH_INTERRUPTED: |
/* |
* The sleep timed out or was interrupted.
* We just restore the interrupt priority level.
*/ |
case ESYNCH_OK_BLOCKED: |
/* |
* We were woken with rwl->readers_in already |
* incremented. |
* |
* Note that this arrangement avoids race condition |
* between two concurrent readers. (Race is avoided if |
* 'exclusive' is locked at the same time as |
* 'readers_in' is incremented. Same time means both |
* events happen atomically when rwl->lock is held.) |
*/ |
interrupts_restore(ipl); |
break; |
case ESYNCH_OK_ATOMIC: |
panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC\n"); |
break; |
default: |
panic("invalid ESYNCH\n"); |
break; |
} |
return rc; |
} |
shortcut: |
/* |
* We can increment readers_in only if we didn't go to sleep. |
* For sleepers, let_others_in() will do the job.
*/ |
rwl->readers_in++; |
spinlock_unlock(&rwl->lock); |
interrupts_restore(ipl); |
return ESYNCH_OK_ATOMIC; |
} |
/** Release reader/writer lock held by writer |
* |
* Release reader/writer lock held by writer. |
* Hand off reader/writer lock ownership directly
* to waiting readers or a writer. |
* |
* @param rwl Reader/Writer lock. |
*/ |
void rwlock_write_unlock(rwlock_t *rwl) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&rwl->lock); |
let_others_in(rwl, ALLOW_ALL); |
spinlock_unlock(&rwl->lock); |
interrupts_restore(ipl); |
} |
/** Release reader/writer lock held by reader |
* |
* Release reader/writer lock held by reader. |
* Hand off reader/writer lock ownership directly
* to a waiting writer, or do nothing if the lock
* is still held by other readers.
* |
* @param rwl Reader/Writer lock. |
*/ |
void rwlock_read_unlock(rwlock_t *rwl) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&rwl->lock); |
if (!--rwl->readers_in) |
let_others_in(rwl, ALLOW_ALL); |
spinlock_unlock(&rwl->lock); |
interrupts_restore(ipl); |
} |
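/*
 * Illustrative usage sketch, not part of the original file: typical
 * reader and writer critical sections. rwlock_read_lock() and
 * rwlock_write_lock() are the non-timeout wrappers used elsewhere in
 * the kernel (e.g. futex.c); demo_rwl is a hypothetical, already
 * initialized lock.
 */
static rwlock_t demo_rwl;

static void demo_reader(void)
{
	rwlock_read_lock(&demo_rwl);
	/* Read shared data here, possibly concurrently with other readers. */
	rwlock_read_unlock(&demo_rwl);
}

static void demo_writer(void)
{
	rwlock_write_lock(&demo_rwl);
	/* Modify shared data here; access is exclusive. */
	rwlock_write_unlock(&demo_rwl);
}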
/** Direct handoff of reader/writer lock ownership. |
* |
* Direct handoff of reader/writer lock ownership |
* to waiting readers or a writer. |
* |
* Must be called with rwl->lock locked. |
* Must be called with interrupts_disable()'d. |
* |
* @param rwl Reader/Writer lock. |
* @param readers_only See the description below. |
* |
* If readers_only is false: (unlock scenario) |
* Let the first sleeper on 'exclusive' mutex in, no matter |
* whether it is a reader or a writer. If there are more leading |
* readers in line, let each of them in. |
* |
* Otherwise: (timeout scenario) |
* Let all leading readers in. |
*/ |
void let_others_in(rwlock_t *rwl, int readers_only) |
{ |
rwlock_type_t type = RWLOCK_NONE; |
thread_t *t = NULL; |
bool one_more = true; |
spinlock_lock(&rwl->exclusive.sem.wq.lock); |
if (!list_empty(&rwl->exclusive.sem.wq.head)) |
t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, |
wq_link); |
do { |
if (t) { |
spinlock_lock(&t->lock); |
type = t->rwlock_holder_type; |
spinlock_unlock(&t->lock); |
} |
/* |
* If readers_only is true, we wake all leading readers |
* if and only if rwl is locked by another reader. |
* Assumption: readers_only ==> rwl->readers_in |
*/ |
if (readers_only && (type != RWLOCK_READER)) |
break; |
if (type == RWLOCK_READER) { |
/* |
* Waking up a reader. |
* We are responsible for incrementing rwl->readers_in |
* for it. |
*/ |
rwl->readers_in++; |
} |
/*
* Only the last iteration through this loop can increment
* rwl->exclusive.sem.wq.missed_wakeups. All preceding
* iterations will wake up a thread.
*/
/*
* We call the internal version of waitq_wakeup(), which
* relies on the fact that the waitq is already locked.
*/
_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST); |
t = NULL; |
if (!list_empty(&rwl->exclusive.sem.wq.head)) { |
t = list_get_instance(rwl->exclusive.sem.wq.head.next, |
thread_t, wq_link); |
if (t) { |
spinlock_lock(&t->lock); |
if (t->rwlock_holder_type != RWLOCK_READER) |
one_more = false; |
spinlock_unlock(&t->lock); |
} |
} |
} while ((type == RWLOCK_READER) && t && one_more); |
spinlock_unlock(&rwl->exclusive.sem.wq.lock); |
} |
/** Release spinlock callback |
* |
* This is a callback function invoked from the scheduler. |
* The callback is registered in _rwlock_read_lock_timeout(). |
* |
* @param arg Spinlock. |
*/ |
void release_spinlock(void *arg) |
{ |
spinlock_unlock((spinlock_t *) arg); |
} |
/** @} |
*/ |
/branches/rcu/kernel/generic/src/synch/condvar.c
0,0 → 1,107
/* |
* Copyright (c) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Condition variables. |
*/ |
#include <synch/condvar.h> |
#include <synch/mutex.h> |
#include <synch/waitq.h> |
#include <synch/synch.h> |
#include <arch.h> |
/** Initialize condition variable. |
* |
* @param cv Condition variable. |
*/ |
void condvar_initialize(condvar_t *cv) |
{ |
waitq_initialize(&cv->wq); |
} |
/** |
* Signal the condition has become true |
* to the first waiting thread by waking it up. |
* |
* @param cv Condition variable. |
*/ |
void condvar_signal(condvar_t *cv) |
{ |
waitq_wakeup(&cv->wq, WAKEUP_FIRST); |
} |
/** |
* Signal the condition has become true |
* to all waiting threads by waking them up. |
* |
* @param cv Condition variable. |
*/ |
void condvar_broadcast(condvar_t *cv) |
{ |
waitq_wakeup(&cv->wq, WAKEUP_ALL); |
} |
/** Wait for the condition becoming true. |
* |
* @param cv Condition variable. |
* @param mtx Mutex. |
* @param usec Timeout value in microseconds. |
* @param flags Select mode of operation. |
* |
* For exact description of meaning of possible combinations |
* of usec and flags, see comment for waitq_sleep_timeout(). |
* Note that when SYNCH_FLAGS_NON_BLOCKING is specified here, |
* ESYNCH_WOULD_BLOCK is always returned. |
* |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags) |
{ |
int rc; |
ipl_t ipl; |
ipl = waitq_sleep_prepare(&cv->wq); |
mutex_unlock(mtx); |
cv->wq.missed_wakeups = 0; /* Enforce blocking. */ |
rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags); |
mutex_lock(mtx); |
waitq_sleep_finish(&cv->wq, rc, ipl); |
return rc; |
} |
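/*
 * Illustrative usage sketch, not part of the original file: the classic
 * condition variable pattern, calling _condvar_wait_timeout() directly.
 * SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE are assumed to be the zero
 * timeout and flag values from synch.h; the demo_* names are
 * hypothetical.
 */
static mutex_t demo_mtx;
static condvar_t demo_cv;
static bool demo_ready;

static void demo_waiter(void)
{
	mutex_lock(&demo_mtx);
	/* Re-test the predicate after every wakeup. */
	while (!demo_ready)
		_condvar_wait_timeout(&demo_cv, &demo_mtx, SYNCH_NO_TIMEOUT,
		    SYNCH_FLAGS_NONE);
	mutex_unlock(&demo_mtx);
}

static void demo_signaller(void)
{
	mutex_lock(&demo_mtx);
	demo_ready = true;
	condvar_signal(&demo_cv);
	mutex_unlock(&demo_mtx);
}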
/** @} |
*/ |
/branches/rcu/kernel/generic/src/synch/spinlock.c
0,0 → 1,163
/* |
* Copyright (c) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Spinlocks. |
*/ |
#include <synch/spinlock.h> |
#include <atomic.h> |
#include <arch/barrier.h> |
#include <arch.h> |
#include <preemption.h> |
#include <print.h> |
#include <debug.h> |
#include <symtab.h> |
#ifdef CONFIG_FB |
#include <genarch/fb/fb.h> |
#endif |
#ifdef CONFIG_SMP |
/** Initialize spinlock
*
* Initialize spinlock.
*
* @param sl Pointer to spinlock_t structure.
* @param name Symbolic name of the spinlock, used when
* CONFIG_DEBUG_SPINLOCK is enabled.
*/
void spinlock_initialize(spinlock_t *sl, char *name) |
{ |
atomic_set(&sl->val, 0); |
#ifdef CONFIG_DEBUG_SPINLOCK |
sl->name = name; |
#endif |
} |
/** Lock spinlock |
* |
* Lock spinlock. |
* This version has limited ability to report
* a possible occurrence of deadlock.
* |
* @param sl Pointer to spinlock_t structure. |
*/ |
#ifdef CONFIG_DEBUG_SPINLOCK |
#define DEADLOCK_THRESHOLD 100000000 |
void spinlock_lock_debug(spinlock_t *sl) |
{ |
count_t i = 0; |
char *symbol; |
bool deadlock_reported = false; |
preemption_disable(); |
while (test_and_set(&sl->val)) { |
/* |
* We need to be careful about printflock and fb_lock. |
* Both of them are used to report deadlocks via |
* printf() and fb_putchar(). |
* |
* We trust our code that there is no possible deadlock |
* caused by these two locks (except when an exception |
* is triggered for instance by printf() or fb_putchar()). |
* However, we encountered false positives caused by very |
* slow VESA framebuffer interaction (especially when |
* run in a simulator) that caused problems with both |
* printflock and fb_lock. |
* |
* Possible deadlocks on both printflock and fb_lock |
* are therefore not reported as they would cause an |
* infinite recursion. |
*/ |
if (sl == &printflock) |
continue; |
#ifdef CONFIG_FB |
if (sl == &fb_lock) |
continue; |
#endif |
if (i++ > DEADLOCK_THRESHOLD) { |
printf("cpu%d: looping on spinlock %.*p:%s, " |
"caller=%.*p", CPU->id, sizeof(uintptr_t) * 2, sl, |
sl->name, sizeof(uintptr_t) * 2, CALLER); |
symbol = get_symtab_entry(CALLER); |
if (symbol) |
printf("(%s)", symbol); |
printf("\n"); |
i = 0; |
deadlock_reported = true; |
} |
} |
if (deadlock_reported) |
printf("cpu%d: not deadlocked\n", CPU->id); |
/* |
* Prevent the critical section code from bleeding out above this point.
*/ |
CS_ENTER_BARRIER(); |
} |
#endif |
/** Lock spinlock conditionally |
* |
* Lock spinlock conditionally. |
* If the spinlock is not available at the moment, |
* signal failure. |
* |
* @param sl Pointer to spinlock_t structure. |
* |
* @return Zero on failure, non-zero otherwise. |
*/ |
int spinlock_trylock(spinlock_t *sl) |
{ |
int rc; |
preemption_disable(); |
rc = !test_and_set(&sl->val); |
/* |
* Prevent the critical section code from bleeding out above this point.
*/ |
CS_ENTER_BARRIER(); |
if (!rc) |
preemption_enable(); |
return rc; |
} |
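/*
 * Illustrative sketch, not part of the original file: using
 * spinlock_trylock() to respect lock ordering. This mirrors the
 * grab_locks pattern in waitq.c: if the inner lock cannot be taken,
 * drop the outer lock and retry instead of risking deadlock. The lock
 * names are hypothetical.
 */
static spinlock_t demo_outer;
static spinlock_t demo_inner;

static void demo_ordered_locking(void)
{
retry:
	spinlock_lock(&demo_outer);
	if (!spinlock_trylock(&demo_inner)) {
		/* Avoid deadlock: back off and start over. */
		spinlock_unlock(&demo_outer);
		goto retry;
	}
	/* Both locks are held here. */
	spinlock_unlock(&demo_inner);
	spinlock_unlock(&demo_outer);
}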
#endif |
/** @} |
*/ |
/branches/rcu/kernel/generic/src/synch/futex.c
0,0 → 1,345
/* |
* Copyright (c) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Kernel backend for futexes. |
*/ |
#include <synch/futex.h> |
#include <synch/rwlock.h> |
#include <synch/spinlock.h> |
#include <synch/synch.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/slab.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <genarch/mm/page_pt.h> |
#include <genarch/mm/page_ht.h> |
#include <adt/hash_table.h> |
#include <adt/list.h> |
#include <arch.h> |
#include <align.h> |
#include <panic.h> |
#include <errno.h> |
#include <print.h> |
#define FUTEX_HT_SIZE 1024 /* keep it a power of 2 */ |
static void futex_initialize(futex_t *futex); |
static futex_t *futex_find(uintptr_t paddr); |
static index_t futex_ht_hash(unative_t *key); |
static bool futex_ht_compare(unative_t *key, count_t keys, link_t *item); |
static void futex_ht_remove_callback(link_t *item); |
/** |
* Read-write lock protecting global futex hash table. |
* It is also used to serialize access to all futex_t structures. |
* Must be acquired before the task futex B+tree lock. |
*/ |
static rwlock_t futex_ht_lock; |
/** Futex hash table. */ |
static hash_table_t futex_ht; |
/** Futex hash table operations. */ |
static hash_table_operations_t futex_ht_ops = { |
.hash = futex_ht_hash, |
.compare = futex_ht_compare, |
.remove_callback = futex_ht_remove_callback |
}; |
/** Initialize futex subsystem. */ |
void futex_init(void) |
{ |
rwlock_initialize(&futex_ht_lock); |
hash_table_create(&futex_ht, FUTEX_HT_SIZE, 1, &futex_ht_ops); |
} |
/** Initialize kernel futex structure. |
* |
* @param futex Kernel futex structure. |
*/ |
void futex_initialize(futex_t *futex) |
{ |
waitq_initialize(&futex->wq); |
link_initialize(&futex->ht_link); |
futex->paddr = 0; |
futex->refcount = 1; |
} |
/** Sleep in futex wait queue. |
* |
* @param uaddr Userspace address of the futex counter. |
* @param usec If non-zero, number of microseconds this thread is willing to |
* sleep. |
* @param flags Select mode of operation. |
* |
* @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See |
* synch.h. If there is no physical mapping for uaddr, ENOENT is returned.
*/ |
unative_t sys_futex_sleep_timeout(uintptr_t uaddr, uint32_t usec, int flags) |
{ |
futex_t *futex; |
uintptr_t paddr; |
pte_t *t; |
ipl_t ipl; |
ipl = interrupts_disable(); |
/* |
* Find physical address of futex counter. |
*/ |
page_table_lock(AS, true); |
t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE)); |
if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { |
page_table_unlock(AS, true); |
interrupts_restore(ipl); |
return (unative_t) ENOENT; |
} |
paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE)); |
page_table_unlock(AS, true); |
interrupts_restore(ipl); |
futex = futex_find(paddr); |
return (unative_t) waitq_sleep_timeout(&futex->wq, usec, flags | |
SYNCH_FLAGS_INTERRUPTIBLE); |
} |
/** Wakeup one thread waiting in futex wait queue. |
* |
* @param uaddr Userspace address of the futex counter. |
* |
* @return ENOENT if there is no physical mapping for uaddr. |
*/ |
unative_t sys_futex_wakeup(uintptr_t uaddr) |
{ |
futex_t *futex; |
uintptr_t paddr; |
pte_t *t; |
ipl_t ipl; |
ipl = interrupts_disable(); |
/* |
* Find physical address of futex counter. |
*/ |
page_table_lock(AS, true); |
t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE)); |
if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { |
page_table_unlock(AS, true); |
interrupts_restore(ipl); |
return (unative_t) ENOENT; |
} |
paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE)); |
page_table_unlock(AS, true); |
interrupts_restore(ipl); |
futex = futex_find(paddr); |
waitq_wakeup(&futex->wq, WAKEUP_FIRST); |
return 0; |
} |
/** Find kernel address of the futex structure corresponding to paddr. |
* |
* If the structure does not exist already, a new one is created. |
* |
* @param paddr Physical address of the userspace futex counter. |
* |
* @return Address of the kernel futex structure. |
*/ |
futex_t *futex_find(uintptr_t paddr) |
{ |
link_t *item; |
futex_t *futex; |
btree_node_t *leaf; |
/* |
* Find the respective futex structure |
* or allocate new one if it does not exist already. |
*/ |
rwlock_read_lock(&futex_ht_lock); |
item = hash_table_find(&futex_ht, &paddr); |
if (item) { |
futex = hash_table_get_instance(item, futex_t, ht_link); |
/* |
* See if the current task knows this futex. |
*/ |
mutex_lock(&TASK->futexes_lock); |
if (!btree_search(&TASK->futexes, paddr, &leaf)) { |
/* |
* The futex is new to the current task. |
* However, we only have read access. |
* Gain write access and try again. |
*/ |
mutex_unlock(&TASK->futexes_lock); |
goto gain_write_access; |
} |
mutex_unlock(&TASK->futexes_lock); |
rwlock_read_unlock(&futex_ht_lock); |
} else { |
gain_write_access: |
/* |
* Upgrade to writer is not currently supported, |
* therefore, it is necessary to release the read lock |
* and reacquire it as a writer. |
*/ |
rwlock_read_unlock(&futex_ht_lock); |
rwlock_write_lock(&futex_ht_lock); |
/* |
* Avoid possible race condition by searching |
* the hash table once again with write access. |
*/ |
item = hash_table_find(&futex_ht, &paddr); |
if (item) { |
futex = hash_table_get_instance(item, futex_t, ht_link); |
/* |
* See if this futex is known to the current task. |
*/ |
mutex_lock(&TASK->futexes_lock); |
if (!btree_search(&TASK->futexes, paddr, &leaf)) { |
/* |
* The futex is new to the current task. |
* Increase its reference count and insert it into the
* current task's B+tree of known futexes.
*/ |
futex->refcount++; |
btree_insert(&TASK->futexes, paddr, futex, |
leaf); |
} |
mutex_unlock(&TASK->futexes_lock); |
rwlock_write_unlock(&futex_ht_lock); |
} else { |
futex = (futex_t *) malloc(sizeof(futex_t), 0); |
futex_initialize(futex); |
futex->paddr = paddr; |
hash_table_insert(&futex_ht, &paddr, &futex->ht_link); |
/* |
* This is the first task referencing the futex. |
* It can be directly inserted into its |
* B+tree of known futexes. |
*/ |
mutex_lock(&TASK->futexes_lock); |
btree_insert(&TASK->futexes, paddr, futex, NULL); |
mutex_unlock(&TASK->futexes_lock); |
rwlock_write_unlock(&futex_ht_lock); |
} |
} |
return futex; |
} |
/** Compute hash index into futex hash table. |
* |
* @param key Address where the key (i.e. physical address of futex counter) is |
* stored. |
* |
* @return Index into futex hash table. |
*/ |
index_t futex_ht_hash(unative_t *key) |
{ |
return *key & (FUTEX_HT_SIZE-1); |
} |
/** Compare futex hash table item with a key.
*
* @param key Address where the key (i.e. physical address of futex counter) is
* stored.
* @param keys Number of keys in the key array (always 1 here).
* @param item Hash table item to be compared with the key.
*
* @return True if the item matches the key. False otherwise.
*/
bool futex_ht_compare(unative_t *key, count_t keys, link_t *item) |
{ |
futex_t *futex; |
ASSERT(keys == 1); |
futex = hash_table_get_instance(item, futex_t, ht_link); |
return *key == futex->paddr; |
} |
/** Callback for removing items from the futex hash table.
* |
* @param item Item removed from the hash table. |
*/ |
void futex_ht_remove_callback(link_t *item) |
{ |
futex_t *futex; |
futex = hash_table_get_instance(item, futex_t, ht_link); |
free(futex); |
} |
/** Remove references from futexes known to the current task. */ |
void futex_cleanup(void) |
{ |
link_t *cur; |
rwlock_write_lock(&futex_ht_lock); |
mutex_lock(&TASK->futexes_lock); |
for (cur = TASK->futexes.leaf_head.next; |
cur != &TASK->futexes.leaf_head; cur = cur->next) { |
btree_node_t *node; |
int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) { |
futex_t *ftx; |
uintptr_t paddr = node->key[i]; |
ftx = (futex_t *) node->value[i]; |
if (--ftx->refcount == 0) |
hash_table_remove(&futex_ht, &paddr, 1); |
} |
} |
mutex_unlock(&TASK->futexes_lock); |
rwlock_write_unlock(&futex_ht_lock); |
} |
/** @} |
*/ |
/branches/rcu/kernel/generic/src/synch/mutex.c
0,0 → 1,84
/* |
* Copyright (c) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Mutexes. |
*/ |
#include <synch/mutex.h> |
#include <synch/semaphore.h> |
#include <synch/synch.h> |
/** Initialize mutex |
* |
* Initialize mutex. |
* |
* @param mtx Mutex. |
*/ |
void mutex_initialize(mutex_t *mtx) |
{ |
semaphore_initialize(&mtx->sem, 1); |
} |
/** Acquire mutex |
* |
* Acquire mutex. |
* Timeout mode and non-blocking mode can be requested. |
* |
* @param mtx Mutex. |
* @param usec Timeout in microseconds. |
* @param flags Specify mode of operation. |
* |
* For exact description of possible combinations of |
* usec and flags, see comment for waitq_sleep_timeout(). |
* |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags) |
{ |
return _semaphore_down_timeout(&mtx->sem, usec, flags); |
} |
/** Release mutex |
* |
* Release mutex. |
* |
* @param mtx Mutex. |
*/ |
void mutex_unlock(mutex_t *mtx) |
{ |
semaphore_up(&mtx->sem); |
} |
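/*
 * Illustrative usage sketch, not part of the original file: taking a
 * mutex with a timeout and checking the result with SYNCH_FAILED(), as
 * rwlock.c does. SYNCH_FLAGS_NONE is assumed from synch.h; demo_lock is
 * hypothetical.
 */
static mutex_t demo_lock;

static void demo_mutex_timeout(void)
{
	int rc;

	/* Give up after 500 ms instead of blocking indefinitely. */
	rc = _mutex_lock_timeout(&demo_lock, 500000, SYNCH_FLAGS_NONE);
	if (SYNCH_FAILED(rc))
		return;	/* Timed out or interrupted; the mutex is not held. */
	/* Critical section. */
	mutex_unlock(&demo_lock);
}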
/** @} |
*/ |
/branches/rcu/kernel/generic/src/synch/semaphore.c
0,0 → 1,98
/* |
* Copyright (c) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Semaphores. |
*/ |
#include <synch/semaphore.h> |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
#include <synch/synch.h> |
#include <arch/asm.h> |
#include <arch.h> |
/** Initialize semaphore |
* |
* Initialize semaphore. |
* |
* @param s Semaphore. |
* @param val Maximum number of threads allowed to enter the critical section.
*/ |
void semaphore_initialize(semaphore_t *s, int val) |
{ |
ipl_t ipl; |
waitq_initialize(&s->wq); |
ipl = interrupts_disable(); |
spinlock_lock(&s->wq.lock); |
s->wq.missed_wakeups = val; |
spinlock_unlock(&s->wq.lock); |
interrupts_restore(ipl); |
} |
/** Semaphore down |
* |
* Semaphore down. |
* Conditional mode and mode with timeout can be requested. |
* |
* @param s Semaphore. |
* @param usec Timeout in microseconds. |
* @param flags Select mode of operation. |
* |
* For exact description of possible combinations of |
* usec and flags, see comment for waitq_sleep_timeout(). |
* |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _semaphore_down_timeout(semaphore_t *s, uint32_t usec, int flags) |
{ |
return waitq_sleep_timeout(&s->wq, usec, flags); |
} |
/** Semaphore up |
* |
* Semaphore up. |
* |
* @param s Semaphore. |
*/ |
void semaphore_up(semaphore_t *s) |
{ |
waitq_wakeup(&s->wq, WAKEUP_FIRST); |
} |
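/*
 * Illustrative usage sketch, not part of the original file: a counting
 * semaphore guarding a pool of three identical resources, so at most
 * three threads hold one at a time. SYNCH_FLAGS_NONE is assumed from
 * synch.h; the demo_* names are hypothetical.
 */
static semaphore_t demo_pool;

static void demo_pool_init(void)
{
	semaphore_initialize(&demo_pool, 3);
}

static void demo_pool_user(void)
{
	/* Block until one of the three slots is free. */
	_semaphore_down_timeout(&demo_pool, 0, SYNCH_FLAGS_NONE);
	/* ... use the resource ... */
	semaphore_up(&demo_pool);
}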
/** @} |
*/ |