Subversion Repositories HelenOS-historic

Compare Revisions

Rev 412 → Rev 413

/SPARTAN/trunk/src/synch/rwlock.c
96,14 → 96,14
*/
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
- pri_t pri;
+ ipl_t ipl;
int rc;
- pri = cpu_priority_high();
+ ipl = interrupts_disable();
spinlock_lock(&THREAD->lock);
THREAD->rwlock_holder_type = RWLOCK_WRITER;
spinlock_unlock(&THREAD->lock);
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
 
/*
* Writers take the easy part.
118,7 → 118,7
* No claims about its holder can be made.
*/
- pri = cpu_priority_high();
+ ipl = interrupts_disable();
spinlock_lock(&rwl->lock);
/*
* Now when rwl is locked, we can inspect it again.
128,7 → 128,7
if (rwl->readers_in)
let_others_in(rwl, ALLOW_READERS_ONLY);
spinlock_unlock(&rwl->lock);
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
}
return rc;
151,9 → 151,9
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
int rc;
- pri_t pri;
+ ipl_t ipl;
- pri = cpu_priority_high();
+ ipl = interrupts_disable();
spinlock_lock(&THREAD->lock);
THREAD->rwlock_holder_type = RWLOCK_READER;
spinlock_unlock(&THREAD->lock);
204,7 → 204,7
case ESYNCH_TIMEOUT:
/*
* The sleep timed out.
- * We just restore the cpu priority.
+ * We just restore the interrupt priority level.
*/
case ESYNCH_OK_BLOCKED:
/*
215,7 → 215,7
* Same time means both events happen atomically when
* rwl->lock is held.)
*/
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
break;
case ESYNCH_OK_ATOMIC:
panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC");
236,7 → 236,7
rwl->readers_in++;
spinlock_unlock(&rwl->lock);
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
 
return ESYNCH_OK_ATOMIC;
}
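
An illustrative caller of the read-side path above, a sketch only: the function name reader_sketch and the 1000 usec timeout are made up for this example, while _rwlock_read_lock_timeout(), rwlock_read_unlock() and the ESYNCH_ codes are the identifiers visible in this diff.

static int reader_sketch(rwlock_t *rwl)
{
	int rc;

	/* enter as a reader, waiting at most 1000 usec, trylock = 0 */
	rc = _rwlock_read_lock_timeout(rwl, 1000, 0);
	if (rc != ESYNCH_OK_ATOMIC && rc != ESYNCH_OK_BLOCKED)
		return rc;	/* anything else means we did not get the lock */

	/* ... read-side critical section ... */

	rwlock_read_unlock(rwl);
	return rc;
}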
251,13 → 251,13
*/
void rwlock_write_unlock(rwlock_t *rwl)
{
- pri_t pri;
+ ipl_t ipl;
- pri = cpu_priority_high();
+ ipl = interrupts_disable();
spinlock_lock(&rwl->lock);
let_others_in(rwl, ALLOW_ALL);
spinlock_unlock(&rwl->lock);
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
}
 
272,14 → 272,14
*/
void rwlock_read_unlock(rwlock_t *rwl)
{
- pri_t pri;
+ ipl_t ipl;
 
- pri = cpu_priority_high();
+ ipl = interrupts_disable();
spinlock_lock(&rwl->lock);
if (!--rwl->readers_in)
let_others_in(rwl, ALLOW_ALL);
spinlock_unlock(&rwl->lock);
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
}
 
 
289,7 → 289,7
* to waiting readers or a writer.
*
* Must be called with rwl->lock locked.
- * Must be called with cpu_priority_high'ed.
+ * Must be called with interrupts_disable()'d.
*
* @param rwl Reader/Writer lock.
* @param readers_only See the description below.
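
The preconditions above are exactly the idiom this revision renames: save the current interrupt priority level, disable interrupts, take the spinlock, do the work, then restore the saved level. A minimal sketch of a compliant caller, mirroring rwlock_write_unlock() above (wake_waiters_sketch is a made-up name, not part of the revision):

static void wake_waiters_sketch(rwlock_t *rwl)
{
	ipl_t ipl;

	ipl = interrupts_disable();	/* returns the previous level */
	spinlock_lock(&rwl->lock);	/* safe to spin: IRQs are off */
	let_others_in(rwl, ALLOW_ALL);	/* both preconditions now hold */
	spinlock_unlock(&rwl->lock);
	interrupts_restore(ipl);	/* back to the caller's level */
}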
/SPARTAN/trunk/src/synch/semaphore.c
42,17 → 42,17
*/
void semaphore_initialize(semaphore_t *s, int val)
{
- pri_t pri;
+ ipl_t ipl;
waitq_initialize(&s->wq);
- pri = cpu_priority_high();
+ ipl = interrupts_disable();
 
spinlock_lock(&s->wq.lock);
s->wq.missed_wakeups = val;
spinlock_unlock(&s->wq.lock);
 
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
}
 
/** Semaphore down
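
Because semaphore_initialize() seeds the wait queue's missed_wakeups with the initial value, the first val downs succeed without ever sleeping. A usage sketch, assuming semaphore_down()/semaphore_up() counterparts exist as suggested by the doc comment cut off above (semaphore_sketch is an illustrative name):

static void semaphore_sketch(void)
{
	semaphore_t sem;

	/* three units: the first three downs just consume
	 * the pre-seeded missed wakeups and never block */
	semaphore_initialize(&sem, 3);

	semaphore_down(&sem);	/* take one unit */
	/* ... guarded work ... */
	semaphore_up(&sem);	/* give the unit back */
}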
/SPARTAN/trunk/src/synch/waitq.c
137,11 → 137,11
*/
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
{
- volatile pri_t pri; /* must be live after context_restore() */
+ volatile ipl_t ipl; /* must be live after context_restore() */
restart:
- pri = cpu_priority_high();
+ ipl = interrupts_disable();
/*
* Busy waiting for a delayed timeout.
153,7 → 153,7
spinlock_lock(&THREAD->lock);
if (THREAD->timeout_pending) {
spinlock_unlock(&THREAD->lock);
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
goto restart;
}
spinlock_unlock(&THREAD->lock);
164,7 → 164,7
if (wq->missed_wakeups) {
wq->missed_wakeups--;
spinlock_unlock(&wq->lock);
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
return ESYNCH_OK_ATOMIC;
}
else {
171,7 → 171,7
if (nonblocking && (usec == 0)) {
/* return immediately instead of going to sleep */
spinlock_unlock(&wq->lock);
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
return ESYNCH_WOULD_BLOCK;
}
}
189,7 → 189,7
*/
before_thread_runs();
spinlock_unlock(&THREAD->lock);
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
return ESYNCH_TIMEOUT;
}
THREAD->timeout_pending = 1;
207,7 → 207,7
spinlock_unlock(&THREAD->lock);
 
scheduler(); /* wq->lock is released in scheduler_separated_stack() */
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
return ESYNCH_OK_BLOCKED;
}
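
The hunks above show all four return codes of waitq_sleep_timeout(). A sketch of a caller telling them apart (wait_sketch and its parameters are illustrative, not part of the revision):

static void wait_sketch(waitq_t *wq, __u32 usec, int nonblocking)
{
	switch (waitq_sleep_timeout(wq, usec, nonblocking)) {
	case ESYNCH_OK_ATOMIC:
		/* a missed wakeup was consumed; we never slept */
		break;
	case ESYNCH_OK_BLOCKED:
		/* we slept and were woken up */
		break;
	case ESYNCH_TIMEOUT:
		/* the timeout fired before any wakeup arrived */
		break;
	case ESYNCH_WOULD_BLOCK:
		/* nonblocking with usec == 0 and nothing pending */
		break;
	}
}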
228,15 → 228,15
*/
void waitq_wakeup(waitq_t *wq, int all)
{
- pri_t pri;
+ ipl_t ipl;
 
- pri = cpu_priority_high();
+ ipl = interrupts_disable();
spinlock_lock(&wq->lock);
 
_waitq_wakeup_unsafe(wq, all);
 
spinlock_unlock(&wq->lock);
- cpu_priority_restore(pri);
+ interrupts_restore(ipl);
}
 
/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
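
On the wakeup side, waitq_wakeup() does the interrupt and lock handling itself before delegating to _waitq_wakeup_unsafe(), as shown above. A sketch of choosing between the two entry points (wakeup_sketch and have_lock are illustrative):

static void wakeup_sketch(waitq_t *wq, int have_lock)
{
	if (have_lock) {
		/* caller already holds wq->lock with interrupts off */
		_waitq_wakeup_unsafe(wq, 0);	/* wake at most one sleeper */
	} else {
		/* let waitq_wakeup() disable interrupts and lock for us */
		waitq_wakeup(wq, 0);
	}
}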