Subversion Repositories HelenOS

Compare Revisions: Rev 2182 → Rev 2183

/trunk/kernel/generic/include/synch/mutex.h
44,13 → 44,11
} mutex_t;
 
#define mutex_lock(mtx) \
-_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
#define mutex_trylock(mtx) \
-_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING)
-#define mutex_lock_timeout(mtx,usec) \
-_mutex_lock_timeout((mtx),(usec),SYNCH_FLAGS_NON_BLOCKING)
#define mutex_lock_timeout(mtx, usec) \
_mutex_lock_timeout((mtx), (usec), SYNCH_FLAGS_NON_BLOCKING)
-#define mutex_lock_active(mtx) \
-while (mutex_trylock((mtx)) != ESYNCH_OK_ATOMIC)
 
extern void mutex_initialize(mutex_t *mtx);
extern int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags);
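
All of the macros above funnel into _mutex_lock_timeout(); the removed mutex_lock_active() simply spun on mutex_trylock() until it returned ESYNCH_OK_ATOMIC. A minimal caller-side sketch of the surviving API (guard and counter are invented names; mutex_unlock() and the ESYNCH_OK_ATOMIC constant are assumed from the rest of synch/):

    #include <synch/mutex.h>
    #include <synch/synch.h>

    static mutex_t guard;            /* hypothetical lock */
    static int counter;              /* hypothetical shared state */

    static void counter_init(void)
    {
        mutex_initialize(&guard);
    }

    static void counter_bump(void)
    {
        mutex_lock(&guard);          /* may block: SYNCH_NO_TIMEOUT, no flags */
        counter++;
        mutex_unlock(&guard);
    }

    static int counter_try_bump(void)
    {
        /* Non-blocking attempt; success is reported as ESYNCH_OK_ATOMIC. */
        if (mutex_trylock(&guard) != ESYNCH_OK_ATOMIC)
            return 0;
        counter++;
        mutex_unlock(&guard);
        return 1;
    }
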
/trunk/kernel/generic/include/synch/spinlock.h
101,8 → 101,26
preemption_enable();
}
 
#ifdef CONFIG_DEBUG_SPINLOCK
 
extern int printf(const char *, ...);
 
#define DEADLOCK_THRESHOLD 100000000
#define DEADLOCK_PROBE_INIT(pname) count_t pname = 0
#define DEADLOCK_PROBE(pname, value) \
if ((pname)++ > (value)) { \
(pname) = 0; \
printf("Deadlock probe %s: exceeded threshold %d\n" \
"cpu%d: function=%s, line=%d\n", \
#pname, (value), CPU->id, __FUNCTION__, __LINE__); \
}
#else
#define DEADLOCK_PROBE_INIT(pname)
#define DEADLOCK_PROBE(pname, value)
#endif
 
#else
 
/* On UP systems, spinlocks are effectively left out. */
#define SPINLOCK_DECLARE(name)
#define SPINLOCK_EXTERN(name)
113,6 → 131,9
#define spinlock_trylock(x) (preemption_disable(), 1)
#define spinlock_unlock(x) preemption_enable()
 
#define DEADLOCK_PROBE_INIT(pname)
#define DEADLOCK_PROBE(pname, value)
 
#endif
 
#endif
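
The probe macros are designed to instrument exactly the trylock-and-retry loops this revision touches: DEADLOCK_PROBE_INIT() declares the counter, and DEADLOCK_PROBE() prints the probe name, CPU, function, and line once the loop has spun DEADLOCK_THRESHOLD times, then resets the counter. A sketch of the intended pattern on an SMP debug build (lock_a and lock_b are hypothetical locks that another code path may take in the opposite order):

    SPINLOCK_DECLARE(lock_a);
    SPINLOCK_DECLARE(lock_b);

    void both_locked_op(void)
    {
        DEADLOCK_PROBE_INIT(p_lockb);    /* expands to: count_t p_lockb = 0 */
    grab_locks:
        spinlock_lock(&lock_a);
        if (!spinlock_trylock(&lock_b)) {
            /* Back off completely rather than deadlock. */
            spinlock_unlock(&lock_a);
            DEADLOCK_PROBE(p_lockb, DEADLOCK_THRESHOLD);
            goto grab_locks;
        }
        /* ... critical section under both locks ... */
        spinlock_unlock(&lock_b);
        spinlock_unlock(&lock_a);
    }
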
/trunk/kernel/generic/include/mm/as.h
101,11 → 101,11
*/
asid_t asid;
/** Number of references (i.e. tasks that reference this as). */
atomic_t refcount;

mutex_t lock;
-/** Number of references (i.e. tasks that reference this as). */
-count_t refcount;
/** B+tree of address space areas. */
btree_t as_area_btree;
147,11 → 147,11
*/
asid_t asid;

/** Number of references (i.e. tasks that reference this as). */
atomic_t refcount;

mutex_t lock;

-/** Number of references (i.e. tasks that reference this as). */
-count_t refcount;

/** B+tree of address space areas. */
btree_t as_area_btree;
/trunk/kernel/generic/src/synch/spinlock.c
73,7 → 73,6
* @param sl Pointer to spinlock_t structure.
*/
#ifdef CONFIG_DEBUG_SPINLOCK
-#define DEADLOCK_THRESHOLD 100000000
void spinlock_lock_debug(spinlock_t *sl)
{
count_t i = 0;
/trunk/kernel/generic/src/synch/waitq.c
86,6 → 86,7
thread_t *t = (thread_t *) data;
waitq_t *wq;
bool do_wakeup = false;
DEADLOCK_PROBE_INIT(p_wqlock);
 
spinlock_lock(&threads_lock);
if (!thread_exists(t))
96,6 → 97,7
if ((wq = t->sleep_queue)) { /* assignment */
if (!spinlock_trylock(&wq->lock)) {
spinlock_unlock(&t->lock);
DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
goto grab_locks; /* avoid deadlock */
}
 
128,6 → 130,7
waitq_t *wq;
bool do_wakeup = false;
ipl_t ipl;
DEADLOCK_PROBE_INIT(p_wqlock);
 
ipl = interrupts_disable();
spinlock_lock(&threads_lock);
147,6 → 150,7
if (!spinlock_trylock(&wq->lock)) {
spinlock_unlock(&t->lock);
DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
goto grab_locks; /* avoid deadlock */
}
 
/trunk/kernel/generic/src/time/timeout.c
45,7 → 45,6
#include <arch/asm.h>
#include <arch.h>
 
 
/** Initialize timeouts
*
* Initialize kernel timeouts.
175,6 → 174,7
timeout_t *hlp;
link_t *l;
ipl_t ipl;
DEADLOCK_PROBE_INIT(p_tolock);
 
grab_locks:
ipl = interrupts_disable();
186,7 → 186,8
}
if (!spinlock_trylock(&t->cpu->timeoutlock)) {
spinlock_unlock(&t->lock);
interrupts_restore(ipl);
DEADLOCK_PROBE(p_tolock, DEADLOCK_THRESHOLD);
goto grab_locks;
}
/trunk/kernel/generic/src/proc/scheduler.c
377,7 → 377,8
void scheduler_separated_stack(void)
{
int priority;
DEADLOCK_PROBE_INIT(p_joinwq);
 
ASSERT(CPU != NULL);
if (THREAD) {
406,6 → 407,8
spinlock_unlock(&THREAD->lock);
delay(10);
spinlock_lock(&THREAD->lock);
DEADLOCK_PROBE(p_joinwq,
DEADLOCK_THRESHOLD);
goto repeat;
}
_waitq_wakeup_unsafe(&THREAD->join_wq, false);
/trunk/kernel/generic/src/proc/task.c
41,6 → 41,7
#include <proc/uarg.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
140,11 → 141,8
 
/*
* Increment address space reference count.
-* TODO: Reconsider the locking scheme.
*/
-mutex_lock(&as->lock);
-as->refcount++;
-mutex_unlock(&as->lock);
atomic_inc(&as->refcount);
 
spinlock_lock(&tasks_lock);
 
166,15 → 164,8
task_destroy_arch(t);
btree_destroy(&t->futexes);
 
-mutex_lock_active(&t->as->lock);
-if (--t->as->refcount == 0) {
-mutex_unlock(&t->as->lock);
if (atomic_predec(&t->as->refcount) == 0)
as_destroy(t->as);
-/*
-* t->as is destroyed.
-*/
-} else
-mutex_unlock(&t->as->lock);
free(t);
TASK = NULL;
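
The net effect of the task.c change is that as->refcount no longer needs the address space mutex: attaching does an atomic increment, and detaching uses atomic_predec() (atomic predecrement returning the new value), so exactly one releasing task sees zero and calls as_destroy(). The pattern in isolation (the refobj_* names are shortened stand-ins for illustration):

    #include <atomic.h>

    typedef struct {
        atomic_t refcount;            /* tasks referencing this object */
        /* ... payload ... */
    } refobj_t;

    static void refobj_destroy(refobj_t *obj);    /* hypothetical */

    static void refobj_grab(refobj_t *obj)
    {
        atomic_inc(&obj->refcount);
    }

    static void refobj_release(refobj_t *obj)
    {
        /* Only the caller that drops the count to zero destroys. */
        if (atomic_predec(&obj->refcount) == 0)
            refobj_destroy(obj);
    }
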
/trunk/kernel/generic/src/proc/thread.c
496,7 → 496,7
ipl_t ipl;
 
/*
-* Since the thread is expected to not be already detached,
* Since the thread is expected not to be already detached,
* pointer to it must be still valid.
*/
ipl = interrupts_disable();
/trunk/kernel/generic/src/mm/as.c
57,6 → 57,7
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <preemption.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
181,7 → 182,7
else
as->asid = ASID_INVALID;
-as->refcount = 0;
atomic_set(&as->refcount, 0);
as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
as->genarch.page_table = page_table_create(flags);
196,13 → 197,16
*
* When there are no tasks referencing this address space (i.e. its refcount is
* zero), the address space can be destroyed.
*
* We know that we don't hold any spinlock.
*/
void as_destroy(as_t *as)
{
ipl_t ipl;
bool cond;
DEADLOCK_PROBE_INIT(p_asidlock);
 
-ASSERT(as->refcount == 0);
ASSERT(atomic_get(&as->refcount) == 0);
/*
* Since there is no reference to this area,
209,8 → 213,23
* it is safe not to lock its mutex.
*/
 
-ipl = interrupts_disable();
-spinlock_lock(&asidlock);
/*
* We need to avoid deadlock between TLB shootdown and asidlock.
* We therefore try to take asidlock conditionally and if we don't succeed,
* we enable interrupts and try again. This is done while preemption is
* disabled to prevent nested context switches. We also depend on the
* fact that so far no spinlocks are held.
*/
preemption_disable();
ipl = interrupts_read();
retry:
interrupts_disable();
if (!spinlock_trylock(&asidlock)) {
interrupts_enable();
DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
goto retry;
}
preemption_enable(); /* Interrupts disabled, enable preemption */
if (as->asid != ASID_INVALID && as != AS_KERNEL) {
if (as != AS && as->cpu_refcount == 0)
list_remove(&as->inactive_as_with_asid_link);
472,15 → 491,16
/*
* Finish TLB shootdown sequence.
*/
 
tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
area->pages - pages);
-tlb_shootdown_finalize();
/*
* Invalidate software translation caches (e.g. TSB on sparc64).
*/
as_invalidate_translation_cache(as, area->base +
pages * PAGE_SIZE, area->pages - pages);
tlb_shootdown_finalize();
} else {
/*
* Growing the area.
568,14 → 588,14
/*
* Finish TLB shootdown sequence.
*/
 
tlb_invalidate_pages(as->asid, area->base, area->pages);
-tlb_shootdown_finalize();
/*
* Invalidate potential software translation caches (e.g. TSB on
* sparc64).
*/
as_invalidate_translation_cache(as, area->base, area->pages);
tlb_shootdown_finalize();
btree_destroy(&area->used_space);
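
Both as.c hunks above make the same correction: tlb_shootdown_finalize() now runs after as_invalidate_translation_cache(), so software translation caches (e.g. the TSB on sparc64) are purged while the other CPUs are still parked in the shootdown sequence. The intended order, sketched below; the tlb_shootdown_start() call opens the sequence earlier in the real functions, and its exact arguments here are an assumption:

    /* 1. Open the sequence; other CPUs are IPI'd and held. */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /* 2. Invalidate hardware TLB entries for the affected range. */
    tlb_invalidate_pages(as->asid, area->base, area->pages);

    /* 3. Invalidate software translation caches. */
    as_invalidate_translation_cache(as, area->base, area->pages);

    /* 4. Only now release the other CPUs. */
    tlb_shootdown_finalize();
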
 
867,12 → 887,29
* scheduling. Sleeping here would lead to deadlock on wakeup. Another
* thing which is forbidden in this context is locking the address space.
*
* When this function is entered, no spinlocks may be held.
*
* @param old Old address space or NULL.
* @param new New address space.
*/
void as_switch(as_t *old_as, as_t *new_as)
{
-spinlock_lock(&asidlock);
DEADLOCK_PROBE_INIT(p_asidlock);
preemption_disable();
retry:
(void) interrupts_disable();
if (!spinlock_trylock(&asidlock)) {
/*
* Avoid deadlock with TLB shootdown.
* We can enable interrupts here because
* preemption is disabled. We should not be
* holding any other lock.
*/
(void) interrupts_enable();
DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
goto retry;
}
preemption_enable();
 
/*
* First, take care of the old address space.
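
as_destroy() and as_switch() now share one idiom for acquiring asidlock: disable preemption, then spin on spinlock_trylock() with interrupts briefly re-enabled between attempts, so that a concurrent TLB shootdown IPI can still be serviced instead of deadlocking. Pulled out of context, the idiom looks roughly like this (a sketch assembled from the two hunks, not a verbatim kernel function):

    void asidlock_acquire_sketch(void)
    {
        ipl_t ipl;
        DEADLOCK_PROBE_INIT(p_asidlock);

        preemption_disable();        /* no context switch while we spin */
        ipl = interrupts_read();     /* remember state for later restore */
    retry:
        interrupts_disable();
        if (!spinlock_trylock(&asidlock)) {
            /*
             * Let a pending TLB shootdown IPI in; safe because
             * preemption is still disabled.
             */
            interrupts_enable();
            DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
            goto retry;
        }
        preemption_enable();         /* interrupts are off, so no switch */

        /* ... ASID bookkeeping under asidlock ... */

        spinlock_unlock(&asidlock);
        interrupts_restore(ipl);
    }
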
/trunk/kernel/generic/src/ipc/ipc.c
374,6 → 374,7
int i;
call_t *call;
phone_t *phone;
DEADLOCK_PROBE_INIT(p_phonelck);
 
/* Disconnect all our phones ('ipc_phone_hangup') */
for (i = 0; i < IPC_MAX_PHONES; i++)
387,9 → 388,10
spinlock_lock(&TASK->answerbox.lock);
while (!list_empty(&TASK->answerbox.connected_phones)) {
phone = list_get_instance(TASK->answerbox.connected_phones.next,
phone_t, link);
if (! spinlock_trylock(&phone->lock)) {
spinlock_unlock(&TASK->answerbox.lock);
DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD);
goto restart_phones;
}
/trunk/kernel/generic/src/ipc/irq.c
336,6 → 336,7
while (box->irq_head.next != &box->irq_head) {
link_t *cur = box->irq_head.next;
irq_t *irq;
DEADLOCK_PROBE_INIT(p_irqlock);
irq = list_get_instance(cur, irq_t, notif_cfg.link);
if (!spinlock_trylock(&irq->lock)) {
344,6 → 345,7
*/
spinlock_unlock(&box->irq_lock);
interrupts_restore(ipl);
DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
goto loop;
}