/trunk/kernel/generic/src/synch/spinlock.c |
---|
73,7 → 73,6 |
* @param sl Pointer to spinlock_t structure. |
*/ |
#ifdef CONFIG_DEBUG_SPINLOCK |
#define DEADLOCK_THRESHOLD 100000000 |
void spinlock_lock_debug(spinlock_t *sl) |
{ |
count_t i = 0; |
/trunk/kernel/generic/src/synch/waitq.c |
---|
86,6 → 86,7 |
thread_t *t = (thread_t *) data; |
waitq_t *wq; |
bool do_wakeup = false; |
DEADLOCK_PROBE_INIT(p_wqlock); |
spinlock_lock(&threads_lock); |
if (!thread_exists(t)) |
96,6 → 97,7 |
if ((wq = t->sleep_queue)) { /* assignment */ |
if (!spinlock_trylock(&wq->lock)) { |
spinlock_unlock(&t->lock); |
DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); |
goto grab_locks; /* avoid deadlock */ |
} |
128,6 → 130,7 |
waitq_t *wq; |
bool do_wakeup = false; |
ipl_t ipl; |
DEADLOCK_PROBE_INIT(p_wqlock); |
ipl = interrupts_disable(); |
spinlock_lock(&threads_lock); |
147,6 → 150,7 |
if (!spinlock_trylock(&wq->lock)) { |
spinlock_unlock(&t->lock); |
DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); |
goto grab_locks; /* avoid deadlock */ |
} |
/trunk/kernel/generic/src/time/timeout.c |
---|
45,7 → 45,6 |
#include <arch/asm.h> |
#include <arch.h> |
/** Initialize timeouts |
* |
* Initialize kernel timeouts. |
175,6 → 174,7 |
timeout_t *hlp; |
link_t *l; |
ipl_t ipl; |
DEADLOCK_PROBE_INIT(p_tolock); |
grab_locks: |
ipl = interrupts_disable(); |
187,6 → 187,7 |
if (!spinlock_trylock(&t->cpu->timeoutlock)) { |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
DEADLOCK_PROBE(p_tolock, DEADLOCK_THRESHOLD); |
goto grab_locks; |
} |
/trunk/kernel/generic/src/proc/scheduler.c |
---|
377,6 → 377,7 |
void scheduler_separated_stack(void) |
{ |
int priority; |
DEADLOCK_PROBE_INIT(p_joinwq); |
ASSERT(CPU != NULL); |
406,6 → 407,8 |
spinlock_unlock(&THREAD->lock); |
delay(10); |
spinlock_lock(&THREAD->lock); |
DEADLOCK_PROBE(p_joinwq, |
DEADLOCK_THRESHOLD); |
goto repeat; |
} |
_waitq_wakeup_unsafe(&THREAD->join_wq, false); |
/trunk/kernel/generic/src/proc/task.c |
---|
41,6 → 41,7 |
#include <proc/uarg.h> |
#include <mm/as.h> |
#include <mm/slab.h> |
#include <atomic.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <arch.h> |
140,11 → 141,8 |
/* |
* Increment address space reference count. |
* TODO: Reconsider the locking scheme. |
*/ |
mutex_lock(&as->lock); |
as->refcount++; |
mutex_unlock(&as->lock); |
atomic_inc(&as->refcount); |
spinlock_lock(&tasks_lock); |
166,15 → 164,8 |
task_destroy_arch(t); |
btree_destroy(&t->futexes); |
mutex_lock_active(&t->as->lock); |
if (--t->as->refcount == 0) { |
mutex_unlock(&t->as->lock); |
if (atomic_predec(&t->as->refcount) == 0) |
as_destroy(t->as); |
/* |
* t->as is destroyed. |
*/ |
} else |
mutex_unlock(&t->as->lock); |
free(t); |
TASK = NULL; |
/trunk/kernel/generic/src/proc/thread.c |
---|
496,7 → 496,7 |
ipl_t ipl; |
/* |
* Since the thread is expected to not be already detached, |
* Since the thread is expected not to be already detached, |
* pointer to it must be still valid. |
*/ |
ipl = interrupts_disable(); |
/trunk/kernel/generic/src/mm/as.c |
---|
57,6 → 57,7 |
#include <genarch/mm/page_ht.h> |
#include <mm/asid.h> |
#include <arch/mm/asid.h> |
#include <preemption.h> |
#include <synch/spinlock.h> |
#include <synch/mutex.h> |
#include <adt/list.h> |
181,7 → 182,7 |
else |
as->asid = ASID_INVALID; |
as->refcount = 0; |
atomic_set(&as->refcount, 0); |
as->cpu_refcount = 0; |
#ifdef AS_PAGE_TABLE |
as->genarch.page_table = page_table_create(flags); |
196,13 → 197,16 |
* |
* When there are no tasks referencing this address space (i.e. its refcount is |
* zero), the address space can be destroyed. |
* |
* We know that we don't hold any spinlock. |
*/ |
void as_destroy(as_t *as) |
{ |
ipl_t ipl; |
bool cond; |
DEADLOCK_PROBE_INIT(p_asidlock); |
ASSERT(as->refcount == 0); |
ASSERT(atomic_get(&as->refcount) == 0); |
/* |
* Since there is no reference to this area, |
209,8 → 213,23 |
* it is safe not to lock its mutex. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&asidlock); |
/* |
* We need to avoid deadlock between TLB shootdown and asidlock. |
* We therefore try to take asid conditionally and if we don't succeed, |
* we enable interrupts and try again. This is done while preemption is |
* disabled to prevent nested context switches. We also depend on the |
* fact that so far no spinlocks are held. |
*/ |
preemption_disable(); |
ipl = interrupts_read(); |
retry: |
interrupts_disable(); |
if (!spinlock_trylock(&asidlock)) { |
interrupts_enable(); |
DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD); |
goto retry; |
} |
preemption_enable(); /* Interrupts disabled, enable preemption */ |
if (as->asid != ASID_INVALID && as != AS_KERNEL) { |
if (as != AS && as->cpu_refcount == 0) |
list_remove(&as->inactive_as_with_asid_link); |
472,15 → 491,16 |
/* |
* Finish TLB shootdown sequence. |
*/ |
tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, |
area->pages - pages); |
tlb_shootdown_finalize(); |
/* |
* Invalidate software translation caches (e.g. TSB on sparc64). |
*/ |
as_invalidate_translation_cache(as, area->base + |
pages * PAGE_SIZE, area->pages - pages); |
tlb_shootdown_finalize(); |
} else { |
/* |
* Growing the area. |
568,14 → 588,14 |
/* |
* Finish TLB shootdown sequence. |
*/ |
tlb_invalidate_pages(as->asid, area->base, area->pages); |
tlb_shootdown_finalize(); |
/* |
* Invalidate potential software translation caches (e.g. TSB on |
* sparc64). |
*/ |
as_invalidate_translation_cache(as, area->base, area->pages); |
tlb_shootdown_finalize(); |
btree_destroy(&area->used_space); |
867,12 → 887,29 |
* scheduling. Sleeping here would lead to deadlock on wakeup. Another |
* thing which is forbidden in this context is locking the address space. |
* |
* When this function is entered, no spinlocks may be held. |
* |
* @param old Old address space or NULL. |
* @param new New address space. |
*/ |
void as_switch(as_t *old_as, as_t *new_as) |
{ |
spinlock_lock(&asidlock); |
DEADLOCK_PROBE_INIT(p_asidlock); |
preemption_disable(); |
retry: |
(void) interrupts_disable(); |
if (!spinlock_trylock(&asidlock)) { |
/* |
* Avoid deadlock with TLB shootdown. |
* We can enable interrupts here because |
* preemption is disabled. We should not be |
* holding any other lock. |
*/ |
(void) interrupts_enable(); |
DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD); |
goto retry; |
} |
preemption_enable(); |
/* |
* First, take care of the old address space. |
/trunk/kernel/generic/src/ipc/ipc.c |
---|
374,6 → 374,7 |
int i; |
call_t *call; |
phone_t *phone; |
DEADLOCK_PROBE_INIT(p_phonelck); |
/* Disconnect all our phones ('ipc_phone_hangup') */ |
for (i=0;i < IPC_MAX_PHONES; i++) |
390,6 → 391,7 |
phone_t, link); |
if (! spinlock_trylock(&phone->lock)) { |
spinlock_unlock(&TASK->answerbox.lock); |
DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD); |
goto restart_phones; |
} |
/trunk/kernel/generic/src/ipc/irq.c |
---|
336,6 → 336,7 |
while (box->irq_head.next != &box->irq_head) { |
link_t *cur = box->irq_head.next; |
irq_t *irq; |
DEADLOCK_PROBE_INIT(p_irqlock); |
irq = list_get_instance(cur, irq_t, notif_cfg.link); |
if (!spinlock_trylock(&irq->lock)) { |
344,6 → 345,7 |
*/ |
spinlock_unlock(&box->irq_lock); |
interrupts_restore(ipl); |
DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD); |
goto loop; |
} |