/kernel/trunk/generic/include/synch/waitq.h
---
38,8 → 38,15

+#define WAKEUP_FIRST    0
+#define WAKEUP_ALL      1
+
+/** Wait queue structure. */
 struct waitq {
+    /** Lock protecting wait queue structure.
+     *
+     * Must be acquired before T.lock for each T of type thread_t.
+     */
     spinlock_t lock;
     int missed_wakeups;    /**< Number of waitq_wakeup() calls that didn't find a thread to wake up. */
     link_t head;           /**< List of sleeping threads for which there was no missed wakeup. */
 };

51,7 → 58,7

 extern void waitq_initialize(waitq_t *wq);
 extern int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking);
-extern void waitq_wakeup(waitq_t *wq, int all);
-extern void _waitq_wakeup_unsafe(waitq_t *wq, int all);
+extern void waitq_wakeup(waitq_t *wq, bool all);
+extern void _waitq_wakeup_unsafe(waitq_t *wq, bool all);

 #endif
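Taken together, these declarations give counting-semaphore-like behaviour: a wakeup that finds no sleeper is remembered in missed_wakeups and satisfies the next sleeper immediately. A minimal usage sketch, assuming usec == 0 requests an untimed sleep and nonblocking == 0 a blocking one (inferred from the signatures, not shown in this changeset); io_done_wq and device_start_io() are hypothetical:

static waitq_t io_done_wq;

void io_init(void)
{
    waitq_initialize(&io_done_wq);
}

void io_request(void)
{
    device_start_io();                               /* hypothetical device call */
    (void) waitq_sleep_timeout(&io_done_wq, 0, 0);   /* block, no timeout (assumed) */
}

void io_irq_handler(void)
{
    /* Wake one sleeper; if nobody sleeps yet, missed_wakeups records it
     * and the next waitq_sleep_timeout() returns without blocking. */
    waitq_wakeup(&io_done_wq, WAKEUP_FIRST);
}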
/kernel/trunk/generic/include/proc/thread.h
---
64,7 → 64,12

     link_t th_link;        /**< Links to threads within containing task. */
     link_t threads_link;   /**< Link to the list of all threads. */

-    /* items below are protected by lock */
+    /** Lock protecting thread structure.
+     *
+     * Protects the whole thread structure except the list links above.
+     * Must be acquired before T.lock for each T of type task_t.
+     *
+     */
     spinlock_t lock;

     void (* thread_code)(void *);    /**< Function implementing the thread. */

109,7 → 114,14

     __u8 *ustack;    /**< Thread's user stack. */
 };

-extern spinlock_t threads_lock;    /**< Lock protecting threads_head list. */
+/** Thread list lock.
+ *
+ * This lock protects all link_t structures chained in threads_head.
+ * Must be acquired before T.lock for each T of type thread_t.
+ *
+ */
+extern spinlock_t threads_lock;
+
 extern link_t threads_head;    /**< List of all threads in the system. */

 extern void thread_init(void);
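The two new comments pin down a single lock order: threads_lock first, then a thread's lock, then (per the comment in thread_t) a task's lock. A hedged sketch of a list walk that obeys this order; list_get_instance() is assumed to be the usual container-of helper:

void threads_dump(void)
{
    link_t *cur;

    spinlock_lock(&threads_lock);               /* 1st: the list lock */
    for (cur = threads_head.next; cur != &threads_head; cur = cur->next) {
        thread_t *t = list_get_instance(cur, thread_t, threads_link);

        spinlock_lock(&t->lock);                /* 2nd: the thread lock */
        /* ... examine t; a task_t lock would be taken 3rd ... */
        spinlock_unlock(&t->lock);
    }
    spinlock_unlock(&threads_lock);
}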
/kernel/trunk/generic/src/proc/scheduler.c
---
433,7 → 433,7

     }

     /*
-     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
+     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
      * and preemption counter. At this point THE could be coming either
      * from THREAD's or CPU's stack.
      */
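For orientation, THE is the per-stack bookkeeping record the comment refers to, reachable from either the thread's or the CPU's stack. A rough sketch of the fields it enumerates, with the type and field names assumed rather than taken from this changeset:

/* Hypothetical shape of the 'THE' record; names are assumptions. */
typedef struct {
    count_t preemption_disabled;    /* preemption counter */
    thread_t *thread;               /* current THREAD */
    task_t *task;                   /* current TASK */
    cpu_t *cpu;                     /* executing CPU */
    vm_t *vm;                       /* current VM, newly tracked here */
} the_t;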
/kernel/trunk/generic/src/proc/thread.c
---
54,8 → 54,8

 char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};    /**< Thread states. */

-spinlock_t threads_lock;
-link_t threads_head;
+spinlock_t threads_lock;    /**< Lock protecting threads_head list. For locking rules, see declaration thereof. */
+link_t threads_head;        /**< List of all threads. */

 static spinlock_t tidlock;
 __u32 last_tid = 0;
/kernel/trunk/generic/src/synch/rwlock.c
---
26,9 → 26,10

  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

-/*
- * Reader/Writer locks
+/** Reader/Writer locks
+ *
+ * A reader/writer lock can be held by multiple readers at a time,
+ * or exclusively by a single writer.
  */

 /*

75,7 → 76,7

  * @param rwl Reader/Writer lock.
  */
 void rwlock_initialize(rwlock_t *rwl)
 {
-    spinlock_initialize(&rwl->lock, "rwlock");
+    spinlock_initialize(&rwl->lock, "rwlock_t");
     mutex_initialize(&rwl->exclusive);
     rwl->readers_in = 0;
 }

218,10 → 219,10

         interrupts_restore(ipl);
         break;
     case ESYNCH_OK_ATOMIC:
-        panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC");
+        panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC\n");
         break;
     default:
-        panic("invalid ESYNCH");
+        panic("invalid ESYNCH\n");
         break;
     }
     return rc;

283,7 → 284,7

 }

-/** Direct handoff
+/** Direct handoff of reader/writer lock ownership.
  *
  * Direct handoff of reader/writer lock ownership
  * to waiting readers or a writer.

306,7 → 307,7

 {
     rwlock_type_t type = RWLOCK_NONE;
     thread_t *t = NULL;
-    int one_more = 1;
+    bool one_more = true;

     spinlock_lock(&rwl->exclusive.sem.wq.lock);

352,7 → 353,7

     if (t) {
         spinlock_lock(&t->lock);
         if (t->rwlock_holder_type != RWLOCK_READER)
-            one_more = 0;
+            one_more = false;
         spinlock_unlock(&t->lock);
     }
 }
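A short usage sketch of the semantics described in the new comment; the rwlock_read_lock()/rwlock_write_lock() entry points and their unlock counterparts are assumed, since this changeset only shows initialization and the handoff path:

static rwlock_t stats_rwl;
static unsigned long packet_count;

void stats_read(void)
{
    unsigned long snapshot;

    rwlock_read_lock(&stats_rwl);    /* may share the lock with other readers */
    snapshot = packet_count;
    rwlock_read_unlock(&stats_rwl);
    (void) snapshot;
}

void stats_bump(void)
{
    rwlock_write_lock(&stats_rwl);   /* sole writer, excludes all readers */
    packet_count++;
    rwlock_write_unlock(&stats_rwl);
}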
/kernel/trunk/generic/src/synch/spinlock.c
---
62,9 → 62,10

  */
 void spinlock_lock(spinlock_t *sl)
 {
-    int i = 0;
+    count_t i = 0;
     __address caller = ((__address *) &sl)[-1];
     char *symbol;
+    bool deadlock_reported = false;

     preemption_disable();
     while (test_and_set(&sl->val)) {

76,9 → 77,13

             printf("(%s)", symbol);
             printf("\n");
             i = 0;
+            deadlock_reported = true;
         }
     }
+
+    if (deadlock_reported)
+        printf("cpu%d: not deadlocked\n", CPU->id);

     /*
      * Prevent critical section code from bleeding out this way up.
      */
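The new deadlock_reported flag makes the watchdog two-sided: after printing a suspected-deadlock report it can retract it once the lock is eventually won. A runnable userspace analogue of the pattern (the threshold, the __sync_lock_test_and_set() stand-in, and all names are mine, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define SPIN_THRESHOLD 100000000UL

static volatile int lock_val;

static int test_and_set(volatile int *val)
{
    return __sync_lock_test_and_set(val, 1);    /* returns previous value */
}

void spinlock_lock_watchdog(int cpu_id)
{
    unsigned long i = 0;
    bool deadlock_reported = false;

    while (test_and_set(&lock_val)) {
        if (++i > SPIN_THRESHOLD) {
            printf("cpu%d: looping on spinlock, suspected deadlock\n", cpu_id);
            i = 0;                         /* restart the count ... */
            deadlock_reported = true;      /* ... but remember we complained */
        }
    }
    if (deadlock_reported)
        printf("cpu%d: not deadlocked\n", cpu_id);
}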
/kernel/trunk/generic/src/synch/waitq.c
---
33,6 → 33,7

 #include <proc/scheduler.h>
 #include <arch/asm.h>
 #include <arch/types.h>
+#include <typedefs.h>
 #include <time/timeout.h>
 #include <arch.h>
 #include <context.h>

67,7 → 68,7

 {
     thread_t *t = (thread_t *) data;
     waitq_t *wq;
-    int do_wakeup = 0;
+    bool do_wakeup = false;

     spinlock_lock(&threads_lock);
     if (!list_member(&t->threads_link, &threads_head))

75,24 → 76,25

 grab_locks:
     spinlock_lock(&t->lock);
-    if (wq = t->sleep_queue) {
+    if (wq = t->sleep_queue) {    /* assignment */
         if (!spinlock_trylock(&wq->lock)) {
             spinlock_unlock(&t->lock);
             goto grab_locks;    /* avoid deadlock */
         }
         list_remove(&t->wq_link);
         t->saved_context = t->sleep_timeout_context;
-        do_wakeup = 1;
+        do_wakeup = true;
         spinlock_unlock(&wq->lock);
         t->sleep_queue = NULL;
     }

-    t->timeout_pending = 0;
+    t->timeout_pending = false;
     spinlock_unlock(&t->lock);

-    if (do_wakeup) thread_ready(t);
+    if (do_wakeup)
+        thread_ready(t);

 out:
     spinlock_unlock(&threads_lock);

193,7 → 195,7

         interrupts_restore(ipl);
         return ESYNCH_TIMEOUT;
     }
-    THREAD->timeout_pending = 1;
+    THREAD->timeout_pending = true;
     timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, THREAD);
 }

227,7 → 229,7

  * @param all If this is non-zero, all sleeping threads
  *            will be woken up and missed count will be zeroed.
  */
-void waitq_wakeup(waitq_t *wq, int all)
+void waitq_wakeup(waitq_t *wq, bool all)
 {
     ipl_t ipl;

250,7 → 252,7

  * @param all If this is non-zero, all sleeping threads
  *            will be woken up and missed count will be zeroed.
  */
-void _waitq_wakeup_unsafe(waitq_t *wq, int all)
+void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
 {
     thread_t *t;

257,7 → 259,8

 loop:
     if (list_empty(&wq->head)) {
         wq->missed_wakeups++;
-        if (all) wq->missed_wakeups = 0;
+        if (all)
+            wq->missed_wakeups = 0;
         return;
     }

266,11 → 269,12

     list_remove(&t->wq_link);
     spinlock_lock(&t->lock);
     if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
-        t->timeout_pending = 0;
+        t->timeout_pending = false;
     t->sleep_queue = NULL;
     spinlock_unlock(&t->lock);

     thread_ready(t);

-    if (all) goto loop;
+    if (all)
+        goto loop;
 }
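The grab_locks loop above is worth isolating: the timeout handler must acquire t->lock before wq->lock, the reverse of the sleep path's order, so it probes the second lock with spinlock_trylock() and backs off completely on failure instead of deadlocking. A generic sketch of the technique, with illustrative lock names:

static spinlock_t inner, outer;    /* illustrative */

void take_both_out_of_order(void)
{
grab_locks:
    spinlock_lock(&inner);
    if (!spinlock_trylock(&outer)) {
        /* The holder of 'outer' may be spinning on 'inner';
         * release everything and retry from scratch. */
        spinlock_unlock(&inner);
        goto grab_locks;    /* avoid deadlock */
    }
    /* ... both locks held without risking deadlock ... */
    spinlock_unlock(&outer);
    spinlock_unlock(&inner);
}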
/kernel/trunk/arch/ia32/include/atomic.h
---
53,8 → 53,8

 {
     atomic_t r;

     __asm__ volatile (
-        "movl $1,%0;"
-        "lock xaddl %0,%1;"
+        "movl $1, %0\n"
+        "lock xaddl %0, %1\n"
         : "=r" (r), "=m" (*val)
     );
     return r;

66,8 → 66,8

 {
     atomic_t r;

     __asm__ volatile (
-        "movl $-1,%0;"
-        "lock xaddl %0,%1;"
+        "movl $-1, %0\n"
+        "lock xaddl %0, %1\n"
         : "=r" (r), "=m" (*val)
     );
     return r;

76,8 → 76,6

 #define atomic_inc_post(val) (atomic_inc_pre(val) + 1)
 #define atomic_dec_post(val) (atomic_dec_pre(val) - 1)

 static inline int test_and_set(volatile int *val)
 {
     int v;
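The switch from `;` to `\n` only changes how the instruction string is fed to the assembler; `lock xaddl` still atomically exchanges and adds, so `r` comes back holding the pre-increment value. A runnable ia32/amd64 userspace check of that behaviour (GCC inline asm; the `+m` constraint and `cc` clobber are my tightening of the kernel's version):

#include <stdio.h>

typedef int atomic_t;

static inline atomic_t atomic_inc_pre(volatile atomic_t *val)
{
    atomic_t r;

    __asm__ volatile (
        "movl $1, %0\n"
        "lock xaddl %0, %1\n"    /* *val += r; r = old *val */
        : "=r" (r), "+m" (*val)
        :
        : "cc"
    );
    return r;
}

#define atomic_inc_post(val) (atomic_inc_pre(val) + 1)

int main(void)
{
    volatile atomic_t counter = 41;

    printf("pre-increment: %d\n", atomic_inc_pre(&counter));    /* prints 41 */
    printf("counter now:   %d\n", counter);                     /* prints 42 */
    return 0;
}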