/trunk/kernel/generic/src/synch/waitq.c
---
188,20 → 188,20
  * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
  *
  * If usec is greater than zero, regardless of the value of the
- * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either timeout,
- * interruption or wakeup comes.
+ * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
+ * timeout, interruption or wakeup comes.
  *
- * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, the call
- * will not return until wakeup or interruption comes.
+ * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
+ * the call will not return until wakeup or interruption comes.
  *
- * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the call will
- * immediately return, reporting either success or failure.
+ * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
+ * call will immediately return, reporting either success or failure.
  *
- * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED,
- * ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
+ * @return One of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED,
+ * ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
  *
- * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
- * of the call there was no pending wakeup.
+ * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of the
+ * call there was no pending wakeup.
  *
  * @li ESYNCH_TIMEOUT means that the sleep timed out.
  *
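For reference while reviewing, the documented semantics map onto callers like the following minimal sketch. The wait queue my_wq and the helper wait_for_event() are hypothetical; the flag and return constants are the ones named in this hunk, and the header paths are assumed to match this kernel tree:

#include <synch/waitq.h>
#include <synch/synch.h>

static waitq_t my_wq;   /* hypothetical; waitq_initialize(&my_wq) elsewhere */

static int wait_for_event(void)
{
        /* Sleep at most 100 ms; the sleep may be interrupted. */
        int rc = waitq_sleep_timeout(&my_wq, 100000,
            SYNCH_FLAGS_INTERRUPTIBLE);

        switch (rc) {
        case ESYNCH_OK_ATOMIC:          /* pending wakeup existed; no sleep */
        case ESYNCH_OK_BLOCKED:         /* slept and was woken up */
                return 0;
        case ESYNCH_TIMEOUT:            /* the timeout expired first */
        case ESYNCH_INTERRUPTED:        /* the sleep was interrupted */
                return -1;
        case ESYNCH_WOULD_BLOCK:        /* only possible with usec == 0 and
                                           SYNCH_FLAGS_NON_BLOCKING set */
        default:
                return -1;
        }
}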
351,7 → 351,8
                        return ESYNCH_TIMEOUT;
                }
                THREAD->timeout_pending = true;
-               timeout_register(&THREAD->sleep_timeout, (uint64_t) usec, waitq_timeouted_sleep, THREAD);
+               timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
+                   waitq_timeouted_sleep, THREAD);
        }

        list_append(&THREAD->wq_link, &wq->head);
364,7 → 365,8
        spinlock_unlock(&THREAD->lock);

-       scheduler();    /* wq->lock is released in scheduler_separated_stack() */
+       /* wq->lock is released in scheduler_separated_stack() */
+       scheduler();

        return ESYNCH_OK_BLOCKED;
 }
372,16 → 374,15
 /** Wake up first thread sleeping in a wait queue
  *
- * Wake up first thread sleeping in a wait queue.
- * This is the SMP- and IRQ-safe wrapper meant for
- * general use.
+ * Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe
+ * wrapper meant for general use.
  *
- * Besides its 'normal' wakeup operation, it attempts
- * to unregister possible timeout.
+ * Besides its 'normal' wakeup operation, it attempts to unregister possible
+ * timeout.
  *
  * @param wq Pointer to wait queue.
- * @param all If this is non-zero, all sleeping threads
- * will be woken up and missed count will be zeroed.
+ * @param all If this is non-zero, all sleeping threads will be woken up and
+ * missed count will be zeroed.
  */
 void waitq_wakeup(waitq_t *wq, bool all)
 {
398,13 → 399,12
 /** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
  *
- * This is the internal SMP- and IRQ-unsafe version
- * of waitq_wakeup(). It assumes wq->lock is already
- * locked and interrupts are already disabled.
+ * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
+ * assumes wq->lock is already locked and interrupts are already disabled.
  *
  * @param wq Pointer to wait queue.
- * @param all If this is non-zero, all sleeping threads
- * will be woken up and missed count will be zeroed.
+ * @param all If this is non-zero, all sleeping threads will be woken up and
+ * missed count will be zeroed.
  */
 void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
 {
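The two comments describe the usual safe/unsafe pairing: the public wrapper establishes exactly the context the _unsafe worker assumes. A sketch of what the wrapper must do before delegating, consistent with the documented assumptions (not a quote of the committed body, which is elided from these hunks):

void waitq_wakeup(waitq_t *wq, bool all)
{
        ipl_t ipl;

        /* Establish what _waitq_wakeup_unsafe() assumes:
         * interrupts disabled and wq->lock held. */
        ipl = interrupts_disable();
        spinlock_lock(&wq->lock);

        _waitq_wakeup_unsafe(wq, all);

        spinlock_unlock(&wq->lock);
        interrupts_restore(ipl);
}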
/trunk/kernel/generic/src/cpu/cpu.c
---
59,9 → 59,9
 void cpu_init(void) {
        int i, j;

 #ifdef CONFIG_SMP
        if (config.cpu_active == 1) {
 #endif /* CONFIG_SMP */
                cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count,
                    FRAME_ATOMIC);
                if (!cpus)
83,9 → 83,9
                }
        }

 #ifdef CONFIG_SMP
        }
 #endif /* CONFIG_SMP */

        CPU = &cpus[config.cpu_active-1];
/trunk/kernel/generic/src/proc/scheduler.c
---
1,5 → 1,5
 /*
- * Copyright (C) 2001-2004 Jakub Jermar
+ * Copyright (C) 2001-2007 Jakub Jermar
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
142,7 → 142,8
                        /* Might sleep */
                        spinlock_unlock(&THREAD->lock);
                        spinlock_unlock(&CPU->lock);
-                       THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
+                       THREAD->saved_fpu_context =
+                           slab_alloc(fpu_context_slab, 0);
                        /* We may have switched CPUs during slab_alloc */
                        goto restart;
                }
231,7 → 232,7
        spinlock_lock(&t->lock);
        t->cpu = CPU;
-       t->ticks = us2ticks((i+1)*10000);
+       t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;        /* correct rq index */

        /*
267,7 → 268,7
        list_initialize(&head);
        spinlock_lock(&CPU->lock);
        if (CPU->needs_relink > NEEDS_RELINK_MAX) {
-               for (i = start; i<RQ_COUNT-1; i++) {
+               for (i = start; i < RQ_COUNT - 1; i++) {
                        /* remember and empty rq[i + 1] */
                        r = &CPU->rq[i + 1];
                        spinlock_lock(&r->lock);
331,9 → 332,9
        }

        /*
-        * Interrupt priority level of preempted thread is recorded here
-        * to facilitate scheduler() invocations from interrupts_disable()'d
-        * code (e.g. waitq_sleep_timeout()).
+        * Interrupt priority level of preempted thread is recorded
+        * here to facilitate scheduler() invocations from
+        * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
 }
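In other words, a sleeper may legitimately enter the scheduler with interrupts disabled, because its interrupt priority level travels with its saved context. A simplified sketch of the calling pattern the comment refers to (locking and queue manipulation elided; waitq_sleep_timeout() is the real example named above):

static void sleep_sketch(void)
{
        ipl_t ipl;

        ipl = interrupts_disable();
        /* ... enqueue THREAD on a wait queue, mark it Sleeping ... */
        scheduler();            /* returns when the thread runs again;
                                   saved_context.ipl carries our ipl across */
        interrupts_restore(ipl);
}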
394,8 → 395,8
                        thread_destroy(THREAD);
                } else {
                        /*
-                        * The thread structure is kept allocated until somebody
-                        * calls thread_detach() on it.
+                        * The thread structure is kept allocated until
+                        * somebody calls thread_detach() on it.
                         */
                        if (!spinlock_trylock(&THREAD->join_wq.lock)) {
                                /*
421,13 → 422,15
                THREAD->priority = -1;

                /*
-                * We need to release wq->lock which we locked in waitq_sleep().
-                * Address of wq->lock is kept in THREAD->sleep_queue.
+                * We need to release wq->lock which we locked in
+                * waitq_sleep(). Address of wq->lock is kept in
+                * THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
-                * Check for possible requests for out-of-context invocation.
+                * Check for possible requests for out-of-context
+                * invocation.
                 */
                if (THREAD->call_me) {
                        THREAD->call_me(THREAD->call_me_with);
443,7 → 446,8
                /*
                 * Entering state is unexpected.
                 */
-               panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
+               panic("tid%d: unexpected state %s\n", THREAD->tid,
+                   thread_states[THREAD->state]);
                break;
        }
459,7 → 463,8
        relink_rq(priority);

        /*
-        * If both the old and the new task are the same, lots of work is avoided.
+        * If both the old and the new task are the same, lots of work is
+        * avoided.
         */
        if (TASK != THREAD->task) {
                as_t *as1 = NULL;
476,7 → 481,8
                spinlock_unlock(&THREAD->task->lock);

                /*
-                * Note that it is possible for two tasks to share one address space.
+                * Note that it is possible for two tasks to share one address
+                * space.
                 */
                if (as1 != as2) {
                        /*
493,8 → 499,9
        THREAD->state = Running;

 #ifdef SCHEDULER_VERBOSE
-       printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
-           CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
+       printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
+           CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
+           atomic_get(&CPU->nrdy));
 #endif

        /*
508,7 → 515,8
        before_thread_runs();

        /*
-        * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
+        * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
+        * thread's stack.
         */
        the_copy(THE, (the_t *) THREAD->kstack);
555,10 → 563,11
                goto satisfied;

        /*
-        * Searching least priority queues on all CPU's first and most priority
-        * queues on all CPU's last.
+        * Searching least priority queues on all CPU's first and most priority
+        * queues on all CPU's last.
         */
-       for (j=RQ_COUNT-1; j >= 0; j--) {
-               for (i=0; i < config.cpu_active; i++) {
+       for (j = RQ_COUNT - 1; j >= 0; j--) {
+               for (i = 0; i < config.cpu_active; i++) {
                        link_t *l;
                        runq_t *r;
                        cpu_t *cpu;
567,7 → 576,8
                        /*
                         * Not interested in ourselves.
-                        * Doesn't require interrupt disabling for kcpulb has THREAD_FLAG_WIRED.
+                        * Doesn't require interrupt disabling for kcpulb has
+                        * THREAD_FLAG_WIRED.
                         */
                        if (CPU == cpu)
                                continue;
588,13 → 598,16
                        while (l != &r->rq_head) {
                                t = list_get_instance(l, thread_t, rq_link);
                                /*
-                                * We don't want to steal CPU-wired threads neither threads already
-                                * stolen. The latter prevents threads from migrating between CPU's
-                                * without ever being run. We don't want to steal threads whose FPU
-                                * context is still in CPU.
+                                * We don't want to steal CPU-wired threads
+                                * neither threads already stolen. The latter
+                                * prevents threads from migrating between CPU's
+                                * without ever being run. We don't want to
+                                * steal threads whose FPU context is still in
+                                * CPU.
                                 */
                                spinlock_lock(&t->lock);
-                               if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
+                               if ((!(t->flags & (THREAD_FLAG_WIRED |
+                                   THREAD_FLAG_STOLEN))) &&
                                    (!(t->fpu_context_engaged)) ) {
                                        /*
                                         * Remove t from r.
621,8 → 634,9
                                 */
                                spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
-                               printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
-                                   CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
+                               printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
+                                   "avg=%nd\n", CPU->id, t->tid, CPU->id,
+                                   atomic_get(&CPU->nrdy),
                                    atomic_get(&nrdy) / config.cpu_active);
 #endif
                                t->flags |= THREAD_FLAG_STOLEN;
637,7 → 651,8
                                goto satisfied;

                        /*
-                        * We are not satisfied yet, focus on another CPU next time.
+                        * We are not satisfied yet, focus on another
+                        * CPU next time.
                         */
                        k++;
688,9 → 703,10
                spinlock_lock(&cpus[cpu].lock);
                printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
-                   cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
+                   cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
+                   cpus[cpu].needs_relink);

-               for (i=0; i<RQ_COUNT; i++) {
+               for (i = 0; i < RQ_COUNT; i++) {
                        r = &cpus[cpu].rq[i];
                        spinlock_lock(&r->lock);
                        if (!r->n) {
698,10 → 714,11
                                continue;
                        }
                        printf("\trq[%d]: ", i);
-                       for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
+                       for (cur = r->rq_head.next; cur != &r->rq_head;
+                           cur = cur->next) {
                                t = list_get_instance(cur, thread_t, rq_link);
                                printf("%d(%s) ", t->tid,
                                    thread_states[t->state]);
                        }
                        printf("\n");
                        spinlock_unlock(&r->lock);
/trunk/kernel/generic/src/proc/thread.c
---
80,13 → 80,16
        "Undead"
 };

-/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
+/** Lock protecting the threads_btree B+tree.
+ *
+ * For locking rules, see declaration thereof.
+ */
 SPINLOCK_INITIALIZE(threads_lock);

 /** B+tree of all threads.
  *
- * When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long
- * as the threads_lock is held.
+ * When a thread is found in the threads_btree B+tree, it is guaranteed to
+ * exist as long as the threads_lock is held.
  */
 btree_t threads_btree;
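The existence guarantee is what makes validation-by-lookup safe. A minimal sketch of the pattern, assuming the usual kernel btree_search() signature; the helper name and the raw pointer addr are hypothetical:

static bool thread_is_alive(thread_t *addr)     /* hypothetical helper */
{
        btree_node_t *leaf;
        bool found;
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&threads_lock);

        /* While threads_lock is held, a hit means *addr is a live thread. */
        found = btree_search(&threads_btree, (btree_key_t) ((uintptr_t) addr),
            &leaf) != NULL;

        spinlock_unlock(&threads_lock);         /* the guarantee ends here */
        interrupts_restore(ipl);

        return found;
}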
98,11 → 101,10
 slab_cache_t *fpu_context_slab;
 #endif

-/** Thread wrapper
+/** Thread wrapper.
  *
- * This wrapper is provided to ensure that every thread
- * makes a call to thread_exit() when its implementing
- * function returns.
+ * This wrapper is provided to ensure that every thread makes a call to
+ * thread_exit() when its implementing function returns.
  *
  * interrupts_disable() is assumed.
  *
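The wrapper in question is cushion(), the function installed by context_set() in thread_create() below. A simplified sketch of the guarantee it provides; accounting details are omitted, and the thread_code/thread_arg field names are assumptions about thread_t:

static void cushion(void)
{
        void (*f)(void *) = THREAD->thread_code;        /* assumed fields */
        void *arg = THREAD->thread_arg;

        /* interrupts_disable() is assumed on entry; see above. */
        spinlock_unlock(&THREAD->lock);
        interrupts_enable();

        f(arg);                 /* the thread's implementing function */

        thread_exit();          /* reached even if f() returns normally */
}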
201,14 → 203,12
 {
        THREAD = NULL;
        atomic_set(&nrdy,0);
-       thread_slab = slab_cache_create("thread_slab",
-           sizeof(thread_t),0,
-           thr_constructor, thr_destructor, 0);
+       thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
+           thr_constructor, thr_destructor, 0);
 #ifdef ARCH_HAS_FPU
-       fpu_context_slab = slab_cache_create("fpu_slab",
-           sizeof(fpu_context_t),
-           FPU_CONTEXT_ALIGN,
-           NULL, NULL, 0);
+       fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
+           FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif

        btree_create(&threads_btree);
234,7 → 234,7
        ASSERT(! (t->state == Ready));

-       i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority;
+       i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

        cpu = CPU;
        if (t->flags & THREAD_FLAG_WIRED) {
267,7 → 267,7
  */
 void thread_destroy(thread_t *t)
 {
        bool destroy_task = false;

        ASSERT(t->state == Exiting || t->state == Undead);
        ASSERT(t->task);
274,8 → 274,8
        ASSERT(t->cpu);

        spinlock_lock(&t->cpu->lock);
-       if(t->cpu->fpu_owner==t)
-               t->cpu->fpu_owner=NULL;
+       if(t->cpu->fpu_owner == t)
+               t->cpu->fpu_owner = NULL;
        spinlock_unlock(&t->cpu->lock);

        spinlock_unlock(&t->lock);
310,12 → 310,14
  * @param task Task to which the thread belongs.
  * @param flags Thread flags.
  * @param name Symbolic name.
- * @param uncounted Thread's accounting doesn't affect accumulated task accounting.
+ * @param uncounted Thread's accounting doesn't affect accumulated task
+ * accounting.
  *
  * @return New thread's structure on success, NULL on failure.
  *
  */
-thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name, bool uncounted)
+thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
+       int flags, char *name, bool uncounted)
 {
        thread_t *t;
        ipl_t ipl;
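With the rewrapped signature in view, a typical call site looks like this sketch; worker() and spawn_worker() are hypothetical, attaching the new thread to the caller's TASK with no flags:

static void worker(void *arg)
{
        /* ... do work; returning here still ends in thread_exit() ... */
}

static void spawn_worker(void)          /* hypothetical */
{
        thread_t *t;

        t = thread_create(worker, NULL, TASK, 0, "worker", false);
        if (t)
                thread_ready(t);        /* make it runnable */
}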
325,7 → 327,8
                return NULL;

        /* Not needed, but good for debugging */
-       memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
+       memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
+           0);

        ipl = interrupts_disable();
        spinlock_lock(&tidlock);
334,7 → 337,8
        interrupts_restore(ipl);

        context_save(&t->saved_context);
-       context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, THREAD_STACK_SIZE);
+       context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
+           THREAD_STACK_SIZE);

        the_initialize((the_t *) t->kstack);
376,7 → 380,8
        t->fpu_context_exists = 0;
        t->fpu_context_engaged = 0;

-       thread_create_arch(t);  /* might depend on previous initialization */
+       /* might depend on previous initialization */
+       thread_create_arch(t);

        /*
         * Attach to the containing task.
398,7 → 403,8
         * Register this thread in the system-wide list.
         */
        spinlock_lock(&threads_lock);
-       btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t, NULL);
+       btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t,
+           NULL);
        spinlock_unlock(&threads_lock);

        interrupts_restore(ipl);
408,9 → 414,8

 /** Terminate thread.
  *
- * End current thread execution and switch it to the exiting
- * state. All pending timeouts are executed.
- *
+ * End current thread execution and switch it to the exiting state. All pending
+ * timeouts are executed.
  */
 void thread_exit(void)
 {
419,7 → 424,8
 restart:
        ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
-       if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
+       if (THREAD->timeout_pending) {
+               /* busy waiting for timeouts in progress */
                spinlock_unlock(&THREAD->lock);
                interrupts_restore(ipl);
                goto restart;
443,7 → 449,7
  */
 void thread_sleep(uint32_t sec)
 {
-       thread_usleep(sec*1000000);
+       thread_usleep(sec * 1000000);
 }

 /** Wait for another thread to exit.