/SPARTAN/trunk/src/proc/scheduler.c
---
69,9 → 69,9
 loop:
 	cpu_priority_high();
-	spinlock_lock(&the->cpu->lock);
-	n = the->cpu->nrdy;
-	spinlock_unlock(&the->cpu->lock);
+	spinlock_lock(&CPU->lock);
+	n = CPU->nrdy;
+	spinlock_unlock(&CPU->lock);
 	cpu_priority_low();
81,8 → 81,8
 	 * If the load balancing thread is not running, wake it up and
 	 * set CPU-private flag that the kcpulb has been started.
 	 */
-	if (test_and_set(&the->cpu->kcpulbstarted) == 0) {
-		waitq_wakeup(&the->cpu->kcpulb_wq, 0);
+	if (test_and_set(&CPU->kcpulbstarted) == 0) {
+		waitq_wakeup(&CPU->kcpulb_wq, 0);
 		goto loop;
 	}
 #endif /* __SMP__ */
100,7 → 100,7
 	cpu_priority_high();
 	for (i = 0; i<RQ_COUNT; i++) {
-		r = &the->cpu->rq[i];
+		r = &CPU->rq[i];
 		spinlock_lock(&r->lock);
 		if (r->n == 0) {
 			/*
114,9 → 114,9
 		nrdy--;
 		spinlock_unlock(&nrdylock);
-		spinlock_lock(&the->cpu->lock);
-		the->cpu->nrdy--;
-		spinlock_unlock(&the->cpu->lock);
+		spinlock_lock(&CPU->lock);
+		CPU->nrdy--;
+		spinlock_unlock(&CPU->lock);
 		r->n--;
129,7 → 129,7
 		spinlock_unlock(&r->lock);
 		spinlock_lock(&t->lock);
-		t->cpu = the->cpu;
+		t->cpu = CPU;
 		t->ticks = us2ticks((i+1)*10000);
 		t->pri = i;	/* eventually correct rq index */
159,11 → 159,11
 	int i, n;
 	list_initialize(&head);
-	spinlock_lock(&the->cpu->lock);
-	if (the->cpu->needs_relink > NEEDS_RELINK_MAX) {
+	spinlock_lock(&CPU->lock);
+	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
 		for (i = start; i<RQ_COUNT-1; i++) {
 			/* remember and empty rq[i + 1] */
-			r = &the->cpu->rq[i + 1];
+			r = &CPU->rq[i + 1];
 			spinlock_lock(&r->lock);
 			list_concat(&head, &r->rq_head);
 			n = r->n;
171,15 → 171,15
 			spinlock_unlock(&r->lock);
 			/* append rq[i + 1] to rq[i] */
-			r = &the->cpu->rq[i];
+			r = &CPU->rq[i];
 			spinlock_lock(&r->lock);
 			list_concat(&r->rq_head, &head);
 			r->n += n;
 			spinlock_unlock(&r->lock);
 		}
-		the->cpu->needs_relink = 0;
+		CPU->needs_relink = 0;
 	}
-	spinlock_unlock(&the->cpu->lock);
+	spinlock_unlock(&CPU->lock);
 }
195,17 → 195,17
 	if (haltstate)
 		halt();
-	if (the->thread) {
-		spinlock_lock(&the->thread->lock);
-		if (!context_save(&the->thread->saved_context)) {
+	if (THREAD) {
+		spinlock_lock(&THREAD->lock);
+		if (!context_save(&THREAD->saved_context)) {
 			/*
 			 * This is the place where threads leave scheduler();
 			 */
-			spinlock_unlock(&the->thread->lock);
-			cpu_priority_restore(the->thread->saved_context.pri);
+			spinlock_unlock(&THREAD->lock);
+			cpu_priority_restore(THREAD->saved_context.pri);
 			return;
 		}
-		the->thread->saved_context.pri = pri;
+		THREAD->saved_context.pri = pri;
 	}
 	/*
220,10 → 220,10
 	 * Therefore the scheduler() function continues in
 	 * scheduler_separated_stack().
 	 */
-	context_save(&the->cpu->saved_context);
-	the->cpu->saved_context.sp = (__address) &the->cpu->stack[CPU_STACK_SIZE-8];
-	the->cpu->saved_context.pc = (__address) scheduler_separated_stack;
-	context_restore(&the->cpu->saved_context);
+	context_save(&CPU->saved_context);
+	CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
+	CPU->saved_context.pc = (__address) scheduler_separated_stack;
+	context_restore(&CPU->saved_context);
 	/* not reached */
 }
231,34 → 231,34
 {
 	int priority;
-	if (the->thread) {
-		switch (the->thread->state) {
+	if (THREAD) {
+		switch (THREAD->state) {
 		case Running:
-			the->thread->state = Ready;
-			spinlock_unlock(&the->thread->lock);
-			thread_ready(the->thread);
+			THREAD->state = Ready;
+			spinlock_unlock(&THREAD->lock);
+			thread_ready(THREAD);
 			break;
 		case Exiting:
-			frame_free((__address) the->thread->kstack);
-			if (the->thread->ustack) {
-				frame_free((__address) the->thread->ustack);
+			frame_free((__address) THREAD->kstack);
+			if (THREAD->ustack) {
+				frame_free((__address) THREAD->ustack);
 			}
 			/*
 			 * Detach from the containing task.
 			 */
-			spinlock_lock(&the->task->lock);
-			list_remove(&the->thread->th_link);
-			spinlock_unlock(&the->task->lock);
+			spinlock_lock(&TASK->lock);
+			list_remove(&THREAD->th_link);
+			spinlock_unlock(&TASK->lock);
-			spinlock_unlock(&the->thread->lock);
+			spinlock_unlock(&THREAD->lock);
 			spinlock_lock(&threads_lock);
-			list_remove(&the->thread->threads_link);
+			list_remove(&THREAD->threads_link);
 			spinlock_unlock(&threads_lock);
-			free(the->thread);
+			free(THREAD);
 			break;
266,24 → 266,24
 			/*
 			 * Prefer the thread after it's woken up.
 			 */
-			the->thread->pri = -1;
+			THREAD->pri = -1;
 			/*
 			 * We need to release wq->lock which we locked in waitq_sleep().
-			 * Address of wq->lock is kept in the->thread->sleep_queue.
+			 * Address of wq->lock is kept in THREAD->sleep_queue.
 			 */
-			spinlock_unlock(&the->thread->sleep_queue->lock);
+			spinlock_unlock(&THREAD->sleep_queue->lock);
 			/*
 			 * Check for possible requests for out-of-context invocation.
 			 */
-			if (the->thread->call_me) {
-				the->thread->call_me(the->thread->call_me_with);
-				the->thread->call_me = NULL;
-				the->thread->call_me_with = NULL;
+			if (THREAD->call_me) {
+				THREAD->call_me(THREAD->call_me_with);
+				THREAD->call_me = NULL;
+				THREAD->call_me_with = NULL;
 			}
-			spinlock_unlock(&the->thread->lock);
+			spinlock_unlock(&THREAD->lock);
 			break;
291,38 → 291,38
 			/*
 			 * Entering state is unexpected.
 			 */
-			panic("tid%d: unexpected state %s\n", the->thread->tid, thread_states[the->thread->state]);
+			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
 			break;
 		}
-		the->thread = NULL;
+		THREAD = NULL;
 	}
-	the->thread = find_best_thread();
+	THREAD = find_best_thread();
-	spinlock_lock(&the->thread->lock);
-	priority = the->thread->pri;
-	spinlock_unlock(&the->thread->lock);
+	spinlock_lock(&THREAD->lock);
+	priority = THREAD->pri;
+	spinlock_unlock(&THREAD->lock);
 	relink_rq(priority);
-	spinlock_lock(&the->thread->lock);
+	spinlock_lock(&THREAD->lock);
 	/*
 	 * If both the old and the new task are the same, lots of work is avoided.
 	 */
-	if (the->task != the->thread->task) {
+	if (TASK != THREAD->task) {
 		vm_t *m1 = NULL;
 		vm_t *m2;
-		if (the->task) {
-			spinlock_lock(&the->task->lock);
-			m1 = the->task->vm;
-			spinlock_unlock(&the->task->lock);
+		if (TASK) {
+			spinlock_lock(&TASK->lock);
+			m1 = TASK->vm;
+			spinlock_unlock(&TASK->lock);
 		}
-		spinlock_lock(&the->thread->task->lock);
-		m2 = the->thread->task->vm;
-		spinlock_unlock(&the->thread->task->lock);
+		spinlock_lock(&THREAD->task->lock);
+		m2 = THREAD->task->vm;
+		spinlock_unlock(&THREAD->task->lock);
 		/*
 		 * Note that it is possible for two tasks to share one vm mapping.
337,16 → 337,16
 			}
 			vm_install(m2);
 		}
-		the->task = the->thread->task;
+		TASK = THREAD->task;
 	}
-	the->thread->state = Running;
+	THREAD->state = Running;
 #ifdef SCHEDULER_VERBOSE
-	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", the->cpu->id, the->thread->tid, the->thread->pri, the->thread->ticks, the->cpu->nrdy);
+	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
 #endif
-	context_restore(&the->thread->saved_context);
+	context_restore(&THREAD->saved_context);
 	/* not reached */
 }
365,7 → 365,7
 	/*
 	 * Sleep until there's some work to do.
 	 */
-	waitq_sleep(&the->cpu->kcpulb_wq);
+	waitq_sleep(&CPU->kcpulb_wq);
 not_satisfied:
 	/*
374,10 → 374,10
 	 * passes. Each time get the most up to date counts.
 	 */
 	pri = cpu_priority_high();
-	spinlock_lock(&the->cpu->lock);
+	spinlock_lock(&CPU->lock);
 	count = nrdy / config.cpu_active;
-	count -= the->cpu->nrdy;
-	spinlock_unlock(&the->cpu->lock);
+	count -= CPU->nrdy;
+	spinlock_unlock(&CPU->lock);
 	cpu_priority_restore(pri);
 	if (count <= 0)
399,7 → 399,7
 		 * Not interested in ourselves.
 		 * Doesn't require interrupt disabling for kcpulb is X_WIRED.
 		 */
-		if (the->cpu == cpu)
+		if (CPU == cpu)
 			continue;
 restart:	pri = cpu_priority_high();
460,7 → 460,7
 			 */
 			spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
-			printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", the->cpu->id, t->tid, the->cpu->id, the->cpu->nrdy, nrdy / config.cpu_active);
+			printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
 #endif
 			t->flags |= X_STOLEN;
 			spinlock_unlock(&t->lock);
483,7 → 483,7
 		}
 	}
-	if (the->cpu->nrdy) {
+	if (CPU->nrdy) {
 		/*
 		 * Be a little bit light-weight and let migrated threads run.
 		 */
503,7 → 503,7
 	/*
 	 * Tell find_best_thread() to wake us up later again.
 	 */
-	the->cpu->kcpulbstarted = 0;
+	CPU->kcpulbstarted = 0;
 	goto loop;
 }
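Note: the definitions of the new CPU, THREAD and TASK macros are not part of the hunks shown here. Judging from the mechanical the->cpu, the->thread and the->task substitutions above, and from the #include <arch.h> added to src/cpu/cpu.c below, they presumably live in include/arch.h and simply name the members of the per-processor structure pointed to by `the'. A minimal sketch of what such a header could contain (an assumption, not the actual file):

	/* include/arch.h (hypothetical sketch, not part of this changeset) */
	#define CPU	(the->cpu)	/* cpu_t * of the executing processor */
	#define THREAD	(the->thread)	/* thread_t * currently running on CPU */
	#define TASK	(the->task)	/* task_t * owning THREAD */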
/SPARTAN/trunk/src/proc/task.c
---
40,7 → 40,7
 void task_init(void)
 {
-	the->task = NULL;
+	TASK = NULL;
 	spinlock_initialize(&tasks_lock);
 	list_initialize(&tasks_head);
 }
/SPARTAN/trunk/src/proc/thread.c
---
66,11 → 66,11
  */
 void cushion(void)
 {
-	void (*f)(void *) = the->thread->thread_code;
-	void *arg = the->thread->thread_arg;
+	void (*f)(void *) = THREAD->thread_code;
+	void *arg = THREAD->thread_arg;
 	/* this is where each thread wakes up after its creation */
-	spinlock_unlock(&the->thread->lock);
+	spinlock_unlock(&THREAD->lock);
 	cpu_priority_low();
 	f(arg);
80,7 → 80,7
 void thread_init(void)
 {
-	the->thread = NULL;
+	THREAD = NULL;
 	nrdy = 0;
 	spinlock_initialize(&threads_lock);
 	list_initialize(&threads_head);
214,14 → 214,14
 restart:
 	pri = cpu_priority_high();
-	spinlock_lock(&the->thread->lock);
-	if (the->thread->timeout_pending) { /* busy waiting for timeouts in progress */
-		spinlock_unlock(&the->thread->lock);
+	spinlock_lock(&THREAD->lock);
+	if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
+		spinlock_unlock(&THREAD->lock);
 		cpu_priority_restore(pri);
 		goto restart;
 	}
-	the->thread->state = Exiting;
-	spinlock_unlock(&the->thread->lock);
+	THREAD->state = Exiting;
+	spinlock_unlock(&THREAD->lock);
 	scheduler();
 }
247,9 → 247,9
 	pri_t pri;
 	pri = cpu_priority_high();
-	spinlock_lock(&the->thread->lock);
-	the->thread->call_me = call_me;
-	the->thread->call_me_with = call_me_with;
-	spinlock_unlock(&the->thread->lock);
+	spinlock_lock(&THREAD->lock);
+	THREAD->call_me = call_me;
+	THREAD->call_me_with = call_me_with;
+	spinlock_unlock(&THREAD->lock);
 	cpu_priority_restore(pri);
 }
/SPARTAN/trunk/src/main/kinit.c
---
70,7 → 70,7
 	 * not mess together with kcpulb and klwtm threads.
 	 * Just a beautification.
 	 */
-	if (t = thread_create(kmp, NULL, the->task, 0)) {
+	if (t = thread_create(kmp, NULL, TASK, 0)) {
 		spinlock_lock(&t->lock);
 		t->flags |= X_WIRED;
 		t->cpu = &cpus[0];
95,7 → 95,7
 	 */
 	for (i = 0; i < config.cpu_count; i++) {
-		if (t = thread_create(kcpulb, NULL, the->task, 0)) {
+		if (t = thread_create(kcpulb, NULL, TASK, 0)) {
 			spinlock_lock(&t->lock);
 			t->flags |= X_WIRED;
 			t->cpu = &cpus[i];
/SPARTAN/trunk/src/main/main.c
---
184,9 → 184,9
 	 * collide with another CPU coming up. To prevent this, we
 	 * switch to this cpu's private stack prior to waking kmp up.
 	 */
-	the->cpu->saved_context.sp = (__address) &the->cpu->stack[CPU_STACK_SIZE-8];
-	the->cpu->saved_context.pc = (__address) main_ap_separated_stack;
-	context_restore(&the->cpu->saved_context);
+	CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
+	CPU->saved_context.pc = (__address) main_ap_separated_stack;
+	context_restore(&CPU->saved_context);
 	/* not reached */
 }
/SPARTAN/trunk/src/synch/rwlock.c
---
80,9 → 80,9
 	int rc;
 	pri = cpu_priority_high();
-	spinlock_lock(&the->thread->lock);
-	the->thread->rwlock_holder_type = RWLOCK_WRITER;
-	spinlock_unlock(&the->thread->lock);
+	spinlock_lock(&THREAD->lock);
+	THREAD->rwlock_holder_type = RWLOCK_WRITER;
+	spinlock_unlock(&THREAD->lock);
 	cpu_priority_restore(pri);
 	/*
120,9 → 120,9
 	pri_t pri;
 	pri = cpu_priority_high();
-	spinlock_lock(&the->thread->lock);
-	the->thread->rwlock_holder_type = RWLOCK_READER;
-	spinlock_unlock(&the->thread->lock);
+	spinlock_lock(&THREAD->lock);
+	THREAD->rwlock_holder_type = RWLOCK_READER;
+	spinlock_unlock(&THREAD->lock);
 	spinlock_lock(&rwl->lock);
/SPARTAN/trunk/src/synch/spinlock.c
---
49,7 → 49,7
 	while (test_and_set(&sl->val)) {
 		if (i++ > 300000) {
-			printf("cpu%d: looping on spinlock %X, caller=%X\n", the->cpu->id, sl, caller);
+			printf("cpu%d: looping on spinlock %X, caller=%X\n", CPU->id, sl, caller);
 			i = 0;
 		}
 	}
/SPARTAN/trunk/src/synch/waitq.c
---
129,13 → 129,13
 	 * Simply, the thread is not allowed to go to sleep if
 	 * there are timeouts in progress.
 	 */
-	spinlock_lock(&the->thread->lock);
-	if (the->thread->timeout_pending) {
-		spinlock_unlock(&the->thread->lock);
+	spinlock_lock(&THREAD->lock);
+	if (THREAD->timeout_pending) {
+		spinlock_unlock(&THREAD->lock);
 		cpu_priority_restore(pri);
 		goto restart;
 	}
-	spinlock_unlock(&the->thread->lock);
+	spinlock_unlock(&THREAD->lock);
 	spinlock_lock(&wq->lock);
159,30 → 159,30
 	/*
 	 * Now we are firmly decided to go to sleep.
 	 */
-	spinlock_lock(&the->thread->lock);
+	spinlock_lock(&THREAD->lock);
 	if (usec) {
 		/* We use the timeout variant. */
-		if (!context_save(&the->thread->sleep_timeout_context)) {
+		if (!context_save(&THREAD->sleep_timeout_context)) {
 			/*
 			 * Short emulation of scheduler() return code.
 			 */
-			spinlock_unlock(&the->thread->lock);
+			spinlock_unlock(&THREAD->lock);
 			cpu_priority_restore(pri);
 			return ESYNCH_TIMEOUT;
 		}
-		the->thread->timeout_pending = 1;
-		timeout_register(&the->thread->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, the->thread);
+		THREAD->timeout_pending = 1;
+		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, THREAD);
 	}
-	list_append(&the->thread->wq_link, &wq->head);
+	list_append(&THREAD->wq_link, &wq->head);
 	/*
 	 * Suspend execution.
 	 */
-	the->thread->state = Sleeping;
-	the->thread->sleep_queue = wq;
+	THREAD->state = Sleeping;
+	THREAD->sleep_queue = wq;
-	spinlock_unlock(&the->thread->lock);
+	spinlock_unlock(&THREAD->lock);
 	scheduler(); /* wq->lock is released in scheduler_separated_stack() */
 	cpu_priority_restore(pri);
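The "short emulation of scheduler() return code" above relies on the same trick as scheduler() itself: context_save() returns nonzero on the path that stores the context and zero when control re-enters through a later context_restore(), so the body of the if runs only on wakeup. This is the inverse of the return convention of the standard setjmp()/longjmp() pair, which the following stand-alone sketch uses as a stand-in for the kernel primitives (an illustration only, not SPARTAN code):

	#include <setjmp.h>
	#include <stdio.h>

	static jmp_buf sleep_ctx;

	int main(void)
	{
		/* setjmp() returns 0 when saving and nonzero when resumed --
		 * the opposite convention of context_save() in waitq_sleep(). */
		if (setjmp(sleep_ctx)) {
			puts("resumed: this mirrors the ESYNCH_TIMEOUT path");
			return 0;
		}
		puts("context saved; pretend the sleep timed out");
		longjmp(sleep_ctx, 1);	/* plays the role of context_restore() */
	}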
/SPARTAN/trunk/src/lib/func.c
---
38,7 → 38,7
 {
 	haltstate = 1;
 	cpu_priority_high();
-	printf("cpu%d: halted\n", the->cpu->id);
+	printf("cpu%d: halted\n", CPU->id);
 	cpu_halt();
 }
/SPARTAN/trunk/src/cpu/cpu.c
---
27,6 → 27,7
  */
 #include <cpu.h>
+#include <arch.h>
 #include <arch/cpu.h>
 #include <mm/heap.h>
 #include <mm/page.h>
84,7 → 85,7
 	}
 #endif /* __SMP__ */
-	the->cpu = &cpus[config.cpu_active-1];
+	CPU = &cpus[config.cpu_active-1];
 	cpu_identify();
 	cpu_arch_init();
 }
/SPARTAN/trunk/src/time/delay.c
---
41,6 → 41,6
 	pri_t pri;
 	pri = cpu_priority_high();
-	asm_delay_loop(microseconds * the->cpu->delay_loop_const);
+	asm_delay_loop(microseconds * CPU->delay_loop_const);
 	cpu_priority_restore(pri);
 }
/SPARTAN/trunk/src/time/timeout.c
---
39,8 → 39,8
 void timeout_init(void)
 {
-	spinlock_initialize(&the->cpu->timeoutlock);
-	list_initialize(&the->cpu->timeout_active_head);
+	spinlock_initialize(&CPU->timeoutlock);
+	list_initialize(&CPU->timeout_active_head);
 }
70,13 → 70,13
 	__u64 sum;
 	pri = cpu_priority_high();
-	spinlock_lock(&the->cpu->timeoutlock);
+	spinlock_lock(&CPU->timeoutlock);
 	spinlock_lock(&t->lock);
 	if (t->cpu)
 		panic("timeout_register: t->cpu != 0");
-	t->cpu = the->cpu;
+	t->cpu = CPU;
 	t->ticks = us2ticks(time);
 	t->handler = f;
86,8 → 86,8
 	 * Insert t into the active timeouts list according to t->ticks.
 	 */
 	sum = 0;
-	l = the->cpu->timeout_active_head.next;
-	while (l != &the->cpu->timeout_active_head) {
+	l = CPU->timeout_active_head.next;
+	while (l != &CPU->timeout_active_head) {
 		hlp = list_get_instance(l, timeout_t, link);
 		spinlock_lock(&hlp->lock);
 		if (t->ticks < sum + hlp->ticks) {
109,7 → 109,7
 	/*
 	 * Decrease ticks of t's immediate successor by t->ticks.
 	 */
-	if (l != &the->cpu->timeout_active_head) {
+	if (l != &CPU->timeout_active_head) {
 		spinlock_lock(&hlp->lock);
 		hlp->ticks -= t->ticks;
 		spinlock_unlock(&hlp->lock);
116,7 → 116,7
 	}
 	spinlock_unlock(&t->lock);
-	spinlock_unlock(&the->cpu->timeoutlock);
+	spinlock_unlock(&CPU->timeoutlock);
 	cpu_priority_restore(pri);
 }
/SPARTAN/trunk/src/time/clock.c
---
57,8 → 57,8
 	 * To avoid lock ordering problems,
 	 * run all expired timeouts as you visit them.
 	 */
-	spinlock_lock(&the->cpu->timeoutlock);
-	while ((l = the->cpu->timeout_active_head.next) != &the->cpu->timeout_active_head) {
+	spinlock_lock(&CPU->timeoutlock);
+	while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
 		h = list_get_instance(l, timeout_t, link);
 		spinlock_lock(&h->lock);
 		if (h->ticks-- != 0) {
70,30 → 70,30
 		arg = h->arg;
 		timeout_reinitialize(h);
 		spinlock_unlock(&h->lock);
-		spinlock_unlock(&the->cpu->timeoutlock);
+		spinlock_unlock(&CPU->timeoutlock);
 		f(arg);
-		spinlock_lock(&the->cpu->timeoutlock);
+		spinlock_lock(&CPU->timeoutlock);
 	}
-	spinlock_unlock(&the->cpu->timeoutlock);
+	spinlock_unlock(&CPU->timeoutlock);
 	/*
-	 * Do CPU usage accounting and find out whether to preempt the->thread.
+	 * Do CPU usage accounting and find out whether to preempt THREAD.
 	 */
-	if (the->thread) {
-		spinlock_lock(&the->cpu->lock);
-		the->cpu->needs_relink++;
-		spinlock_unlock(&the->cpu->lock);
+	if (THREAD) {
+		spinlock_lock(&CPU->lock);
+		CPU->needs_relink++;
+		spinlock_unlock(&CPU->lock);
-		spinlock_lock(&the->thread->lock);
-		if (!the->thread->ticks--) {
-			spinlock_unlock(&the->thread->lock);
+		spinlock_lock(&THREAD->lock);
+		if (!THREAD->ticks--) {
+			spinlock_unlock(&THREAD->lock);
 			scheduler();
 		}
 		else {
-			spinlock_unlock(&the->thread->lock);
+			spinlock_unlock(&THREAD->lock);
 		}
 	}
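Every hunk in this changeset preserves the same access discipline: raise the CPU priority to mask interrupts, take the spinlock guarding the structure, touch the field through the new macro, unlock, and restore the previous priority. Isolated into a hypothetical helper (not part of the changeset), the idiom reads:

	/* Hypothetical helper showing the access pattern used throughout:
	 * interrupts masked, per-CPU lock held, field read, everything
	 * undone in reverse order; compare the count computation in kcpulb(). */
	static int nrdy_snapshot(void)
	{
		pri_t pri;
		int n;

		pri = cpu_priority_high();
		spinlock_lock(&CPU->lock);
		n = CPU->nrdy;
		spinlock_unlock(&CPU->lock);
		cpu_priority_restore(pri);

		return n;
	}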