Subversion Repositories HelenOS-historic

Compare Revisions

Rev 14 → Rev 15

/SPARTAN/trunk/test/synch/rwlock2/test.c
74,7 → 74,7
rwlock_read_lock(&rwlock);
rwlock_read_lock(&rwlock);
thrd = thread_create(writer, NULL, the->task, 0);
thrd = thread_create(writer, NULL, TASK, 0);
if (thrd)
thread_ready(thrd);
else
/SPARTAN/trunk/test/synch/rwlock3/test.c
44,16 → 44,16
 
void reader(void *arg)
{
printf("cpu%d, tid %d: trying to lock rwlock for reading....\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d: trying to lock rwlock for reading....\n", CPU->id, THREAD->tid);
rwlock_read_lock(&rwlock);
rwlock_read_unlock(&rwlock);
printf("cpu%d, tid %d: success\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d: success\n", CPU->id, THREAD->tid);
 
printf("cpu%d, tid %d: trying to lock rwlock for writing....\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d: trying to lock rwlock for writing....\n", CPU->id, THREAD->tid);
 
rwlock_write_lock(&rwlock);
rwlock_write_unlock(&rwlock);
printf("cpu%d, tid %d: success\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d: success\n", CPU->id, THREAD->tid);
printf("Test passed.\n");
 
77,7 → 77,7
rwlock_write_lock(&rwlock);
for (i=0; i<4; i++) {
thrd = thread_create(reader, NULL, the->task, 0);
thrd = thread_create(reader, NULL, TASK, 0);
if (thrd)
thread_ready(thrd);
else
/SPARTAN/trunk/test/synch/semaphore1/test.c
99,7 → 99,7
for (j=0; j<(CONSUMERS+PRODUCERS)/2; j++) {
for (k=0; k<i; k++) {
thrd = thread_create(consumer, NULL, the->task, 0);
thrd = thread_create(consumer, NULL, TASK, 0);
if (thrd)
thread_ready(thrd);
else
106,7 → 106,7
failed();
}
for (k=0; k<(4-i); k++) {
thrd = thread_create(producer, NULL, the->task, 0);
thrd = thread_create(producer, NULL, TASK, 0);
if (thrd)
thread_ready(thrd);
else
/SPARTAN/trunk/test/synch/rwlock4/test.c
73,13 → 73,13
waitq_sleep(&can_start);
 
to = random(40000);
printf("cpu%d, tid %d w+ (%d)\n", the->cpu->id, the->thread->tid, to);
printf("cpu%d, tid %d w+ (%d)\n", CPU->id, THREAD->tid, to);
rc = rwlock_write_lock_timeout(&rwlock, to);
if (SYNCH_FAILED(rc)) {
printf("cpu%d, tid %d w!\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d w!\n", CPU->id, THREAD->tid);
return;
};
printf("cpu%d, tid %d w=\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d w=\n", CPU->id, THREAD->tid);
 
if (rwlock.readers_in) panic("Oops.");
thread_usleep(random(1000000));
86,7 → 86,7
if (rwlock.readers_in) panic("Oops.");
 
rwlock_write_unlock(&rwlock);
printf("cpu%d, tid %d w-\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d w-\n", CPU->id, THREAD->tid);
}
 
void reader(void *arg)
95,16 → 95,16
waitq_sleep(&can_start);
to = random(2000);
printf("cpu%d, tid %d r+ (%d)\n", the->cpu->id, the->thread->tid, to);
printf("cpu%d, tid %d r+ (%d)\n", CPU->id, THREAD->tid, to);
rc = rwlock_read_lock_timeout(&rwlock, to);
if (SYNCH_FAILED(rc)) {
printf("cpu%d, tid %d r!\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d r!\n", CPU->id, THREAD->tid);
return;
}
printf("cpu%d, tid %d r=\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d r=\n", CPU->id, THREAD->tid);
thread_usleep(30000);
rwlock_read_unlock(&rwlock);
printf("cpu%d, tid %d r-\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d r-\n", CPU->id, THREAD->tid);
}
 
void failed(void)
134,7 → 134,7
k = random(7) + 1;
printf("Creating %d readers\n", k);
for (i=0; i<k; i++) {
thrd = thread_create(reader, NULL, the->task, 0);
thrd = thread_create(reader, NULL, TASK, 0);
if (thrd)
thread_ready(thrd);
else
144,7 → 144,7
k = random(5) + 1;
printf("Creating %d writers\n", k);
for (i=0; i<k; i++) {
thrd = thread_create(writer, NULL, the->task, 0);
thrd = thread_create(writer, NULL, TASK, 0);
if (thrd)
thread_ready(thrd);
else
/SPARTAN/trunk/test/synch/semaphore2/test.c
69,18 → 69,18
waitq_sleep(&can_start);
to = random(20000);
printf("cpu%d, tid %d down+ (%d)\n", the->cpu->id, the->thread->tid, to);
printf("cpu%d, tid %d down+ (%d)\n", CPU->id, THREAD->tid, to);
rc = semaphore_down_timeout(&sem, to);
if (SYNCH_FAILED(rc)) {
printf("cpu%d, tid %d down!\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d down!\n", CPU->id, THREAD->tid);
return;
}
printf("cpu%d, tid %d down=\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d down=\n", CPU->id, THREAD->tid);
thread_usleep(random(30000));
semaphore_up(&sem);
printf("cpu%d, tid %d up\n", the->cpu->id, the->thread->tid);
printf("cpu%d, tid %d up\n", CPU->id, THREAD->tid);
}
 
void failed(void)
107,7 → 107,7
k = random(7) + 1;
printf("Creating %d consumers\n", k);
for (i=0; i<k; i++) {
thrd = thread_create(consumer, NULL, the->task, 0);
thrd = thread_create(consumer, NULL, TASK, 0);
if (thrd)
thread_ready(thrd);
else
/SPARTAN/trunk/test/synch/rwlock5/test.c
95,7 → 95,7
for (j=0; j<(READERS+WRITERS)/2; j++) {
for (k=0; k<i; k++) {
thrd = thread_create(reader, NULL, the->task, 0);
thrd = thread_create(reader, NULL, TASK, 0);
if (thrd)
thread_ready(thrd);
else
102,7 → 102,7
failed();
}
for (k=0; k<(4-i); k++) {
thrd = thread_create(writer, NULL, the->task, 0);
thrd = thread_create(writer, NULL, TASK, 0);
if (thrd)
thread_ready(thrd);
else
/SPARTAN/trunk/include/arch.h
34,6 → 34,10
 
#include <cpu.h>
 
#define CPU (the->cpu)
#define THREAD (the->thread)
#define TASK (the->task)
 
extern cpu_private_page_t *the;
 
extern void arch_init(void);
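
For orientation: everything below this header change is mechanical substitution through these three accessors. A minimal sketch of the pattern they implement, with the structure simplified for illustration (the real cpu_private_page_t carries more per-CPU state than shown here):

typedef struct cpu cpu_t;
typedef struct thread thread_t;
typedef struct task task_t;

typedef struct {
	cpu_t *cpu;        /* the CPU this private page describes */
	thread_t *thread;  /* thread currently running on that CPU */
	task_t *task;      /* task owning that thread */
} cpu_private_page_t;

extern cpu_private_page_t *the;

#define CPU    (the->cpu)
#define THREAD (the->thread)
#define TASK   (the->task)

/* After preprocessing, spinlock_lock(&CPU->lock) is exactly
 * spinlock_lock(&the->cpu->lock); only the source text gets shorter. */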
/SPARTAN/trunk/src/proc/scheduler.c
69,9 → 69,9
loop:
cpu_priority_high();
 
spinlock_lock(&the->cpu->lock);
n = the->cpu->nrdy;
spinlock_unlock(&the->cpu->lock);
spinlock_lock(&CPU->lock);
n = CPU->nrdy;
spinlock_unlock(&CPU->lock);
 
cpu_priority_low();
81,8 → 81,8
* If the load balancing thread is not running, wake it up and
* set CPU-private flag that the kcpulb has been started.
*/
if (test_and_set(&the->cpu->kcpulbstarted) == 0) {
waitq_wakeup(&the->cpu->kcpulb_wq, 0);
if (test_and_set(&CPU->kcpulbstarted) == 0) {
waitq_wakeup(&CPU->kcpulb_wq, 0);
goto loop;
}
#endif /* __SMP__ */
100,7 → 100,7
cpu_priority_high();
 
for (i = 0; i<RQ_COUNT; i++) {
r = &the->cpu->rq[i];
r = &CPU->rq[i];
spinlock_lock(&r->lock);
if (r->n == 0) {
/*
114,9 → 114,9
nrdy--;
spinlock_unlock(&nrdylock);
 
spinlock_lock(&the->cpu->lock);
the->cpu->nrdy--;
spinlock_unlock(&the->cpu->lock);
spinlock_lock(&CPU->lock);
CPU->nrdy--;
spinlock_unlock(&CPU->lock);
 
r->n--;
 
129,7 → 129,7
spinlock_unlock(&r->lock);
 
spinlock_lock(&t->lock);
t->cpu = the->cpu;
t->cpu = CPU;
 
t->ticks = us2ticks((i+1)*10000);
t->pri = i; /* eventually correct rq index */
159,11 → 159,11
int i, n;
 
list_initialize(&head);
spinlock_lock(&the->cpu->lock);
if (the->cpu->needs_relink > NEEDS_RELINK_MAX) {
spinlock_lock(&CPU->lock);
if (CPU->needs_relink > NEEDS_RELINK_MAX) {
for (i = start; i<RQ_COUNT-1; i++) {
/* remember and empty rq[i + 1] */
r = &the->cpu->rq[i + 1];
r = &CPU->rq[i + 1];
spinlock_lock(&r->lock);
list_concat(&head, &r->rq_head);
n = r->n;
171,15 → 171,15
spinlock_unlock(&r->lock);
/* append rq[i + 1] to rq[i] */
r = &the->cpu->rq[i];
r = &CPU->rq[i];
spinlock_lock(&r->lock);
list_concat(&r->rq_head, &head);
r->n += n;
spinlock_unlock(&r->lock);
}
the->cpu->needs_relink = 0;
CPU->needs_relink = 0;
}
spinlock_unlock(&the->cpu->lock);
spinlock_unlock(&CPU->lock);
 
}
 
195,17 → 195,17
if (haltstate)
halt();
 
if (the->thread) {
spinlock_lock(&the->thread->lock);
if (!context_save(&the->thread->saved_context)) {
if (THREAD) {
spinlock_lock(&THREAD->lock);
if (!context_save(&THREAD->saved_context)) {
/*
* This is the place where threads leave scheduler();
*/
spinlock_unlock(&the->thread->lock);
cpu_priority_restore(the->thread->saved_context.pri);
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(THREAD->saved_context.pri);
return;
}
the->thread->saved_context.pri = pri;
THREAD->saved_context.pri = pri;
}
 
/*
220,10 → 220,10
* Therefore the scheduler() function continues in
* scheduler_separated_stack().
*/
context_save(&the->cpu->saved_context);
the->cpu->saved_context.sp = (__address) &the->cpu->stack[CPU_STACK_SIZE-8];
the->cpu->saved_context.pc = (__address) scheduler_separated_stack;
context_restore(&the->cpu->saved_context);
context_save(&CPU->saved_context);
CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
CPU->saved_context.pc = (__address) scheduler_separated_stack;
context_restore(&CPU->saved_context);
/* not reached */
}
 
231,34 → 231,34
{
int priority;
 
if (the->thread) {
switch (the->thread->state) {
if (THREAD) {
switch (THREAD->state) {
case Running:
the->thread->state = Ready;
spinlock_unlock(&the->thread->lock);
thread_ready(the->thread);
THREAD->state = Ready;
spinlock_unlock(&THREAD->lock);
thread_ready(THREAD);
break;
 
case Exiting:
frame_free((__address) the->thread->kstack);
if (the->thread->ustack) {
frame_free((__address) the->thread->ustack);
frame_free((__address) THREAD->kstack);
if (THREAD->ustack) {
frame_free((__address) THREAD->ustack);
}
/*
* Detach from the containing task.
*/
spinlock_lock(&the->task->lock);
list_remove(&the->thread->th_link);
spinlock_unlock(&the->task->lock);
spinlock_lock(&TASK->lock);
list_remove(&THREAD->th_link);
spinlock_unlock(&TASK->lock);
 
spinlock_unlock(&the->thread->lock);
spinlock_unlock(&THREAD->lock);
spinlock_lock(&threads_lock);
list_remove(&the->thread->threads_link);
list_remove(&THREAD->threads_link);
spinlock_unlock(&threads_lock);
free(the->thread);
free(THREAD);
break;
266,24 → 266,24
/*
* Prefer the thread after it's woken up.
*/
the->thread->pri = -1;
THREAD->pri = -1;
 
/*
* We need to release wq->lock which we locked in waitq_sleep().
* Address of wq->lock is kept in the->thread->sleep_queue.
* Address of wq->lock is kept in THREAD->sleep_queue.
*/
spinlock_unlock(&the->thread->sleep_queue->lock);
spinlock_unlock(&THREAD->sleep_queue->lock);
 
/*
* Check for possible requests for out-of-context invocation.
*/
if (the->thread->call_me) {
the->thread->call_me(the->thread->call_me_with);
the->thread->call_me = NULL;
the->thread->call_me_with = NULL;
if (THREAD->call_me) {
THREAD->call_me(THREAD->call_me_with);
THREAD->call_me = NULL;
THREAD->call_me_with = NULL;
}
 
spinlock_unlock(&the->thread->lock);
spinlock_unlock(&THREAD->lock);
break;
 
291,38 → 291,38
/*
* Entering state is unexpected.
*/
panic("tid%d: unexpected state %s\n", the->thread->tid, thread_states[the->thread->state]);
panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
break;
}
the->thread = NULL;
THREAD = NULL;
}
the->thread = find_best_thread();
THREAD = find_best_thread();
spinlock_lock(&the->thread->lock);
priority = the->thread->pri;
spinlock_unlock(&the->thread->lock);
spinlock_lock(&THREAD->lock);
priority = THREAD->pri;
spinlock_unlock(&THREAD->lock);
relink_rq(priority);
 
spinlock_lock(&the->thread->lock);
spinlock_lock(&THREAD->lock);
 
/*
* If both the old and the new task are the same, lots of work is avoided.
*/
if (the->task != the->thread->task) {
if (TASK != THREAD->task) {
vm_t *m1 = NULL;
vm_t *m2;
 
if (the->task) {
spinlock_lock(&the->task->lock);
m1 = the->task->vm;
spinlock_unlock(&the->task->lock);
if (TASK) {
spinlock_lock(&TASK->lock);
m1 = TASK->vm;
spinlock_unlock(&TASK->lock);
}
 
spinlock_lock(&the->thread->task->lock);
m2 = the->thread->task->vm;
spinlock_unlock(&the->thread->task->lock);
spinlock_lock(&THREAD->task->lock);
m2 = THREAD->task->vm;
spinlock_unlock(&THREAD->task->lock);
/*
* Note that it is possible for two tasks to share one vm mapping.
337,16 → 337,16
}
vm_install(m2);
}
the->task = the->thread->task;
TASK = THREAD->task;
}
 
the->thread->state = Running;
THREAD->state = Running;
 
#ifdef SCHEDULER_VERBOSE
printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", the->cpu->id, the->thread->tid, the->thread->pri, the->thread->ticks, the->cpu->nrdy);
printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
#endif
 
context_restore(&the->thread->saved_context);
context_restore(&THREAD->saved_context);
/* not reached */
}
 
365,7 → 365,7
/*
* Sleep until there's some work to do.
*/
waitq_sleep(&the->cpu->kcpulb_wq);
waitq_sleep(&CPU->kcpulb_wq);
 
not_satisfied:
/*
374,10 → 374,10
* passes. Each time get the most up to date counts.
*/
pri = cpu_priority_high();
spinlock_lock(&the->cpu->lock);
spinlock_lock(&CPU->lock);
count = nrdy / config.cpu_active;
count -= the->cpu->nrdy;
spinlock_unlock(&the->cpu->lock);
count -= CPU->nrdy;
spinlock_unlock(&CPU->lock);
cpu_priority_restore(pri);
 
if (count <= 0)
399,7 → 399,7
* Not interested in ourselves.
* Doesn't require interrupt disabling for kcpulb is X_WIRED.
*/
if (the->cpu == cpu)
if (CPU == cpu)
continue;
 
restart: pri = cpu_priority_high();
460,7 → 460,7
*/
spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", the->cpu->id, t->tid, the->cpu->id, the->cpu->nrdy, nrdy / config.cpu_active);
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
#endif
t->flags |= X_STOLEN;
spinlock_unlock(&t->lock);
483,7 → 483,7
}
}
 
if (the->cpu->nrdy) {
if (CPU->nrdy) {
/*
* Be a little bit light-weight and let migrated threads run.
*/
503,7 → 503,7
/*
* Tell find_best_thread() to wake us up later again.
*/
the->cpu->kcpulbstarted = 0;
CPU->kcpulbstarted = 0;
goto loop;
}
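
The two-step switch in scheduler() above — context_save() on the outgoing thread's stack, then context_restore() of a prepared context whose sp and pc point at the CPU's private stack — has a close user-space analogue in POSIX ucontext. A minimal sketch of just the stack hand-off, assuming nothing about SPARTAN's context layout:

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, sched_ctx;
static char private_stack[64 * 1024];   /* counterpart of CPU->stack */

static void on_private_stack(void)
{
	/* Runs with sp inside private_stack, like scheduler_separated_stack();
	 * returning resumes main_ctx via uc_link. */
	printf("running on the private stack\n");
}

int main(void)
{
	getcontext(&sched_ctx);
	sched_ctx.uc_stack.ss_sp = private_stack;
	sched_ctx.uc_stack.ss_size = sizeof(private_stack);
	sched_ctx.uc_link = &main_ctx;
	makecontext(&sched_ctx, on_private_stack, 0);

	/* Save the current context and switch, as context_save() +
	 * context_restore() do above. */
	swapcontext(&main_ctx, &sched_ctx);
	printf("back on the original stack\n");
	return 0;
}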
 
/SPARTAN/trunk/src/proc/task.c
40,7 → 40,7
 
void task_init(void)
{
the->task = NULL;
TASK = NULL;
spinlock_initialize(&tasks_lock);
list_initialize(&tasks_head);
}
/SPARTAN/trunk/src/proc/thread.c
66,11 → 66,11
*/
void cushion(void)
{
void (*f)(void *) = the->thread->thread_code;
void *arg = the->thread->thread_arg;
void (*f)(void *) = THREAD->thread_code;
void *arg = THREAD->thread_arg;
 
/* this is where each thread wakes up after its creation */
spinlock_unlock(&the->thread->lock);
spinlock_unlock(&THREAD->lock);
cpu_priority_low();
 
f(arg);
80,7 → 80,7
 
void thread_init(void)
{
the->thread = NULL;
THREAD = NULL;
nrdy = 0;
spinlock_initialize(&threads_lock);
list_initialize(&threads_head);
214,14 → 214,14
 
restart:
pri = cpu_priority_high();
spinlock_lock(&the->thread->lock);
if (the->thread->timeout_pending) { /* busy waiting for timeouts in progress */
spinlock_unlock(&the->thread->lock);
spinlock_lock(&THREAD->lock);
if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(pri);
goto restart;
}
the->thread->state = Exiting;
spinlock_unlock(&the->thread->lock);
THREAD->state = Exiting;
spinlock_unlock(&THREAD->lock);
scheduler();
}
 
247,9 → 247,9
pri_t pri;
pri = cpu_priority_high();
spinlock_lock(&the->thread->lock);
the->thread->call_me = call_me;
the->thread->call_me_with = call_me_with;
spinlock_unlock(&the->thread->lock);
spinlock_lock(&THREAD->lock);
THREAD->call_me = call_me;
THREAD->call_me_with = call_me_with;
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(pri);
}
/SPARTAN/trunk/src/main/kinit.c
70,7 → 70,7
* not mess together with kcpulb and klwtm threads.
* Just a beautification.
*/
if (t = thread_create(kmp, NULL, the->task, 0)) {
if (t = thread_create(kmp, NULL, TASK, 0)) {
spinlock_lock(&t->lock);
t->flags |= X_WIRED;
t->cpu = &cpus[0];
95,7 → 95,7
*/
for (i = 0; i < config.cpu_count; i++) {
 
if (t = thread_create(kcpulb, NULL, the->task, 0)) {
if (t = thread_create(kcpulb, NULL, TASK, 0)) {
spinlock_lock(&t->lock);
t->flags |= X_WIRED;
t->cpu = &cpus[i];
/SPARTAN/trunk/src/main/main.c
184,9 → 184,9
* collide with another CPU coming up. To prevent this, we
* switch to this cpu's private stack prior to waking kmp up.
*/
the->cpu->saved_context.sp = (__address) &the->cpu->stack[CPU_STACK_SIZE-8];
the->cpu->saved_context.pc = (__address) main_ap_separated_stack;
context_restore(&the->cpu->saved_context);
CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
CPU->saved_context.pc = (__address) main_ap_separated_stack;
context_restore(&CPU->saved_context);
/* not reached */
}
 
/SPARTAN/trunk/src/synch/rwlock.c
80,9 → 80,9
int rc;
pri = cpu_priority_high();
spinlock_lock(&the->thread->lock);
the->thread->rwlock_holder_type = RWLOCK_WRITER;
spinlock_unlock(&the->thread->lock);
spinlock_lock(&THREAD->lock);
THREAD->rwlock_holder_type = RWLOCK_WRITER;
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(pri);
 
/*
120,9 → 120,9
pri_t pri;
pri = cpu_priority_high();
spinlock_lock(&the->thread->lock);
the->thread->rwlock_holder_type = RWLOCK_READER;
spinlock_unlock(&the->thread->lock);
spinlock_lock(&THREAD->lock);
THREAD->rwlock_holder_type = RWLOCK_READER;
spinlock_unlock(&THREAD->lock);
 
spinlock_lock(&rwl->lock);
 
/SPARTAN/trunk/src/synch/spinlock.c
49,7 → 49,7
 
while (test_and_set(&sl->val)) {
if (i++ > 300000) {
printf("cpu%d: looping on spinlock %X, caller=%X\n", the->cpu->id, sl, caller);
printf("cpu%d: looping on spinlock %X, caller=%X\n", CPU->id, sl, caller);
i = 0;
}
}
/SPARTAN/trunk/src/synch/waitq.c
129,13 → 129,13
* Simply, the thread is not allowed to go to sleep if
* there are timeouts in progress.
*/
spinlock_lock(&the->thread->lock);
if (the->thread->timeout_pending) {
spinlock_unlock(&the->thread->lock);
spinlock_lock(&THREAD->lock);
if (THREAD->timeout_pending) {
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(pri);
goto restart;
}
spinlock_unlock(&the->thread->lock);
spinlock_unlock(&THREAD->lock);
spinlock_lock(&wq->lock);
159,30 → 159,30
/*
* Now we are firmly decided to go to sleep.
*/
spinlock_lock(&the->thread->lock);
spinlock_lock(&THREAD->lock);
if (usec) {
/* We use the timeout variant. */
if (!context_save(&the->thread->sleep_timeout_context)) {
if (!context_save(&THREAD->sleep_timeout_context)) {
/*
* Short emulation of scheduler() return code.
*/
spinlock_unlock(&the->thread->lock);
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(pri);
return ESYNCH_TIMEOUT;
}
the->thread->timeout_pending = 1;
timeout_register(&the->thread->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, the->thread);
THREAD->timeout_pending = 1;
timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, THREAD);
}
 
list_append(&the->thread->wq_link, &wq->head);
list_append(&THREAD->wq_link, &wq->head);
 
/*
* Suspend execution.
*/
the->thread->state = Sleeping;
the->thread->sleep_queue = wq;
THREAD->state = Sleeping;
THREAD->sleep_queue = wq;
 
spinlock_unlock(&the->thread->lock);
spinlock_unlock(&THREAD->lock);
 
scheduler(); /* wq->lock is released in scheduler_separated_stack() */
cpu_priority_restore(pri);
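
The sleep path above (register a timeout, save a context for the timeout wakeup, then block) is the kernel-side machinery behind what user space gets as a timed wait. A rough POSIX analogue, purely for comparison — pthread_cond_timedwait() returning ETIMEDOUT corresponds to the ESYNCH_TIMEOUT path, and the counter here merely stands in for pending wakeups:

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int wakeups;   /* pending-wakeup counter, illustrative only */

int wait_timeout(unsigned usec)
{
	struct timespec ts;
	int rc = 0;

	/* Build the absolute deadline usec microseconds from now. */
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += usec / 1000000;
	ts.tv_nsec += (usec % 1000000) * 1000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (wakeups == 0 && rc == 0)
		rc = pthread_cond_timedwait(&cond, &lock, &ts);
	if (rc == 0)
		wakeups--;
	pthread_mutex_unlock(&lock);
	return rc;   /* 0 on wakeup, ETIMEDOUT on timeout */
}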
/SPARTAN/trunk/src/lib/func.c
38,7 → 38,7
{
haltstate = 1;
cpu_priority_high();
printf("cpu%d: halted\n", the->cpu->id);
printf("cpu%d: halted\n", CPU->id);
cpu_halt();
}
 
/SPARTAN/trunk/src/cpu/cpu.c
27,6 → 27,7
*/
 
#include <cpu.h>
#include <arch.h>
#include <arch/cpu.h>
#include <mm/heap.h>
#include <mm/page.h>
84,7 → 85,7
}
#endif /* __SMP__ */
the->cpu = &cpus[config.cpu_active-1];
CPU = &cpus[config.cpu_active-1];
cpu_identify();
cpu_arch_init();
}
/SPARTAN/trunk/src/time/delay.c
41,6 → 41,6
pri_t pri;
 
pri = cpu_priority_high();
asm_delay_loop(microseconds * the->cpu->delay_loop_const);
asm_delay_loop(microseconds * CPU->delay_loop_const);
cpu_priority_restore(pri);
}
/SPARTAN/trunk/src/time/timeout.c
39,8 → 39,8
 
void timeout_init(void)
{
spinlock_initialize(&the->cpu->timeoutlock);
list_initialize(&the->cpu->timeout_active_head);
spinlock_initialize(&CPU->timeoutlock);
list_initialize(&CPU->timeout_active_head);
}
 
 
70,13 → 70,13
__u64 sum;
 
pri = cpu_priority_high();
spinlock_lock(&the->cpu->timeoutlock);
spinlock_lock(&CPU->timeoutlock);
spinlock_lock(&t->lock);
if (t->cpu)
panic("timeout_register: t->cpu != 0");
 
t->cpu = the->cpu;
t->cpu = CPU;
t->ticks = us2ticks(time);
t->handler = f;
86,8 → 86,8
* Insert t into the active timeouts list according to t->ticks.
*/
sum = 0;
l = the->cpu->timeout_active_head.next;
while (l != &the->cpu->timeout_active_head) {
l = CPU->timeout_active_head.next;
while (l != &CPU->timeout_active_head) {
hlp = list_get_instance(l, timeout_t, link);
spinlock_lock(&hlp->lock);
if (t->ticks < sum + hlp->ticks) {
109,7 → 109,7
/*
* Decrease ticks of t's immediate successor by t->ticks.
*/
if (l != &the->cpu->timeout_active_head) {
if (l != &CPU->timeout_active_head) {
spinlock_lock(&hlp->lock);
hlp->ticks -= t->ticks;
spinlock_unlock(&hlp->lock);
116,7 → 116,7
}
 
spinlock_unlock(&t->lock);
spinlock_unlock(&the->cpu->timeoutlock);
spinlock_unlock(&CPU->timeoutlock);
cpu_priority_restore(pri);
}
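
timeout_register() keeps the active list as a delta queue: each entry's ticks are relative to the sum of all earlier entries, so the clock only ever has to decrement the head. That is why the insertion above subtracts the running sum while walking and then decreases the immediate successor's ticks. A stand-alone sketch of the rule, with a fixed array instead of SPARTAN's linked lists:

#include <stdio.h>

#define MAX_TIMEOUTS 16

static unsigned deltas[MAX_TIMEOUTS];   /* ticks relative to the previous entry */
static int n;

void delta_insert(unsigned ticks)
{
	int i = 0, j;

	/* Walk while the cumulative sum still fits under the new timeout. */
	while (i < n && ticks >= deltas[i]) {
		ticks -= deltas[i];
		i++;
	}

	/* Shift successors right and insert the remainder. */
	for (j = n; j > i; j--)
		deltas[j] = deltas[j - 1];
	deltas[i] = ticks;
	n++;

	/* Charge the immediate successor the new entry's ticks. */
	if (i + 1 < n)
		deltas[i + 1] -= ticks;
}

int main(void)
{
	unsigned sum = 0;
	int i;

	delta_insert(5);
	delta_insert(8);
	delta_insert(6);   /* lands in between: deltas become 5, 1, 2 */
	for (i = 0; i < n; i++)
		printf("%u ", sum += deltas[i]);   /* prints 5 6 8 */
	putchar('\n');
	return 0;
}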
 
/SPARTAN/trunk/src/time/clock.c
57,8 → 57,8
* To avoid lock ordering problems,
* run all expired timeouts as you visit them.
*/
spinlock_lock(&the->cpu->timeoutlock);
while ((l = the->cpu->timeout_active_head.next) != &the->cpu->timeout_active_head) {
spinlock_lock(&CPU->timeoutlock);
while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
h = list_get_instance(l, timeout_t, link);
spinlock_lock(&h->lock);
if (h->ticks-- != 0) {
70,30 → 70,30
arg = h->arg;
timeout_reinitialize(h);
spinlock_unlock(&h->lock);
spinlock_unlock(&the->cpu->timeoutlock);
spinlock_unlock(&CPU->timeoutlock);
 
f(arg);
 
spinlock_lock(&the->cpu->timeoutlock);
spinlock_lock(&CPU->timeoutlock);
}
spinlock_unlock(&the->cpu->timeoutlock);
spinlock_unlock(&CPU->timeoutlock);
 
/*
* Do CPU usage accounting and find out whether to preempt the->thread.
* Do CPU usage accounting and find out whether to preempt THREAD.
*/
 
if (the->thread) {
spinlock_lock(&the->cpu->lock);
the->cpu->needs_relink++;
spinlock_unlock(&the->cpu->lock);
if (THREAD) {
spinlock_lock(&CPU->lock);
CPU->needs_relink++;
spinlock_unlock(&CPU->lock);
spinlock_lock(&the->thread->lock);
if (!the->thread->ticks--) {
spinlock_unlock(&the->thread->lock);
spinlock_lock(&THREAD->lock);
if (!THREAD->ticks--) {
spinlock_unlock(&THREAD->lock);
scheduler();
}
else {
spinlock_unlock(&the->thread->lock);
spinlock_unlock(&THREAD->lock);
}
}
 
/SPARTAN/trunk/arch/mips/src/exception.c
42,9 → 42,9
epc = cp0_epc_read();
cp0_status_write(cp0_status_read() & ~ cp0_status_exl_exception_bit);
 
if (the->thread) {
the->thread->saved_pri = pri;
the->thread->saved_epc = epc;
if (THREAD) {
THREAD->saved_pri = pri;
THREAD->saved_epc = epc;
}
/* decode exception number and process the exception */
switch(excno = (cp0_cause_read()>>2)&0x1f) {
54,9 → 54,9
default: panic(PANIC "unhandled exception %d\n", excno); break;
}
if (the->thread) {
pri = the->thread->saved_pri;
epc = the->thread->saved_epc;
if (THREAD) {
pri = THREAD->saved_pri;
epc = THREAD->saved_epc;
}
 
cp0_epc_write(epc);
/SPARTAN/trunk/arch/mips/src/cpu/cpu.c
83,8 → 83,8
 
void cpu_identify(void)
{
the->cpu->arch.rev_num = cp0_prid_read() & 0xff;
the->cpu->arch.imp_num = (cp0_prid_read() >> 8) & 0xff;
CPU->arch.rev_num = cp0_prid_read() & 0xff;
CPU->arch.imp_num = (cp0_prid_read() >> 8) & 0xff;
}
 
void cpu_print_report(cpu_t *m)
/SPARTAN/trunk/arch/mips/src/mm/tlb.c
46,7 → 46,7
 
void tlb_invalid(void)
{
panic(PANIC "%X: TLB exception at %X", cp0_badvaddr_read(), the->thread ? the->thread->saved_epc : 0);
panic(PANIC "%X: TLB exception at %X", cp0_badvaddr_read(), THREAD ? THREAD->saved_epc : 0);
}
 
void tlb_invalidate(int asid)
/SPARTAN/trunk/arch/ia32/src/smp/apic.c
114,7 → 114,7
 
void apic_spurious(__u8 n, __u32 stack[])
{
printf("cpu%d: APIC spurious interrupt\n", the->cpu->id);
printf("cpu%d: APIC spurious interrupt\n", CPU->id);
}
 
int apic_poll_errors(void)
142,7 → 142,7
}
 
/*
* Send all CPUs excluding the->cpu IPI vector.
* Send all CPUs excluding CPU IPI vector.
*/
int l_apic_broadcast_custom_ipi(__u8 vector)
{
232,7 → 232,7
 
l_apic[TPR] &= TPRClear;
 
if (the->cpu->arch.family >= 6)
if (CPU->arch.family >= 6)
enable_l_apic_in_msr();
tmp = l_apic[ICRlo] & ICRloClear;
270,7 → 270,7
#ifdef LAPIC_VERBOSE
int i, lint;
 
printf("LVT on cpu%d, LAPIC ID: %d\n", the->cpu->id, (l_apic[L_APIC_ID] >> 24)&0xf);
printf("LVT on cpu%d, LAPIC ID: %d\n", CPU->id, (l_apic[L_APIC_ID] >> 24)&0xf);
 
printf("LVT_Tm: ");
if (l_apic[LVT_Tm] & (1<<17)) printf("periodic"); else printf("one-shot"); putchar(',');
304,7 → 304,7
/*
* This register is supported only on P6 and higher.
*/
if (the->cpu->family > 5) {
if (CPU->family > 5) {
printf("LVT_PCINT: ");
if (l_apic[LVT_PCINT] & (1<<16)) printf("masked"); else printf("not masked"); putchar(',');
if (l_apic[LVT_PCINT] & (1<<12)) printf("send pending"); else printf("idle"); putchar(',');
/SPARTAN/trunk/arch/ia32/src/cpu/cpu.c
61,7 → 61,7
 
void cpu_arch_init(void)
{
the->cpu->arch.tss = tss_p;
CPU->arch.tss = tss_p;
}
 
 
70,7 → 70,7
cpu_info_t info;
int i;
 
the->cpu->arch.vendor = VendorUnknown;
CPU->arch.vendor = VendorUnknown;
if (has_cpuid()) {
cpuid(0, &info);
 
81,7 → 81,7
info.cpuid_ecx==AMD_CPUID_ECX &&
info.cpuid_edx==AMD_CPUID_EDX) {
the->cpu->arch.vendor = VendorAMD;
CPU->arch.vendor = VendorAMD;
}
 
/*
91,14 → 91,14
info.cpuid_ecx==INTEL_CPUID_ECX &&
info.cpuid_edx==INTEL_CPUID_EDX) {
 
the->cpu->arch.vendor = VendorIntel;
CPU->arch.vendor = VendorIntel;
 
}
cpuid(1, &info);
the->cpu->arch.family = (info.cpuid_eax>>8)&0xf;
the->cpu->arch.model = (info.cpuid_eax>>4)&0xf;
the->cpu->arch.stepping = (info.cpuid_eax>>0)&0xf;
CPU->arch.family = (info.cpuid_eax>>8)&0xf;
CPU->arch.model = (info.cpuid_eax>>4)&0xf;
CPU->arch.stepping = (info.cpuid_eax>>0)&0xf;
}
}
 
/SPARTAN/trunk/arch/ia32/src/userspace.c
42,8 → 42,8
/*
* Prepare TSS stack selector and pointers for next syscall.
*/
the->cpu->arch.tss->esp0 = (__address) &the->thread->kstack[THREAD_STACK_SIZE-8];
the->cpu->arch.tss->ss0 = selector(KDATA_DES);
CPU->arch.tss->esp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-8];
CPU->arch.tss->ss0 = selector(KDATA_DES);
__asm__ volatile (""
"pushl %0\n"
/SPARTAN/trunk/arch/ia32/src/drivers/i8259.c
116,5 → 116,5
 
void pic_spurious(__u8 n, __u32 stack[])
{
printf("cpu%d: PIC spurious interrupt\n", the->cpu->id);
printf("cpu%d: PIC spurious interrupt\n", CPU->id);
}
/SPARTAN/trunk/arch/ia32/src/drivers/i8042.c
54,5 → 54,5
 
trap_virtual_eoi();
x = inb(0x60);
printf("%d", the->cpu->id);;
printf("%d", CPU->id);;
}
/SPARTAN/trunk/arch/ia32/src/drivers/i8254.c
110,7 → 110,7
o2 |= inb(CLK_PORT1) << 8;
 
 
the->cpu->delay_loop_const = ((MAGIC_NUMBER*LOOPS)/1000) / ((t1-t2)-(o1-o2)) +
CPU->delay_loop_const = ((MAGIC_NUMBER*LOOPS)/1000) / ((t1-t2)-(o1-o2)) +
(((MAGIC_NUMBER*LOOPS)/1000) % ((t1-t2)-(o1-o2)) ? 1 : 0);
 
118,7 → 118,7
delay(1<<SHIFT);
clk2 = rdtsc();
the->cpu->frequency_mhz = (clk2-clk1)>>SHIFT;
CPU->frequency_mhz = (clk2-clk1)>>SHIFT;
 
return;
}
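
For reference, the frequency_mhz computation works because delay(1<<SHIFT) busy-waits for 2^SHIFT microseconds, so the TSC difference shifted right by SHIFT is ticks per microsecond, i.e. MHz. A rough user-space analogue, assuming an x86 compiler that provides __rdtsc(); usleep() can oversleep, so this only approximates:

#include <stdio.h>
#include <unistd.h>
#include <x86intrin.h>

#define SHIFT 10   /* measure across 1024 microseconds */

int main(void)
{
	unsigned long long clk1, clk2;

	clk1 = __rdtsc();
	usleep(1 << SHIFT);   /* stands in for the calibrated delay() */
	clk2 = __rdtsc();

	/* Ticks per microsecond == MHz; usleep's overshoot inflates this. */
	printf("approx. TSC frequency: %llu MHz\n", (clk2 - clk1) >> SHIFT);
	return 0;
}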